sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
huggingface/transformers:src/transformers/models/arcee/modular_arcee.py | # Copyright 2025 Arcee AI and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Arcee model."""
from transformers.utils import auto_docstring, logging
from ...modeling_rope_utils import RopeParameters
from ..llama.configuration_llama import LlamaConfig
from ..llama.modeling_llama import (
LlamaForCausalLM,
LlamaForQuestionAnswering,
LlamaForSequenceClassification,
LlamaForTokenClassification,
)
from ..nemotron.modeling_nemotron import NemotronMLP
logger = logging.get_logger(__name__)
class ArceeConfig(LlamaConfig):
r"""
This is the configuration class to store the configuration of a [`ArceeModel`]. It is used to instantiate an Arcee
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the AFM-4.5B-Base.
Pre-trained weights are available at
[arcee-ai/AFM-4.5B](https://huggingface.co/arcee-ai/AFM-4.5B)
and were used to build the examples below.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the Arcee model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`ArceeModel`]
hidden_size (`int`, *optional*, defaults to 2560):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 18432):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details checkout [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"relu2"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with. AFM-4.5B-Base supports up to 16384 tokens.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 128000):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 128001):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
head_dim (`int`, *optional*):
The attention head dimension. If None, it will default to hidden_size // num_attention_heads
```python
>>> from transformers import ArceeModel, ArceeConfig
>>> # Initializing an Arcee AFM-4.5B-Base style configuration
>>> configuration = ArceeConfig()
>>> # Initializing a model from the AFM-4.5B-Base style configuration
>>> model = ArceeModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "arcee"
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
def __init__(
self,
vocab_size: int | None = 32000,
hidden_size: int | None = 2560,
intermediate_size: int | None = 18432,
num_hidden_layers: int | None = 32,
num_attention_heads: int | None = 32,
num_key_value_heads: int | None = None,
hidden_act: str | None = "relu2",
max_position_embeddings: int | None = 4096,
initializer_range: float | None = 0.02,
rms_norm_eps: int | None = 1e-5,
use_cache: bool | None = True,
pad_token_id: int | None = None,
bos_token_id: int | None = 128000,
eos_token_id: int | None = 128001,
tie_word_embeddings: bool | None = False,
rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
attention_bias: bool | None = False,
attention_dropout: float | None = 0.0,
mlp_bias: bool | None = False,
head_dim: int | None = None,
**kwargs,
):
super().__init__(
vocab_size=vocab_size,
hidden_size=hidden_size,
intermediate_size=intermediate_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
num_key_value_heads=num_key_value_heads,
hidden_act=hidden_act,
max_position_embeddings=max_position_embeddings,
initializer_range=initializer_range,
rms_norm_eps=rms_norm_eps,
use_cache=use_cache,
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
rope_parameters=rope_parameters,
attention_bias=attention_bias,
attention_dropout=attention_dropout,
mlp_bias=mlp_bias,
head_dim=head_dim,
**kwargs,
)
del self.pretraining_tp
class ArceeMLP(NemotronMLP):
pass
@auto_docstring(checkpoint="arcee-ai/AFM-4.5B")
class ArceeForCausalLM(LlamaForCausalLM):
pass
@auto_docstring(checkpoint="arcee-ai/AFM-4.5B")
class ArceeForSequenceClassification(LlamaForSequenceClassification):
pass
@auto_docstring(checkpoint="arcee-ai/AFM-4.5B")
class ArceeForQuestionAnswering(LlamaForQuestionAnswering):
pass
@auto_docstring(checkpoint="arcee-ai/AFM-4.5B")
class ArceeForTokenClassification(LlamaForTokenClassification):
pass
__all__ = [
"ArceeConfig",
"ArceeForCausalLM",
"ArceeForQuestionAnswering",
"ArceeForSequenceClassification",
"ArceeForTokenClassification",
"ArceeModel", # noqa: F822
"ArceePreTrainedModel", # noqa: F822
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/arcee/modular_arcee.py",
"license": "Apache License 2.0",
"lines": 175,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/arcee/test_modeling_arcee.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Arcee model."""
import unittest
from pytest import mark
from transformers import AutoTokenizer, is_torch_available
from transformers.testing_utils import (
require_flash_attn,
require_torch,
require_torch_accelerator,
slow,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
if is_torch_available():
import torch
from transformers import (
ArceeConfig,
ArceeForCausalLM,
ArceeModel,
)
class ArceeModelTester(CausalLMModelTester):
if is_torch_available():
base_model_class = ArceeModel
@require_torch
class ArceeModelTest(CausalLMModelTest, unittest.TestCase):
model_tester_class = ArceeModelTester
# Need to use `0.8` instead of `0.9` for `test_cpu_offload`
# This is because we are hitting edge cases with the causal_mask buffer
model_split_percents = [0.5, 0.7, 0.8]
# used in `test_torch_compile_for_training`
_torch_compile_train_cls = ArceeForCausalLM if is_torch_available() else None
def test_arcee_mlp_uses_relu_squared(self):
"""Test that ArceeMLP uses ReLU² activation instead of SiLU."""
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
config.hidden_act = "relu2" # Ensure we're using relu2 activation
model = ArceeModel(config)
# Check that the MLP layers use the correct activation
mlp = model.layers[0].mlp
# Test with a simple input
x = torch.randn(1, 10, config.hidden_size)
up_output = mlp.up_proj(x)
# Verify ReLU² activation: x * relu(x)
expected_activation = up_output * torch.relu(up_output)
actual_activation = mlp.act_fn(up_output)
self.assertTrue(torch.allclose(expected_activation, actual_activation, atol=1e-5))
@require_torch_accelerator
class ArceeIntegrationTest(unittest.TestCase):
def tearDown(self):
import gc
gc.collect()
torch.cuda.empty_cache()
@slow
def test_model_from_pretrained(self):
# This test would be enabled once a pretrained model is available
# For now, we just test that the model can be instantiated
config = ArceeConfig()
model = ArceeForCausalLM(config)
self.assertIsInstance(model, ArceeForCausalLM)
@mark.skip(reason="Model is not currently public - will update test post release")
@slow
def test_model_generation(self):
EXPECTED_TEXT_COMPLETION = (
"""Once upon a time,In a village there was a farmer who had three sons. The farmer was very old and he"""
)
prompt = "Once upon a time"
tokenizer = AutoTokenizer.from_pretrained("arcee-ai/model-id")
model = ArceeForCausalLM.from_pretrained("arcee-ai/model-id", device_map="auto")
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)
generated_ids = model.generate(input_ids, max_new_tokens=20)
text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
@mark.skip(reason="Model is not currently public - will update test post release")
@slow
@require_flash_attn
@mark.flash_attn_test
def test_model_generation_flash_attn(self):
EXPECTED_TEXT_COMPLETION = (
" the food, the people, and the overall experience. I would definitely recommend this place to others."
)
prompt = "This is a nice place. " * 1024 + "I really enjoy the scenery,"
tokenizer = AutoTokenizer.from_pretrained("arcee-ai/model-id")
model = ArceeForCausalLM.from_pretrained(
"arcee-ai/model-id", device_map="auto", attn_implementation="flash_attention_2", dtype="auto"
)
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)
generated_ids = model.generate(input_ids, max_new_tokens=20)
text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text[len(prompt) :])
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/arcee/test_modeling_arcee.py",
"license": "Apache License 2.0",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/idefics2/image_processing_idefics2_fast.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
BatchFeature,
SizeDict,
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ImageInput,
PILImageResampling,
make_nested_list_of_images,
)
from ...processing_utils import Unpack
from ...utils import TensorType, auto_docstring, is_torchvision_available, logging
from .image_processing_idefics2 import Idefics2ImageProcessorKwargs, convert_to_rgb
if is_torchvision_available():
from torchvision.transforms import functional as F
logger = logging.get_logger(__name__)
def get_resize_output_image_size(image: "torch.Tensor", size: SizeDict) -> tuple[int, int]:
"""
Get the output size of the image after resizing given a dictionary specifying the max and min sizes.
Args:
image (`torch.Tensor`):
Image to resize.
size (`SizeDict`):
Size of the output image containing the keys "shortest_edge" and "longest_edge".
Returns:
The output size of the image after resizing.
"""
height, width = image.size()[-2:]
min_len = size.shortest_edge
max_len = size.longest_edge
aspect_ratio = width / height
if width >= height and width > max_len:
width = max_len
height = int(width / aspect_ratio)
elif height > width and height > max_len:
height = max_len
width = int(height * aspect_ratio)
height = max(height, min_len)
width = max(width, min_len)
return height, width
def get_max_height_width(images_list: list[list["torch.Tensor"]]) -> tuple[int, int]:
"""
Get the maximum height and width across all images in a batch.
"""
image_sizes = []
for images in images_list:
for image in images:
image_sizes.append(image.size()[-2:])
max_height = max(size[0] for size in image_sizes)
max_width = max(size[1] for size in image_sizes)
return (max_height, max_width)
def make_pixel_mask(image: "torch.Tensor", output_size: tuple[int, int]) -> "torch.Tensor":
"""
Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
Args:
image (`torch.Tensor`):
Image to make the pixel mask for.
output_size (`Tuple[int, int]`):
Output size of the mask.
"""
input_height, input_width = image.size()[-2:]
mask = torch.zeros(output_size, dtype=torch.int64, device=image.device)
mask[:input_height, :input_width] = 1
return mask
@auto_docstring
class Idefics2ImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
do_resize = True
do_rescale = True
do_normalize = True
do_pad = True
do_convert_rgb = True
do_image_splitting = False
size = {"shortest_edge": 378, "longest_edge": 980}
model_input_names = ["pixel_values", "pixel_attention_mask"]
valid_kwargs = Idefics2ImageProcessorKwargs
def convert_to_rgb(self, image: ImageInput) -> ImageInput:
"""
Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image
as is.
"""
return convert_to_rgb(image)
def resize(
self, image: torch.Tensor, size: SizeDict, interpolation: Optional["F.InterpolationMode"] = None, **kwargs
) -> torch.Tensor:
"""
Resize an image using torchvision's functional resize.
"""
interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR
if size.shortest_edge and size.longest_edge:
new_size = get_resize_output_image_size(image, size)
elif size.height and size.width:
new_size = (size.height, size.width)
else:
raise ValueError("Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys.")
image = F.resize(image, size=new_size, interpolation=interpolation, **kwargs)
return image
def _prepare_images_structure(self, images: ImageInput, expected_ndims: int = 3) -> ImageInput:
"""
Prepare a nested images structure for processing.
"""
images = self.fetch_images(images)
return make_nested_list_of_images(images, expected_ndims=expected_ndims)
def split_images(
self,
images: "torch.Tensor",
) -> list["torch.Tensor"]:
"""
Split a batch of images into 4 equal sub-images, and concatenate that sequence with the original image.
"""
height, width = images.size()[-2:]
mid_width = width // 2
mid_height = height // 2
batch_split_images = [
images[..., :mid_height, :mid_width],
images[..., :mid_height, mid_width:],
images[..., mid_height:, :mid_width],
images[..., mid_height:, mid_width:],
images,
]
# transpose the batch dimension to the first dimension
batch_split_images = [[image[i] for image in batch_split_images] for i in range(len(batch_split_images[0]))]
return batch_split_images
def pad(
self, image: "torch.Tensor", padded_size: tuple[int, int], fill: int = 0
) -> tuple["torch.Tensor", "torch.Tensor"]:
"""
Pad an image to the specified size and create the corresponding pixel mask.
"""
original_size = image.shape[-2:]
padding_bottom = padded_size[0] - original_size[0]
padding_right = padded_size[1] - original_size[1]
if padding_bottom < 0 or padding_right < 0:
raise ValueError(
f"Padding dimensions are negative. Please make sure that the padded size is larger than the "
f"original size. Got padded size: {padded_size}, original size: {original_size}."
)
# Only pad if necessary
if original_size != padded_size:
# torchvision's pad takes a 4-element tuple for 2D padding: (left, top, right, bottom)
padding = (0, 0, padding_right, padding_bottom)
# Use constant padding to match slow implementation
image = F.pad(image, padding, fill=fill, padding_mode="constant")
# Create pixel mask to match the slow implementation
pixel_mask = torch.zeros(padded_size, dtype=torch.int64, device=image.device)
pixel_mask[: original_size[0], : original_size[1]] = 1
return image, pixel_mask
@auto_docstring
def preprocess(self, images: ImageInput, **kwargs: Unpack[Idefics2ImageProcessorKwargs]) -> BatchFeature:
return super().preprocess(images, **kwargs)
def _preprocess(
self,
images: list[list["torch.Tensor"]],
do_resize: bool,
size: SizeDict,
interpolation: Optional["F.InterpolationMode"],
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: float | list[float] | None,
image_std: float | list[float] | None,
do_pad: bool | None,
do_image_splitting: bool | None,
disable_grouping: bool | None,
return_tensors: str | TensorType | None,
**kwargs,
) -> BatchFeature:
"""
Process a batch of images for the model.
"""
grouped_images, grouped_images_index = group_images_by_shape(
images, is_nested=True, disable_grouping=disable_grouping
)
split_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_image_splitting:
stacked_images = self.split_images(stacked_images)
split_images_grouped[shape] = stacked_images
split_images = reorder_images(split_images_grouped, grouped_images_index, is_nested=True)
if do_image_splitting:
# flattenened the doubly nested list to a nested list
for i, group_images in enumerate(split_images):
split_images[i] = [image for sublist in group_images for image in sublist]
# Group images by size for further processing
grouped_images, grouped_images_index = group_images_by_shape(
split_images, is_nested=True, disable_grouping=disable_grouping
)
resized_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_resize:
stacked_images = self.resize(stacked_images, size, interpolation=interpolation)
resized_images_grouped[shape] = stacked_images
resized_images = reorder_images(resized_images_grouped, grouped_images_index, is_nested=True)
# Group images by size for further processing
# Needed in case do_resize is False, or resize returns images with different sizes
grouped_images, grouped_images_index = group_images_by_shape(
resized_images, is_nested=True, disable_grouping=disable_grouping
)
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
# Fused rescale and normalize
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index, is_nested=True)
if do_pad:
# Get max images per batch
max_num_images = max(len(images_) for images_ in processed_images)
max_height, max_width = get_max_height_width(processed_images)
processed_images_padded = torch.zeros(
len(processed_images),
max_num_images,
*(processed_images[0][0].shape[0], max_height, max_width),
device=processed_images[0][0].device,
)
pixel_attention_masks = torch.zeros(
len(processed_images),
max_num_images,
*(max_height, max_width),
device=processed_images[0][0].device,
)
for i, images in enumerate(processed_images):
for j, image in enumerate(images):
processed_images_padded[i, j], pixel_attention_masks[i, j] = self.pad(
image, (max_height, max_width)
)
processed_images = processed_images_padded
if do_pad:
data = {"pixel_values": processed_images, "pixel_attention_mask": pixel_attention_masks}
elif return_tensors == "pt":
data = {"pixel_values": torch.stack([torch.stack(images) for images in processed_images])}
else:
data = {"pixel_values": processed_images}
return BatchFeature(data=data, tensor_type=return_tensors)
__all__ = ["Idefics2ImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/idefics2/image_processing_idefics2_fast.py",
"license": "Apache License 2.0",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/idefics3/image_processing_idefics3_fast.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Optional
import torch
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
BatchFeature,
SizeDict,
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ImageInput,
PILImageResampling,
make_nested_list_of_images,
)
from ...processing_utils import Unpack
from ...utils import TensorType, auto_docstring, is_torchvision_available, logging
from .image_processing_idefics3 import Idefics3ImageProcessorKwargs
if is_torchvision_available():
from torchvision.transforms import functional as F
logger = logging.get_logger(__name__)
MAX_IMAGE_SIZE = 4096 # 4k resolution as absolute maximum
def _resize_output_size_rescale_to_max_len(
height: int, width: int, min_len: int | None = 1, max_len: int | None = None
) -> tuple[int, int]:
"""
Get the output size of the image after resizing given a dictionary specifying the max and min sizes.
Args:
height (`int`):
Height of the input image.
width (`int`):
Width of the input image.
min_len (`int`, *optional*, defaults to 1):
Minimum size of the output image.
max_len (`int`, *optional*, defaults to the maximum size of the image):
Maximum size of the output image.
Returns:
The output size of the image after resizing.
"""
max_len = max(height, width) if max_len is None else max_len
aspect_ratio = width / height
if width >= height:
width = max_len
height = int(width / aspect_ratio)
if height % 2 != 0:
height += 1
elif height > width:
height = max_len
width = int(height * aspect_ratio)
if width % 2 != 0:
width += 1
# Avoid resizing to a size smaller than min_len
height = max(height, min_len)
width = max(width, min_len)
return height, width
def _resize_output_size_scale_below_upper_bound(
height: int, width: int, max_len: dict[str, int] | None = None
) -> tuple[int, int]:
"""
Get the output size of the image after resizing given a dictionary specifying the max and min sizes.
Args:
height (`int`):
Height of the input image.
width (`int`):
Width of the input image.
max_len (`Dict[str, int]`, *optional*, defaults to the maximum size of the image):
Defines the maximum dimensions of the image.
Returns:
The output size of the image after resizing.
"""
max_len = max(height, width) if max_len is None else max_len
aspect_ratio = width / height
if width >= height and width > max_len:
width = max_len
height = int(width / aspect_ratio)
elif height > width and height > max_len:
height = max_len
width = int(height * aspect_ratio)
# Avoid resizing to a size smaller than 1
height = max(height, 1)
width = max(width, 1)
return height, width
def get_resize_output_image_size(
image,
resolution_max_side: int,
) -> tuple[int, int]:
"""
Get the output size of the image after resizing given a dictionary specifying the max and min sizes.
Args:
image (`torch.Tensor`):
Image to resize.
resolution_max_side (`int`):
The longest edge of the image will be resized to this value. The shortest edge will be resized to keep the
input aspect ratio.
Returns:
The output size of the image after resizing.
"""
height, width = image.size()[-2:]
# Find the output size, when rescaling the longest edge to max_len and preserving the aspect ratio
height, width = _resize_output_size_rescale_to_max_len(height, width, max_len=resolution_max_side)
# Find the output size when scaling the image to be below the MAX_IMAGE_SIZE
height, width = _resize_output_size_scale_below_upper_bound(height, width, max_len=MAX_IMAGE_SIZE)
return height, width
def get_max_height_width(images_list: list[list["torch.Tensor"]]) -> tuple[int, int]:
"""
Get the maximum height and width across all images in a batch.
"""
image_sizes = []
for images in images_list:
for image in images:
image_sizes.append(image.size()[-2:])
max_height = max(size[0] for size in image_sizes)
max_width = max(size[1] for size in image_sizes)
return (max_height, max_width)
def get_num_channels(images_list: list[list["torch.Tensor"]]) -> int:
"""
Get the number of channels across all images in a batch. Handle empty sublists like in [[], [image]].
"""
for images in images_list:
if images:
return images[0].shape[0]
raise ValueError("No images found in the batch.")
def get_device_from_images(images_list: list[list["torch.Tensor"]]) -> "torch.device":
"""
Get the device from the first non-empty element in a nested list of images.
Handle empty sublists like in [[], [image]].
"""
for images in images_list:
if images:
return images[0].device
def make_pixel_mask(image: "torch.Tensor", output_size: tuple[int, int]) -> "torch.Tensor":
"""
Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
Args:
image (`torch.Tensor`):
Image to make the pixel mask for.
output_size (`Tuple[int, int]`):
Output size of the mask.
"""
input_height, input_width = image.size()[-2:]
mask = torch.zeros(output_size, dtype=torch.int64, device=image.device)
mask[:input_height, :input_width] = 1
return mask
@auto_docstring
class Idefics3ImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.LANCZOS
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"longest_edge": 4 * 364}
max_image_size = {"longest_edge": 364}
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
do_image_splitting = True
do_pad = True
return_row_col_info = False
valid_kwargs = Idefics3ImageProcessorKwargs
model_input_names = ["pixel_values", "pixel_attention_mask"]
def _prepare_images_structure(self, images: ImageInput, expected_ndims: int = 3) -> ImageInput:
"""
Prepare a nested images structure for processing.
"""
# Checks for `str` in case of URL/local path and optionally loads images
images = self.fetch_images(images)
return make_nested_list_of_images(images, expected_ndims=expected_ndims)
def resize(
self,
image: "torch.Tensor",
size: SizeDict,
interpolation: Optional["F.InterpolationMode"] = None,
antialias: bool = True,
**kwargs,
) -> "torch.Tensor":
"""
Resize an image. The longest edge of the image is resized to size.longest_edge, with the shortest edge
resized to keep the input aspect ratio. Can also be used with size.height and size.width.
Args:
image (`np.ndarray`):
Image to resize.
size (`Dict[str, int]`):
Size of the output image.
interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
`InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BICUBIC`.
antialias (`bool`, *optional*, defaults to `True`):
Whether to use antialiasing when resizing the image.
"""
interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR
if interpolation == F.InterpolationMode.LANCZOS:
logger.warning_once(
"You have used fast image processor with LANCZOS resample which not yet supported for torch.Tensor. "
"BICUBIC resample will be used as an alternative. Please fall back to slow image processor if you "
"want full consistency with the original model."
)
interpolation = F.InterpolationMode.BICUBIC
if size.longest_edge:
size = get_resize_output_image_size(image, resolution_max_side=size.longest_edge)
elif size.height and size.width:
size = (size.height, size.width)
else:
raise ValueError("size must be a dictionary with key 'longest_edge' or 'height' and 'width'.")
return F.resize(image, size, interpolation=interpolation, antialias=antialias)
def split_images(
    self,
    images: torch.Tensor,
    max_image_size: dict[str, int],
    interpolation: Optional["F.InterpolationMode"] = None,
):
    """
    Split an image into squares of side max_image_size and the original image resized to max_image_size.
    That means that a single image becomes a sequence of images.
    This is a "trick" to spend more compute on each image with no changes in the vision encoder.
    1) If one side of the original image is larger than `max_image_size`, resize it to `max_image_size` while preserving the aspect ratio.
    2) Divide the resulting image into `ceil(height / max_image_size)` x `ceil(width / max_image_size)`
    sub-images of the same size each (image_size, image_size). Typically, 364x364.
    3) Returns the list of the crops and the original image, in addition to the number of splits for the height and the width.

    Args:
        images (`torch.Tensor`):
            Images to split, shaped (batch_size, num_channels, height, width).
        max_image_size (`Dict[str, int]`):
            Maximum size of the output image. If the image is larger than this size, it will be split into
            patches of this size, and the original image will be concatenated with the patches, resized to max_size.
        interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
            `InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BICUBIC`.
    """
    batch_size, num_channels, height, width = images.size()
    height_dim, width_dim = 2, 3

    max_height = max_width = max_image_size["longest_edge"]

    frames = []
    if height > max_height or width > max_width:
        # Calculate the number of splits
        num_splits_h = math.ceil(height / max_height)
        num_splits_w = math.ceil(width / max_width)

        # Split the images by height, then by width.
        # NOTE(review): `unfold` silently drops any remainder, so height/width are
        # expected to already be multiples of max_height/max_width (as produced by
        # `resize_for_vision_encoder`) — confirm callers always go through it.
        frames = (
            images.unfold(height_dim, size=max_height, step=max_height)
            .unfold(width_dim, size=max_width, step=max_width)
            .contiguous()
            .view(batch_size, num_channels, -1, max_height, max_width)
            .permute(0, 2, 1, 3, 4)
        )  # batch_size x n_frames x num_channels x height x width

        # For the global image at the end, we resize it to match the max_image_size, for cpu memory efficiency
        global_image_height, global_image_width = max_height, max_width
        images = self.resize(
            images, SizeDict(height=global_image_height, width=global_image_width), interpolation=interpolation
        )
        # Append the downscaled global image after the crops.
        frames = torch.cat((frames, images.unsqueeze(1)), dim=1)
    else:
        # No splitting needed: one "frame" per image, zero rows/cols of crops.
        num_splits_h, num_splits_w = 0, 0
        frames = images.unsqueeze(1)

    # Every image in the batch shares the same split counts.
    num_splits_h = [num_splits_h] * batch_size
    num_splits_w = [num_splits_w] * batch_size

    return frames, num_splits_h, num_splits_w
def resize_for_vision_encoder(
    self,
    image: torch.Tensor,
    vision_encoder_max_size: int,
    interpolation: Optional["F.InterpolationMode"] = None,
):
    """
    Resize images to be multiples of `vision_encoder_max_size` while preserving the aspect ratio.

    The longer side is rounded up to a multiple of `vision_encoder_max_size` first; the other
    side is recomputed from the aspect ratio and then also rounded up, so both output sides
    end up as multiples (which `split_images` relies on).

    Args:
        image (`torch.Tensor`):
            Images to resize.
        vision_encoder_max_size (`int`):
            Side length (in pixels) that both output dimensions must be a multiple of.
        interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
            `InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BICUBIC`.
    """
    height, width = image.size()[-2:]
    aspect_ratio = width / height
    if width >= height:
        # Snap the width up, derive the height from the *new* width, then snap it too.
        width = math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size
        height = int(width / aspect_ratio)
        height = math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size
    elif height > width:
        height = math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size
        width = int(height * aspect_ratio)
        width = math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size
    new_size = SizeDict(height=height, width=width)
    return self.resize(image, size=new_size, interpolation=interpolation)
def pad(
    self,
    image: torch.Tensor,
    padded_size: tuple[int, int],
    fill: int = 0,
    return_pixel_mask: bool = True,
):
    """
    Pad `image` on the bottom/right to `padded_size`, optionally returning the
    matching pixel mask (1 for real pixels, 0 for padding).
    """
    original_size = image.shape[-2:]
    padding_bottom = padded_size[0] - original_size[0]
    padding_right = padded_size[1] - original_size[1]
    if min(padding_bottom, padding_right) < 0:
        raise ValueError(
            f"Padding dimensions are negative. Please make sure that the padded size is larger than the "
            f"original size. Got padded size: {padded_size}, original size: {original_size}."
        )

    # Skip the padding op entirely when the image already has the target size.
    if original_size != padded_size:
        image = F.pad(image, (0, 0, padding_right, padding_bottom), fill=fill, padding_mode="constant")

    pixel_mask = None
    if return_pixel_mask:
        # Start from all-zeros (padding) and mark the valid top-left region.
        pixel_mask = torch.zeros_like(image[..., 0, :, :], dtype=torch.int64)
        pixel_mask[: original_size[0], : original_size[1]] = 1

    return image, pixel_mask
@auto_docstring
def preprocess(self, images: ImageInput, **kwargs: Unpack[Idefics3ImageProcessorKwargs]) -> BatchFeature:
    # Thin wrapper: kwarg validation and dispatch to `_preprocess` happen in the base class.
    return super().preprocess(images, **kwargs)
def _preprocess(
    self,
    images: list[list["torch.Tensor"]],
    do_resize: bool,
    size: SizeDict,
    interpolation: Optional["F.InterpolationMode"],
    do_rescale: bool,
    rescale_factor: float,
    do_normalize: bool,
    image_mean: float | list[float] | None,
    image_std: float | list[float] | None,
    do_pad: bool | None,
    do_image_splitting: bool | None,
    max_image_size: dict[str, int] | None,
    return_row_col_info: bool | None,
    disable_grouping: bool | None,
    return_tensors: str | TensorType | None,
    **kwargs,
) -> BatchFeature:
    """
    Process a batch of images for the model.

    Pipeline: optional resize -> either image splitting (crops + global image) or
    squaring to `max_image_size` -> fused rescale/normalize -> optional padding to
    a common batch shape with a matching pixel attention mask.
    """
    # Batch resize: group same-shaped images so each group is resized in one call.
    grouped_images, grouped_images_index = group_images_by_shape(
        images, is_nested=True, disable_grouping=disable_grouping
    )
    resized_images_grouped = {}
    for shape, stacked_images in grouped_images.items():
        if do_resize:
            stacked_images = self.resize(stacked_images, size, interpolation=interpolation)
        resized_images_grouped[shape] = stacked_images
    resized_images = reorder_images(resized_images_grouped, grouped_images_index, is_nested=True)

    # Re-group: resizing may have produced new shapes.
    grouped_images, grouped_images_index = group_images_by_shape(
        resized_images, is_nested=True, disable_grouping=disable_grouping
    )
    split_images_grouped = {}
    if do_image_splitting:
        rows_grouped = {}
        cols_grouped = {}
        for shape, stacked_images in grouped_images.items():
            # Snap sides to multiples of the encoder size, then cut into crops
            # plus a downscaled global image.
            stacked_images = self.resize_for_vision_encoder(
                stacked_images, max_image_size["longest_edge"], interpolation=interpolation
            )
            stacked_images, rows, cols = self.split_images(
                stacked_images, max_image_size=max_image_size, interpolation=interpolation
            )
            split_images_grouped[shape] = stacked_images
            rows_grouped[shape] = rows
            cols_grouped[shape] = cols
        processed_images = reorder_images(split_images_grouped, grouped_images_index, is_nested=True)
        rows = reorder_images(rows_grouped, grouped_images_index, is_nested=True)
        cols = reorder_images(cols_grouped, grouped_images_index, is_nested=True)
        # Flatten the doubly nested list to a nested list (crops become siblings
        # of the other images in each sample).
        for i, group_images in enumerate(processed_images):
            processed_images[i] = [image for sublist in group_images for image in sublist]
    else:
        for shape, stacked_images in grouped_images.items():
            # We square the images to max_image_size
            stacked_images = self.resize(
                image=stacked_images,
                size=SizeDict(height=max_image_size["longest_edge"], width=max_image_size["longest_edge"]),
                interpolation=interpolation,
            )
            split_images_grouped[shape] = stacked_images
        processed_images = reorder_images(split_images_grouped, grouped_images_index, is_nested=True)
        # No splitting: zero rows/cols of crops for every image.
        rows = [[0] * len(images) for images in processed_images]
        cols = [[0] * len(images) for images in processed_images]

    # Group images by size for further processing
    # Needed in case do_resize is False, or resize returns images with different sizes
    grouped_images, grouped_images_index = group_images_by_shape(
        processed_images, is_nested=True, disable_grouping=disable_grouping
    )
    processed_images_grouped = {}
    for shape, stacked_images in grouped_images.items():
        # Fused rescale and normalize
        stacked_images = self.rescale_and_normalize(
            stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
        )
        processed_images_grouped[shape] = stacked_images
    processed_images = reorder_images(processed_images_grouped, grouped_images_index, is_nested=True)

    if do_pad:
        # Pad every sample to the max image count and max spatial size in the
        # batch; the attention mask marks which pixels are real.
        max_num_images = max(len(images_) for images_ in processed_images)
        max_height, max_width = get_max_height_width(processed_images)
        num_channels = get_num_channels(processed_images)
        device = get_device_from_images(processed_images)
        processed_images_padded = torch.zeros(
            len(processed_images),
            max_num_images,
            *(num_channels, max_height, max_width),
            device=device,
        )
        pixel_attention_masks = torch.zeros(
            len(processed_images),
            max_num_images,
            *(max_height, max_width),
            device=device,
        )
        for i, images in enumerate(processed_images):
            for j, image in enumerate(images):
                processed_images_padded[i, j], pixel_attention_masks[i, j] = self.pad(
                    image, (max_height, max_width)
                )
        processed_images = processed_images_padded

    if do_pad:
        data = {"pixel_values": processed_images, "pixel_attention_mask": pixel_attention_masks}
    elif return_tensors == "pt":
        data = {"pixel_values": torch.stack([torch.stack(images) for images in processed_images])}
    else:
        data = {"pixel_values": processed_images}
    # This is needed for generating correct text inputs in the processor - we don't pad to the max number of images
    encoding = BatchFeature(data=data, tensor_type=return_tensors)
    if return_row_col_info:
        encoding["rows"] = rows
        encoding["cols"] = cols
    return encoding
def to_dict(self):
    """Serialize the processor config, dropping internal/non-persisted keys."""
    serialized = super().to_dict()
    for internal_key in ("_valid_processor_keys", "return_row_col_info"):
        serialized.pop(internal_key, None)
    return serialized
def get_number_of_image_patches(self, height: int, width: int, images_kwargs: dict):
    """
    A utility that returns number of image patches for a given image size.

    Mirrors the actual preprocessing pipeline (`resize` with a `longest_edge`
    size, then `resize_for_vision_encoder`, then `split_images`) so the
    predicted patch count matches what `_preprocess` produces.

    Args:
        height (`int`):
            Height of the input image.
        width (`int`):
            Width of the input image.
        images_kwargs (`dict`):
            Any kwargs to override defaults of the image processor.
    Returns:
        `tuple[int, int, int]`: Number of patches per image, number of crop rows,
        and number of crop columns (all 0 when image splitting is disabled).
    """
    do_image_splitting = images_kwargs.get("do_image_splitting", self.do_image_splitting)
    max_image_size = images_kwargs.get("max_image_size", self.max_image_size)
    size = images_kwargs.get("size", self.size)

    num_patches = num_rows = num_cols = 0
    if do_image_splitting:
        # Same rescaling as `resize` with `size={"longest_edge": ...}`.
        height, width = _resize_output_size_rescale_to_max_len(height, width, max_len=size["longest_edge"])
        height, width = _resize_output_size_scale_below_upper_bound(height, width, max_len=MAX_IMAGE_SIZE)
        aspect_ratio = width / height
        max_edge = max_image_size["longest_edge"]
        # Replicate `resize_for_vision_encoder`: snap the longer side up to a
        # multiple of `max_edge`, recompute the other side from the aspect
        # ratio, then snap it up as well. (Previously the second side was
        # snapped from the *original* dimension — the intermediate
        # `int(width / aspect_ratio)` was a dead store — which could
        # under-count patches whenever the first snap enlarged the image.)
        if width >= height:
            resized_width = math.ceil(width / max_edge) * max_edge
            resized_height = int(resized_width / aspect_ratio)
            resized_height = math.ceil(resized_height / max_edge) * max_edge
        else:
            resized_height = math.ceil(height / max_edge) * max_edge
            resized_width = int(resized_height * aspect_ratio)
            resized_width = math.ceil(resized_width / max_edge) * max_edge

        if resized_height > max_edge or resized_width > max_edge:
            # Calculate the number of splits
            num_rows = math.ceil(resized_height / max_edge)
            num_cols = math.ceil(resized_width / max_edge)
        # Crops plus the global (downscaled) image appended by `split_images`.
        num_patches = num_rows * num_cols + 1

    return num_patches, num_rows, num_cols
__all__ = ["Idefics3ImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/idefics3/image_processing_idefics3_fast.py",
"license": "Apache License 2.0",
"lines": 482,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/dpt/modular_dpt.py | # Copyright 2025 HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from collections.abc import Iterable
from typing import TYPE_CHECKING, Optional
import torch
from ...image_processing_base import BatchFeature
from ...image_processing_utils_fast import BaseImageProcessorFast
from ...image_transforms import group_images_by_shape, reorder_images
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
PILImageResampling,
SizeDict,
)
from ...utils import (
TensorType,
auto_docstring,
requires_backends,
)
from ..beit.image_processing_beit_fast import BeitImageProcessorFast
from .image_processing_dpt import DPTImageProcessorKwargs
if TYPE_CHECKING:
from ...modeling_outputs import DepthEstimatorOutput
import torchvision.transforms.v2.functional as tvF
def get_resize_output_image_size(
    input_image: "torch.Tensor",
    output_size: int | Iterable[int],
    keep_aspect_ratio: bool,
    multiple: int,
) -> SizeDict:
    """
    Compute the output size for resizing `input_image` toward `output_size`,
    snapping each side to a multiple of `multiple`. When `keep_aspect_ratio`
    is set, the side whose scale factor is closest to 1 drives both sides.
    """

    def snap_to_multiple(value: float, min_val: int = 0, max_val: int | None = None) -> int:
        # Round to the nearest multiple, then nudge down/up to respect bounds.
        snapped = round(value / multiple) * multiple
        if max_val is not None and snapped > max_val:
            snapped = math.floor(value / multiple) * multiple
        if snapped < min_val:
            snapped = math.ceil(value / multiple) * multiple
        return snapped

    input_height, input_width = input_image.shape[-2:]
    target_height, target_width = output_size

    # determine new height and width
    scale_height = target_height / input_height
    scale_width = target_width / input_width

    if keep_aspect_ratio:
        # Scale as little as possible: the dimension already closest to its
        # target provides the common scale factor.
        if abs(1 - scale_width) < abs(1 - scale_height):
            scale_height = scale_width
        else:
            scale_width = scale_height

    return SizeDict(
        height=snap_to_multiple(scale_height * input_height),
        width=snap_to_multiple(scale_width * input_width),
    )
@auto_docstring
class DPTImageProcessorFast(BeitImageProcessorFast):
    # Default preprocessing configuration; each value can be overridden per call.
    resample = PILImageResampling.BICUBIC
    image_mean = IMAGENET_STANDARD_MEAN
    image_std = IMAGENET_STANDARD_STD
    size = {"height": 384, "width": 384}
    do_resize = True
    do_rescale = True
    do_normalize = True
    do_pad = False
    rescale_factor = 1 / 255
    ensure_multiple_of = 1
    keep_aspect_ratio = False
    # Beit-specific steps that DPT does not use.
    crop_size = None
    do_center_crop = None
    do_reduce_labels = None
    valid_kwargs = DPTImageProcessorKwargs

    def resize(
        self,
        image: "torch.Tensor",
        size: SizeDict,
        interpolation: Optional["tvF.InterpolationMode"] = None,
        antialias: bool = True,
        ensure_multiple_of: int | None = 1,
        keep_aspect_ratio: bool = False,
    ) -> "torch.Tensor":
        """
        Resize an image to `(size["height"], size["width"])`.

        Args:
            image (`torch.Tensor`):
                Image to resize.
            size (`SizeDict`):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
            interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
                `InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BICUBIC`.
            antialias (`bool`, *optional*, defaults to `True`):
                Whether to use antialiasing when resizing the image
            ensure_multiple_of (`int`, *optional*):
                If `do_resize` is `True`, the image is resized to a size that is a multiple of this value
            keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
                If `True`, and `do_resize` is `True`, the image is resized to the largest possible size such that the aspect ratio is preserved.

        Returns:
            `torch.Tensor`: The resized image.
        """
        if not size.height or not size.width:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        # Snap the requested size to `ensure_multiple_of` (optionally keeping the
        # input aspect ratio) before delegating to the base implementation.
        output_size = get_resize_output_image_size(
            image,
            output_size=(size.height, size.width),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return BaseImageProcessorFast.resize(
            self, image, output_size, interpolation=interpolation, antialias=antialias
        )

    def pad_image(
        self,
        image: "torch.Tensor",
        size_divisor: int = 1,
    ) -> "torch.Tensor":
        r"""
        Center pad a batch of images to be a multiple of `size_divisor`.

        Args:
            image (`torch.Tensor`):
                Image to pad. Can be a batch of images of dimensions (N, C, H, W) or a single image of dimensions (C, H, W).
            size_divisor (`int`):
                The width and height of the image will be padded to a multiple of this number.
        """
        height, width = image.shape[-2:]

        def _get_pad(size, size_divisor):
            # Split the total padding as evenly as possible between both sides.
            new_size = math.ceil(size / size_divisor) * size_divisor
            pad_size = new_size - size
            pad_size_left = pad_size // 2
            pad_size_right = pad_size - pad_size_left
            return pad_size_left, pad_size_right

        pad_top, pad_bottom = _get_pad(height, size_divisor)
        pad_left, pad_right = _get_pad(width, size_divisor)
        padding = (pad_left, pad_top, pad_right, pad_bottom)
        return tvF.pad(image, padding)

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_reduce_labels: bool,
        do_resize: bool,
        size: SizeDict,
        interpolation: Optional["tvF.InterpolationMode"],
        do_center_crop: bool,
        crop_size: SizeDict,
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        keep_aspect_ratio: bool,
        ensure_multiple_of: int | None,
        do_pad: bool,
        size_divisor: int | None,
        disable_grouping: bool | None,
        return_tensors: str | TensorType | None,
        **kwargs,
    ) -> BatchFeature:
        """
        Run the full preprocessing pipeline: optional label reduction, resize,
        center crop, padding, and fused rescale/normalize, batched by shape.
        """
        if do_reduce_labels:
            images = self.reduce_label(images)

        # Group images by size for batched resizing
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        resized_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_resize:
                stacked_images = self.resize(
                    image=stacked_images,
                    size=size,
                    interpolation=interpolation,
                    ensure_multiple_of=ensure_multiple_of,
                    keep_aspect_ratio=keep_aspect_ratio,
                )
            resized_images_grouped[shape] = stacked_images
        resized_images = reorder_images(resized_images_grouped, grouped_images_index)

        # Group images by size for further processing
        # Needed in case do_resize is False, or resize returns images with different sizes
        grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
        processed_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_center_crop:
                stacked_images = self.center_crop(stacked_images, crop_size)
            if do_pad:
                stacked_images = self.pad_image(stacked_images, size_divisor)
            # Fused rescale and normalize
            stacked_images = self.rescale_and_normalize(
                stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            processed_images_grouped[shape] = stacked_images

        processed_images = reorder_images(processed_images_grouped, grouped_images_index)
        return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)

    def post_process_depth_estimation(
        self,
        outputs: "DepthEstimatorOutput",
        target_sizes: TensorType | list[tuple[int, int]] | None = None,
    ) -> list[dict[str, TensorType]]:
        """
        Converts the raw output of [`DepthEstimatorOutput`] into final depth predictions and depth PIL images.
        Only supports PyTorch.

        Args:
            outputs ([`DepthEstimatorOutput`]):
                Raw outputs of the model.
            target_sizes (`TensorType` or `List[Tuple[int, int]]`, *optional*):
                Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
                (height, width) of each image in the batch. If left to None, predictions will not be resized.

        Returns:
            `List[Dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth
            predictions.
        """
        requires_backends(self, "torch")

        predicted_depth = outputs.predicted_depth

        if (target_sizes is not None) and (len(predicted_depth) != len(target_sizes)):
            raise ValueError(
                "Make sure that you pass in as many target sizes as the batch dimension of the predicted depth"
            )

        results = []
        target_sizes = [None] * len(predicted_depth) if target_sizes is None else target_sizes
        for depth, target_size in zip(predicted_depth, target_sizes):
            if target_size is not None:
                # Interpolate expects (N, C, H, W); add batch/channel dims then drop them.
                depth = torch.nn.functional.interpolate(
                    depth.unsqueeze(0).unsqueeze(1), size=target_size, mode="bicubic", align_corners=False
                ).squeeze()

            results.append({"predicted_depth": depth})

        return results
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/dpt/modular_dpt.py",
"license": "Apache License 2.0",
"lines": 233,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/lightglue/convert_lightglue_to_hf.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gc
import os
import re
import torch
from datasets import load_dataset
from transformers import (
AutoModelForKeypointDetection,
LightGlueForKeypointMatching,
LightGlueImageProcessor,
)
from transformers.models.lightglue.configuration_lightglue import LightGlueConfig
DEFAULT_CHECKPOINT_URL = "https://github.com/cvg/LightGlue/releases/download/v0.1_arxiv/superpoint_lightglue.pth"
def prepare_imgs():
    """Load three test images and arrange them into the two pairs used for verification."""
    dataset = load_dataset("hf-internal-testing/image-matching-test-dataset", split="train")
    first, second, third = (dataset[i]["image"] for i in range(3))
    # The identical pair exercises the model's early-stopping path on purpose.
    return [[third, first], [second, second]]
def verify_model_outputs(model, device):
    """
    Sanity-check the converted model against reference outputs.

    Runs the model on the test image pairs and asserts that output shapes, a
    fixed window of match indices/scores, and the total match count equal
    values recorded from the original LightGlue implementation.
    """
    images = prepare_imgs()
    preprocessor = LightGlueImageProcessor()
    inputs = preprocessor(images=images, return_tensors="pt").to(device)
    model.to(device)
    with torch.no_grad():
        outputs = model(**inputs, output_hidden_states=True, output_attentions=True)

    # Slice a fixed window of predictions for comparison.
    predicted_matches_values = outputs.matches[0, 0, 20:30]
    predicted_matching_scores_values = outputs.matching_scores[0, 0, 20:30]
    predicted_number_of_matches = torch.sum(outputs.matches[0][0] != -1).item()

    # Reference values obtained by running the original implementation.
    expected_max_number_keypoints = 866
    expected_matches_shape = torch.Size((len(images), 2, expected_max_number_keypoints))
    expected_matching_scores_shape = torch.Size((len(images), 2, expected_max_number_keypoints))

    expected_matches_values = torch.tensor([-1, -1, 5, -1, -1, 19, -1, 10, -1, 11], dtype=torch.int64).to(device)
    expected_matching_scores_values = torch.tensor([0, 0, 0.2997, 0, 0, 0.6762, 0, 0.8826, 0, 0.5583]).to(device)

    expected_number_of_matches = 140

    assert outputs.matches.shape == expected_matches_shape
    assert outputs.matching_scores.shape == expected_matching_scores_shape
    assert torch.allclose(predicted_matches_values, expected_matches_values, atol=1e-2)
    assert torch.allclose(predicted_matching_scores_values, expected_matching_scores_values, atol=1e-2)
    assert predicted_number_of_matches == expected_number_of_matches
# Regex renames from the original checkpoint's parameter names to the HF
# LightGlue module layout; applied key-by-key in `convert_old_keys_to_new_keys`.
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
    r"posenc.Wr": r"positional_encoder.projector",
    r"self_attn.(\d+).Wqkv": r"transformer_layers.\1.self_attention.Wqkv",
    r"self_attn.(\d+).out_proj": r"transformer_layers.\1.self_attention.o_proj",
    r"self_attn.(\d+).ffn.0": r"transformer_layers.\1.self_mlp.fc1",
    r"self_attn.(\d+).ffn.1": r"transformer_layers.\1.self_mlp.layer_norm",
    r"self_attn.(\d+).ffn.3": r"transformer_layers.\1.self_mlp.fc2",
    r"cross_attn.(\d+).to_qk": r"transformer_layers.\1.cross_attention.to_qk",
    r"cross_attn.(\d+).to_v": r"transformer_layers.\1.cross_attention.v_proj",
    r"cross_attn.(\d+).to_out": r"transformer_layers.\1.cross_attention.o_proj",
    r"cross_attn.(\d+).ffn.0": r"transformer_layers.\1.cross_mlp.fc1",
    r"cross_attn.(\d+).ffn.1": r"transformer_layers.\1.cross_mlp.layer_norm",
    r"cross_attn.(\d+).ffn.3": r"transformer_layers.\1.cross_mlp.fc2",
    r"log_assignment.(\d+).matchability": r"match_assignment_layers.\1.matchability",
    r"log_assignment.(\d+).final_proj": r"match_assignment_layers.\1.final_projection",
    r"token_confidence.(\d+).token.0": r"token_confidence.\1.token",
}
def convert_old_keys_to_new_keys(state_dict_keys: list[str]):
    """
    Map original checkpoint keys to HF-layout keys.

    Applies every pattern of `ORIGINAL_TO_CONVERTED_KEY_MAPPING` to each key in
    turn and returns `{old_key: new_key}`. Should be applied only once, on the
    full key list.
    """
    output_dict = {}
    if state_dict_keys is not None:
        for old_key in state_dict_keys:
            new_key = old_key
            for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():
                # A `None` replacement means "strip the match entirely".
                new_key = re.sub(pattern, replacement if replacement is not None else "", new_key)
            output_dict[old_key] = new_key
    return output_dict
def add_keypoint_detector_state_dict(lightglue_state_dict):
    """Merge pretrained SuperPoint weights under the `keypoint_detector.` prefix."""
    keypoint_detector = AutoModelForKeypointDetection.from_pretrained("magic-leap-community/superpoint")
    for name, tensor in keypoint_detector.state_dict().items():
        lightglue_state_dict[f"keypoint_detector.{name}"] = tensor
    return lightglue_state_dict
def split_weights(state_dict, num_layers: int = 9, hidden_size: int = 256):
    """
    Split fused attention projections into separate q/k/v entries, in place.

    For each transformer layer the fused self-attention `Wqkv` (shape
    `(3 * hidden_size, hidden_size)`) is reshaped to `(hidden_size, 3, hidden_size)`
    and split into q/k/v projections; the shared cross-attention `to_qk`
    projection is duplicated into q and k. Unused `log_assignment.{i}.r`
    temperature values from the original checkpoint are dropped.

    Args:
        state_dict (`dict`): Converted state dict (after key renaming); mutated and returned.
        num_layers (`int`, *optional*, defaults to 9): Number of transformer layers.
        hidden_size (`int`, *optional*, defaults to 256): Descriptor/hidden dimension.

    Returns:
        `dict`: The same state dict with split projections.
    """
    for i in range(num_layers):
        # Remove unused r values
        state_dict.pop(f"log_assignment.{i}.r", None)

        prefix = f"transformer_layers.{i}"
        qkv_weight = state_dict.pop(f"{prefix}.self_attention.Wqkv.weight").reshape(hidden_size, 3, hidden_size)
        qkv_bias = state_dict.pop(f"{prefix}.self_attention.Wqkv.bias").reshape(hidden_size, 3)
        for j, proj in enumerate(("q_proj", "k_proj", "v_proj")):
            state_dict[f"{prefix}.self_attention.{proj}.weight"] = qkv_weight[:, j]
            state_dict[f"{prefix}.self_attention.{proj}.bias"] = qkv_bias[:, j]

        # Cross attention shares one projection for queries and keys.
        to_qk_weight = state_dict.pop(f"{prefix}.cross_attention.to_qk.weight")
        to_qk_bias = state_dict.pop(f"{prefix}.cross_attention.to_qk.bias")
        for proj in ("q_proj", "k_proj"):
            state_dict[f"{prefix}.cross_attention.{proj}.weight"] = to_qk_weight
            state_dict[f"{prefix}.cross_attention.{proj}.bias"] = to_qk_bias
    return state_dict
@torch.no_grad()
def write_model(
    model_path,
    checkpoint_url,
    organization,
    push_to_hub=False,
):
    """
    Convert an original LightGlue checkpoint to the Hugging Face format.

    Downloads the checkpoint, renames and splits its weights, loads them into a
    `LightGlueForKeypointMatching`, saves config + model under `model_path`,
    reloads the saved model as a sanity check (verifying outputs for the default
    checkpoint), and optionally pushes everything to the Hub.
    """
    os.makedirs(model_path, exist_ok=True)

    # ------------------------------------------------------------
    # LightGlue config
    # ------------------------------------------------------------

    config = LightGlueConfig(
        descriptor_dim=256,
        num_hidden_layers=9,
        num_attention_heads=4,
    )
    config.architectures = ["LightGlueForKeypointMatching"]
    config.save_pretrained(model_path)
    print("Model config saved successfully...")

    # ------------------------------------------------------------
    # Convert weights
    # ------------------------------------------------------------

    print(f"Fetching all parameters from the checkpoint at {checkpoint_url}...")
    original_state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)

    print("Converting model...")
    all_keys = list(original_state_dict.keys())
    new_keys = convert_old_keys_to_new_keys(all_keys)

    state_dict = {}
    for key in all_keys:
        new_key = new_keys[key]
        # Clone so converted tensors do not alias the original storage.
        state_dict[new_key] = original_state_dict.pop(key).contiguous().clone()

    del original_state_dict
    gc.collect()

    state_dict = split_weights(state_dict)
    state_dict = add_keypoint_detector_state_dict(state_dict)

    print("Loading the checkpoint in a LightGlue model...")
    # NOTE(review): conversion assumes a CUDA device is available — confirm.
    device = "cuda"
    with torch.device(device):
        model = LightGlueForKeypointMatching(config)
        model.load_state_dict(state_dict)
    print("Checkpoint loaded successfully...")
    del model.config._name_or_path

    print("Saving the model...")
    model.save_pretrained(model_path)
    del state_dict, model

    # Safety check: reload the converted model
    gc.collect()
    print("Reloading the model to check if it's saved correctly.")
    model = LightGlueForKeypointMatching.from_pretrained(model_path)
    print("Model reloaded successfully.")

    model_name = "lightglue"
    if "superpoint" in checkpoint_url:
        model_name += "_superpoint"

    # Numerical verification only has reference values for the default checkpoint.
    if checkpoint_url == DEFAULT_CHECKPOINT_URL:
        print("Checking the model outputs...")
        verify_model_outputs(model, device)
        print("Model outputs verified successfully.")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"{organization}/{model_name}",
            commit_message="Add model",
        )
        config.push_to_hub(repo_id=f"{organization}/{model_name}", commit_message="Add config")

    write_image_processor(model_path, model_name, organization, push_to_hub=push_to_hub)
def write_image_processor(save_dir, model_name, organization, push_to_hub=False):
    """Create, save, and optionally push the image processor matching `model_name`."""
    # SuperPoint-based variants expect grayscale input.
    if "superpoint" in model_name:
        image_processor = LightGlueImageProcessor(do_grayscale=True)
    else:
        image_processor = LightGlueImageProcessor()

    image_processor.save_pretrained(save_dir)

    if push_to_hub:
        print("Pushing image processor to the hub...")
        image_processor.push_to_hub(
            repo_id=f"{organization}/{model_name}",
            commit_message="Add image processor",
        )
if __name__ == "__main__":
    # CLI entry point for converting a LightGlue checkpoint.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default=DEFAULT_CHECKPOINT_URL,
        type=str,
        help="URL of the original LightGlue checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    # NOTE(review): --save_model is parsed but never read below — confirm whether
    # it should gate saving or be removed from the CLI.
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push model and image preprocessor to the hub",
    )
    parser.add_argument(
        "--organization",
        default="ETH-CVG",
        type=str,
        help="Hub organization in which you want the model to be uploaded.",
    )

    args = parser.parse_args()
    write_model(
        args.pytorch_dump_folder_path,
        args.checkpoint_url,
        args.organization,
        push_to_hub=args.push_to_hub,
    )
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/lightglue/convert_lightglue_to_hf.py",
"license": "Apache License 2.0",
"lines": 230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/lightglue/modular_lightglue.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from dataclasses import dataclass
import numpy as np
import torch
from torch import nn
from torch.nn.utils.rnn import pad_sequence
from ...configuration_utils import PreTrainedConfig
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import ModelOutput, TensorType, auto_docstring, logging
from ...utils.generic import can_return_tuple
from ..auto import CONFIG_MAPPING, AutoConfig
from ..auto.modeling_auto import AutoModelForKeypointDetection
from ..clip.modeling_clip import CLIPMLP
from ..cohere.modeling_cohere import apply_rotary_pos_emb
from ..llama.modeling_llama import LlamaAttention, eager_attention_forward
from ..superglue.image_processing_superglue import (
SuperGlueImageProcessor,
SuperGlueImageProcessorKwargs,
)
from ..superglue.image_processing_superglue_fast import SuperGlueImageProcessorFast
from ..superpoint import SuperPointConfig
logger = logging.get_logger(__name__)
class LightGlueConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`LightGlueForKeypointMatching`]. It is used to
    instantiate a LightGlue model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the LightGlue
    [ETH-CVG/lightglue_superpoint](https://huggingface.co/ETH-CVG/lightglue_superpoint) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        keypoint_detector_config (`Union[AutoConfig, dict]`, *optional*, defaults to `SuperPointConfig`):
            The config object or dictionary of the keypoint detector.
        descriptor_dim (`int`, *optional*, defaults to 256):
            The dimension of the descriptors.
        num_hidden_layers (`int`, *optional*, defaults to 9):
            The number of self and cross attention layers.
        num_attention_heads (`int`, *optional*, defaults to 4):
            The number of heads in the multi-head attention.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        depth_confidence (`float`, *optional*, defaults to 0.95):
            The confidence threshold used to perform early stopping
        width_confidence (`float`, *optional*, defaults to 0.99):
            The confidence threshold used to prune points
        filter_threshold (`float`, *optional*, defaults to 0.1):
            The confidence threshold used to filter matches
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The activation function to be used in the hidden layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        attention_bias (`bool`, *optional*, defaults to `True`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        trust_remote_code (`bool`, *optional*, defaults to `False`):
            Whether to trust remote code when using other models than SuperPoint as keypoint detector.

    Examples:
        ```python
        >>> from transformers import LightGlueConfig, LightGlueForKeypointMatching

        >>> # Initializing a LightGlue style configuration
        >>> configuration = LightGlueConfig()

        >>> # Initializing a model from the LightGlue style configuration
        >>> model = LightGlueForKeypointMatching(configuration)

        >>> # Accessing the model configuration
        >>> configuration = model.config
        ```
    """

    model_type = "lightglue"
    sub_configs = {"keypoint_detector_config": AutoConfig}

    def __init__(
        self,
        keypoint_detector_config: SuperPointConfig = None,
        descriptor_dim: int = 256,
        num_hidden_layers: int = 9,
        num_attention_heads: int = 4,
        num_key_value_heads=None,
        depth_confidence: float = 0.95,
        width_confidence: float = 0.99,
        filter_threshold: float = 0.1,
        initializer_range: float = 0.02,
        hidden_act: str = "gelu",
        attention_dropout=0.0,
        attention_bias=True,
        trust_remote_code: bool = False,
        **kwargs,
    ):
        # LightGlue can be used with other models than SuperPoint as keypoint detector
        # We provide the trust_remote_code argument to allow the use of other models
        # that are not registered in the CONFIG_MAPPING dictionary (for example DISK)
        self.trust_remote_code = trust_remote_code

        # The per-head dimension must be an integer: heads evenly divide the descriptor dimension.
        if descriptor_dim % num_attention_heads != 0:
            raise ValueError("descriptor_dim % num_heads is different from zero")
        self.descriptor_dim = descriptor_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.depth_confidence = depth_confidence
        self.width_confidence = width_confidence
        self.filter_threshold = filter_threshold
        self.initializer_range = initializer_range

        # Keypoint Detector is forced into eager attention mode because SuperPoint does not have Attention
        # See https://github.com/huggingface/transformers/pull/31718#discussion_r2109733153
        if isinstance(keypoint_detector_config, dict):
            keypoint_detector_config["model_type"] = keypoint_detector_config.get("model_type", "superpoint")
            if keypoint_detector_config["model_type"] not in CONFIG_MAPPING:
                # Model type not registered locally: resolve the config from the Hub
                # (custom detectors require trust_remote_code=True).
                keypoint_detector_config = AutoConfig.from_pretrained(
                    keypoint_detector_config["_name_or_path"], trust_remote_code=self.trust_remote_code
                )
            else:
                keypoint_detector_config = CONFIG_MAPPING[keypoint_detector_config["model_type"]](
                    **keypoint_detector_config, attn_implementation="eager"
                )
        if keypoint_detector_config is None:
            keypoint_detector_config = CONFIG_MAPPING["superpoint"](attn_implementation="eager")
        self.keypoint_detector_config = keypoint_detector_config

        # Aliases consumed by the shared attention/MLP building blocks (Llama/CLIP-derived modules).
        self.hidden_size = descriptor_dim
        self.intermediate_size = descriptor_dim * 2
        self.hidden_act = hidden_act
        self.attention_dropout = attention_dropout
        self.attention_bias = attention_bias
        super().__init__(**kwargs)
@dataclass
@auto_docstring(
    custom_intro="""
    Base class for outputs of LightGlue keypoint matching models. Due to the nature of keypoint detection and matching,
    the number of keypoints is not fixed and can vary from image to image, which makes batching non-trivial. In the
    batch of images, the maximum number of matches is set as the dimension of the matches and matching scores. The mask
    tensor is used to indicate which values in the keypoints, matches, matching_scores and prune tensors are keypoint
    matching information.
    """
)
class LightGlueKeypointMatchingOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*):
        Loss computed during training.
    matches (`torch.FloatTensor` of shape `(batch_size, 2, num_matches)`):
        Index of keypoint matched in the other image.
    matching_scores (`torch.FloatTensor` of shape `(batch_size, 2, num_matches)`):
        Scores of predicted matches.
    keypoints (`torch.FloatTensor` of shape `(batch_size, num_keypoints, 2)`):
        Absolute (x, y) coordinates of predicted keypoints in a given image.
    prune (`torch.IntTensor` of shape `(batch_size, num_keypoints)`):
        Pruning mask indicating which keypoints are removed and at which layer.
    mask (`torch.BoolTensor` of shape `(batch_size, num_keypoints)`):
        Mask indicating which values in matches, matching_scores, keypoints and prune are keypoint matching
        information.
    hidden_states (`Tuple[torch.FloatTensor, ...]`, *optional*):
        Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(batch_size, 2, num_channels,
        num_keypoints)` returned when `output_hidden_states=True` is passed or when
        `config.output_hidden_states=True`
    attentions (`Tuple[torch.FloatTensor, ...]`, *optional*):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, 2, num_heads, num_keypoints,
        num_keypoints)` returned when `output_attentions=True` is passed or when
        `config.output_attentions=True`
    """

    loss: torch.FloatTensor | None = None
    matches: torch.FloatTensor | None = None
    matching_scores: torch.FloatTensor | None = None
    keypoints: torch.FloatTensor | None = None
    prune: torch.IntTensor | None = None
    # NOTE(review): the docstring above documents `mask` as a BoolTensor while the annotation
    # says FloatTensor — confirm which dtype the model actually produces.
    mask: torch.FloatTensor | None = None
    hidden_states: tuple[torch.FloatTensor] | None = None
    attentions: tuple[torch.FloatTensor] | None = None
class LightGlueImageProcessorKwargs(SuperGlueImageProcessorKwargs):
    """Typed kwargs for LightGlue image processing — identical to the SuperGlue kwargs."""

    pass
class LightGlueImageProcessor(SuperGlueImageProcessor):
    """Image processor for LightGlue, reusing the SuperGlue image-pair processing pipeline."""

    def post_process_keypoint_matching(
        self,
        outputs: "LightGlueKeypointMatchingOutput",
        target_sizes: TensorType | list[tuple],
        threshold: float = 0.0,
    ) -> list[dict[str, torch.Tensor]]:
        # Pure delegation to the SuperGlue implementation; presumably overridden only so the
        # LightGlue-specific output type appears in this class's signature — behavior is unchanged.
        return super().post_process_keypoint_matching(outputs, target_sizes, threshold)
class LightGlueImageProcessorFast(SuperGlueImageProcessorFast):
    """Fast (torch-backed) image processor for LightGlue, reusing the SuperGlue fast pipeline."""

    def post_process_keypoint_matching(
        self,
        outputs: "LightGlueKeypointMatchingOutput",
        target_sizes: TensorType | list[tuple],
        threshold: float = 0.0,
    ) -> list[dict[str, torch.Tensor]]:
        # Pure delegation to the SuperGlue implementation; presumably overridden only so the
        # LightGlue-specific output type appears in this class's signature — behavior is unchanged.
        return super().post_process_keypoint_matching(outputs, target_sizes, threshold)
class LightGluePositionalEncoder(nn.Module):
    """Encodes (x, y) keypoint locations into rotary-style (cos, sin) position embeddings."""

    def __init__(self, config: LightGlueConfig):
        super().__init__()
        # One learned frequency per rotary pair: half the per-head dimension.
        self.projector = nn.Linear(2, config.descriptor_dim // config.num_attention_heads // 2, bias=False)

    def forward(
        self, keypoints: torch.Tensor, output_hidden_states: bool | None = False
    ) -> tuple[torch.Tensor] | tuple[torch.Tensor, torch.Tensor]:
        projected_keypoints = self.projector(keypoints)
        # Duplicate each frequency so cos/sin values line up with the rotary pair layout.
        angles = projected_keypoints.repeat_interleave(2, dim=-1)
        embeddings = (torch.cos(angles), torch.sin(angles))
        if output_hidden_states:
            return embeddings, projected_keypoints
        return (embeddings,)
class LightGlueAttention(LlamaAttention):
    """Llama-style attention adapted for LightGlue: optional rotary embeddings and cross-attention."""

    def __init__(self, config: LightGlueConfig, layer_idx: int):
        # Bug fix: LlamaAttention.__init__ requires (config, layer_idx); calling super().__init__()
        # with no arguments raises a TypeError as soon as the layer is constructed.
        super().__init__(config, layer_idx)
        # NOTE(review): assumes the parent class creates `rotary_emb`; LightGlue instead receives
        # the (cos, sin) embeddings explicitly in forward() — confirm against the LlamaAttention in use.
        del self.rotary_emb

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        attention_mask: torch.Tensor | None = None,
        encoder_hidden_states: torch.Tensor | None = None,
        encoder_attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """
        Self- or cross-attention over keypoint descriptors.

        When `encoder_hidden_states` is given, keys/values (and the mask) come from the other image
        of the pair (cross-attention); otherwise they come from `hidden_states` itself. Rotary
        position embeddings `(cos, sin)` are applied only when `position_embeddings` is provided.
        """
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        # Select the source of keys/values: the other image (cross) or this one (self).
        is_cross_attention = encoder_hidden_states is not None
        current_states = encoder_hidden_states if is_cross_attention else hidden_states
        current_attention_mask = encoder_attention_mask if is_cross_attention else attention_mask

        key_states = self.k_proj(current_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(current_states).view(hidden_shape).transpose(1, 2)

        if position_embeddings is not None:
            cos, sin = position_embeddings
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            current_attention_mask,
            # Dropout is disabled at inference time.
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class LightGlueMLP(CLIPMLP):
    """CLIP-style MLP variant: the first projection keeps the doubled width and is followed by LayerNorm."""

    def __init__(self, config: LightGlueConfig):
        super().__init__(config)
        # Inputs are [descriptors ; attention output], i.e. intermediate_size wide.
        self.fc1 = nn.Linear(config.intermediate_size, config.intermediate_size)
        self.layer_norm = nn.LayerNorm(config.intermediate_size, elementwise_affine=True)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Apply fc1 -> layer_norm -> activation -> fc2 in sequence.
        for stage in (self.fc1, self.layer_norm, self.activation_fn, self.fc2):
            hidden_states = stage(hidden_states)
        return hidden_states
class LightGlueTransformerLayer(nn.Module):
    """One LightGlue block: self-attention within each image, then cross-attention across the pair."""

    def __init__(self, config: LightGlueConfig, layer_idx: int):
        super().__init__()
        self.self_attention = LightGlueAttention(config, layer_idx)
        self.self_mlp = LightGlueMLP(config)
        self.cross_attention = LightGlueAttention(config, layer_idx)
        self.cross_mlp = LightGlueMLP(config)

    def forward(
        self,
        descriptors: torch.Tensor,
        keypoints: torch.Tensor,
        attention_mask: torch.Tensor,
        output_hidden_states: bool | None = False,
        output_attentions: bool | None = False,
    ) -> tuple[torch.Tensor, tuple[torch.Tensor] | None, tuple[torch.Tensor] | None]:
        # `keypoints` carries the (cos, sin) rotary embeddings from the positional encoder;
        # they are applied only in the self-attention step.
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (descriptors,)

        batch_size, num_keypoints, descriptor_dim = descriptors.shape

        # Self attention block
        attention_output, self_attentions = self.self_attention(
            descriptors,
            position_embeddings=keypoints,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
        )
        # Residual update through an MLP fed with the concatenation [descriptors ; attention output].
        intermediate_states = torch.cat([descriptors, attention_output], dim=-1)
        output_states = self.self_mlp(intermediate_states)
        self_attention_descriptors = descriptors + output_states

        if output_hidden_states:
            self_attention_hidden_states = (intermediate_states, output_states)

        # Reshape hidden_states to group by image_pairs :
        # (batch_size, num_keypoints, descriptor_dim) -> (batch_size, 2, num_keypoints, descriptor_dim)
        # Flip dimension 1 to perform cross attention :
        # (image0, image1) -> (image1, image0)
        # Reshape back to original shape :
        # (batch_size, 2, num_keypoints, descriptor_dim) -> (batch_size, num_keypoints, descriptor_dim)
        encoder_hidden_states = (
            self_attention_descriptors.reshape(-1, 2, num_keypoints, descriptor_dim)
            .flip(1)
            .reshape(batch_size, num_keypoints, descriptor_dim)
        )
        # Same for mask
        encoder_attention_mask = (
            attention_mask.reshape(-1, 2, 1, 1, num_keypoints).flip(1).reshape(batch_size, 1, 1, num_keypoints)
            if attention_mask is not None
            else None
        )

        # Cross attention block: each image attends to the other image's (self-attended) descriptors.
        cross_attention_output, cross_attentions = self.cross_attention(
            self_attention_descriptors,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
        )
        cross_intermediate_states = torch.cat([self_attention_descriptors, cross_attention_output], dim=-1)
        cross_output_states = self.cross_mlp(cross_intermediate_states)
        descriptors = self_attention_descriptors + cross_output_states

        if output_hidden_states:
            cross_attention_hidden_states = (cross_intermediate_states, cross_output_states)
            all_hidden_states = (
                all_hidden_states
                + (self_attention_descriptors.reshape(batch_size, num_keypoints, descriptor_dim),)
                + self_attention_hidden_states
                + (descriptors.reshape(batch_size, num_keypoints, descriptor_dim),)
                + cross_attention_hidden_states
            )

        if output_attentions:
            all_attentions = all_attentions + (self_attentions,) + (cross_attentions,)

        return descriptors, all_hidden_states, all_attentions
def sigmoid_log_double_softmax(
    similarity: torch.Tensor, matchability0: torch.Tensor, matchability1: torch.Tensor
) -> torch.Tensor:
    """Build the (M+1) x (N+1) log assignment matrix from similarity and matchability logits."""
    batch_size, num_rows, num_cols = similarity.shape
    log_sigmoid = nn.functional.logsigmoid
    # Joint log-probability that both keypoints of a candidate pair are matchable at all.
    certainties = log_sigmoid(matchability0) + log_sigmoid(matchability1).transpose(1, 2)
    # Row-wise and column-wise log-softmax over the similarity matrix.
    row_scores = nn.functional.log_softmax(similarity, 2)
    col_scores = nn.functional.log_softmax(similarity.transpose(-1, -2).contiguous(), 2).transpose(-1, -2)
    # Extra last row/column represent the "unmatched" outcome for each keypoint.
    scores = similarity.new_full((batch_size, num_rows + 1, num_cols + 1), 0)
    scores[:, :num_rows, :num_cols] = row_scores + col_scores + certainties
    scores[:, :-1, -1] = log_sigmoid(-matchability0.squeeze(-1))
    scores[:, -1, :-1] = log_sigmoid(-matchability1.squeeze(-1))
    return scores
class LightGlueMatchAssignmentLayer(nn.Module):
    """Computes the log assignment matrix between the keypoints of the two images in each pair."""

    def __init__(self, config: LightGlueConfig):
        super().__init__()
        self.descriptor_dim = config.descriptor_dim
        self.final_projection = nn.Linear(self.descriptor_dim, self.descriptor_dim, bias=True)
        self.matchability = nn.Linear(self.descriptor_dim, 1, bias=True)

    def forward(self, descriptors: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
        """
        Args:
            descriptors: `(batch_size, num_keypoints, descriptor_dim)` where rows `(2i, 2i+1)`
                are the two images of pair `i`.
            mask: `(batch_size, num_keypoints)` keypoint validity mask, or `None`.

        Returns:
            `(batch_size // 2, num_keypoints + 1, num_keypoints + 1)` log assignment scores.
        """
        batch_size, num_keypoints, descriptor_dim = descriptors.shape
        # Final projection and similarity computation
        m_descriptors = self.final_projection(descriptors)
        # Scale each side by dim**-0.25 so the dot product is scaled by dim**-0.5.
        # Perf fix: use a Python scalar instead of allocating a tensor on every forward call
        # (numerically identical).
        m_descriptors = m_descriptors / self.descriptor_dim**0.25
        m_descriptors = m_descriptors.reshape(batch_size // 2, 2, num_keypoints, descriptor_dim)
        m_descriptors0 = m_descriptors[:, 0]
        m_descriptors1 = m_descriptors[:, 1]
        similarity = m_descriptors0 @ m_descriptors1.transpose(-1, -2)
        if mask is not None:
            # Padded keypoints cannot participate in any match: push their similarity to -inf-like.
            mask = mask.reshape(batch_size // 2, 2, num_keypoints)
            mask0 = mask[:, 0].unsqueeze(-1)
            mask1 = mask[:, 1].unsqueeze(-1).transpose(-1, -2)
            mask = mask0 * mask1
            similarity = similarity.masked_fill(mask == 0, torch.finfo(similarity.dtype).min)

        # Compute matchability of descriptors
        matchability = self.matchability(descriptors)
        matchability = matchability.reshape(batch_size // 2, 2, num_keypoints, 1)
        matchability_0 = matchability[:, 0]
        matchability_1 = matchability[:, 1]

        # Compute scores from similarity and matchability
        scores = sigmoid_log_double_softmax(similarity, matchability_0, matchability_1)
        return scores

    def get_matchability(self, descriptors: torch.Tensor) -> torch.Tensor:
        """Get matchability of descriptors as a probability"""
        matchability = self.matchability(descriptors)
        matchability = nn.functional.sigmoid(matchability).squeeze(-1)
        return matchability
class LightGlueTokenConfidenceLayer(nn.Module):
    """Predicts a per-keypoint confidence probability from descriptors."""

    def __init__(self, config: LightGlueConfig):
        super().__init__()
        self.token = nn.Linear(config.descriptor_dim, 1)

    def forward(self, descriptors: torch.Tensor) -> torch.Tensor:
        # Detach so the confidence head does not backpropagate into the descriptors.
        logits = self.token(descriptors.detach())
        return torch.sigmoid(logits).squeeze(-1)
@auto_docstring
class LightGluePreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config: LightGlueConfig
    base_model_prefix = "lightglue"
    # The model consumes raw image tensors.
    main_input_name = "pixel_values"
    input_modalities = ("image",)
    supports_gradient_checkpointing = False
    # Flash-attention and SDPA kernels may be selected via `attn_implementation`.
    _supports_flash_attn = True
    _supports_sdpa = True
def get_matches_from_scores(scores: torch.Tensor, threshold: float) -> tuple[torch.Tensor, torch.Tensor]:
    """Extract mutual matches and their scores from a log assignment matrix of shape (B, M+1, N+1)."""
    batch_size = scores.shape[0]
    # Drop the "unmatched" row/column, then take each keypoint's best candidate on both sides.
    inner = scores[:, :-1, :-1]
    best0 = inner.max(2)
    best1 = inner.max(1)
    matches0, matches1 = best0.indices, best1.indices
    # A pair is kept only if each side is the other's argmax (mutual nearest neighbour).
    arange0 = torch.arange(matches0.shape[1], device=matches0.device)[None]
    arange1 = torch.arange(matches1.shape[1], device=matches1.device)[None]
    mutual0 = arange0 == matches1.gather(1, matches0)
    mutual1 = arange1 == matches0.gather(1, matches1)
    # Turn log-scores into probabilities and zero out non-mutual entries.
    probs0 = best0.values.exp()
    zero = probs0.new_tensor(0)
    matching_scores0 = torch.where(mutual0, probs0, zero)
    matching_scores1 = torch.where(mutual1, matching_scores0.gather(1, matches1), zero)
    # Threshold on the image-0 side and propagate validity to image 1.
    valid0 = mutual0 & (matching_scores0 > threshold)
    valid1 = mutual1 & valid0.gather(1, matches1)
    matches0 = torch.where(valid0, matches0, -1)
    matches1 = torch.where(valid1, matches1, -1)
    # Interleave the two images of each pair: (B, 2, N) -> (2B, N).
    matches = torch.stack([matches0, matches1]).transpose(0, 1).reshape(batch_size * 2, -1)
    matching_scores = torch.stack([matching_scores0, matching_scores1]).transpose(0, 1).reshape(batch_size * 2, -1)
    return matches, matching_scores
def normalize_keypoints(keypoints: torch.Tensor, height: int, width: int) -> torch.Tensor:
    """
    Normalize keypoint locations by centering on the image and scaling by half the longer side.

    Args:
        keypoints (`torch.Tensor` of shape `(batch_size, num_keypoints, 2)`):
            Keypoints locations in (x, y) format.
        height (`int`):
            Image height.
        width (`int`):
            Image width.

    Returns:
        Normalized keypoints locations of shape (`torch.Tensor` of shape `(batch_size, num_keypoints, 2)`).
    """
    size = torch.tensor([width, height], device=keypoints.device, dtype=keypoints.dtype)[None]
    center = size / 2
    # One uniform scale (half the longer side) keeps the aspect ratio intact.
    half_extent = size.max(-1).values / 2
    return (keypoints - center[..., None, :]) / half_extent[..., None, None]
@auto_docstring(
custom_intro="""
LightGlue model taking images as inputs and outputting the matching of them.
"""
)
class LightGlueForKeypointMatching(LightGluePreTrainedModel):
"""
LightGlue is a model matching keypoints in images by leveraging detections from a keypoint detector such as
SuperPoint. It is based on the SuperGlue architecture and is designed to be lightweight and efficient.
It consists of :
1. Keypoint Encoder
2. A Graph Neural Network with self and cross attention layers
3. Matching Assignment layers
The correspondence ids use -1 to indicate non-matching points.
Philipp Lindenberger, Paul-Edouard Sarlin and Marc Pollefeys. LightGlue: Local Feature Matching at Light Speed.
In ICCV 2023. https://huggingface.co/papers/2306.13643
"""
def __init__(self, config: LightGlueConfig):
    """Build the keypoint detector, descriptor projection, transformer stack and matching heads."""
    super().__init__(config)

    # Keypoint detector (e.g. SuperPoint) supplying keypoints and raw descriptors.
    self.keypoint_detector = AutoModelForKeypointDetection.from_config(
        config.keypoint_detector_config, trust_remote_code=config.trust_remote_code
    )

    self.keypoint_detector_descriptor_dim = config.keypoint_detector_config.descriptor_decoder_dim
    self.descriptor_dim = config.descriptor_dim
    self.num_layers = config.num_hidden_layers
    self.filter_threshold = config.filter_threshold
    self.depth_confidence = config.depth_confidence
    self.width_confidence = config.width_confidence

    # Project detector descriptors into the model dimension only when the two dims differ.
    if self.descriptor_dim != self.keypoint_detector_descriptor_dim:
        self.input_projection = nn.Linear(self.keypoint_detector_descriptor_dim, self.descriptor_dim, bias=True)
    else:
        self.input_projection = nn.Identity()

    self.positional_encoder = LightGluePositionalEncoder(config)

    self.transformer_layers = nn.ModuleList(
        [LightGlueTransformerLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)]
    )
    self.match_assignment_layers = nn.ModuleList(
        [LightGlueMatchAssignmentLayer(config) for _ in range(config.num_hidden_layers)]
    )
    # One confidence head per layer except the last (num_hidden_layers - 1 heads).
    self.token_confidence = nn.ModuleList(
        [LightGlueTokenConfidenceLayer(config) for _ in range(config.num_hidden_layers - 1)]
    )
    self.post_init()
def _get_confidence_threshold(self, layer_index: int) -> float:
    """Depth-scaled confidence threshold: starts at 0.9 and decays towards 0.8, clamped to [0, 1]."""
    decay = np.exp(-4.0 * layer_index / self.num_layers)
    return np.clip(0.8 + 0.1 * decay, 0, 1)
def _keypoint_processing(
    self, descriptors: torch.Tensor, keypoints: torch.Tensor, output_hidden_states: bool | None = False
) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]:
    """Project detector descriptors into model space and positionally encode the keypoints."""
    # Detach: gradients never flow back into the keypoint detector.
    detached_descriptors = descriptors.detach().contiguous()
    projected_descriptors = self.input_projection(detached_descriptors)
    keypoint_encoding_output = self.positional_encoder(keypoints, output_hidden_states=output_hidden_states)
    return projected_descriptors, keypoint_encoding_output
def _get_early_stopped_image_pairs(
    self, keypoint_confidences: torch.Tensor, layer_index: int, mask: torch.Tensor, num_points: torch.Tensor
) -> torch.Tensor:
    """evaluate whether we should stop inference based on the confidence of the keypoints"""
    batch_size, _ = mask.shape
    if layer_index < self.num_layers - 1:
        # If the current layer is not the last layer, we compute the confidence of the keypoints and check
        # if we should stop the forward pass through the transformer layers for each pair of images.
        # Padded positions are filled with confidence 1 so they never delay early stopping.
        keypoint_confidences = keypoint_confidences.masked_fill(mask == 0, 1)
        # Group the two images of each pair together: one early-stop decision per pair.
        keypoint_confidences = keypoint_confidences.reshape(batch_size // 2, -1)
        threshold = self._get_confidence_threshold(layer_index)
        # Fraction of this pair's keypoints already at or above the layer threshold.
        ratio_confident = 1.0 - (keypoint_confidences < threshold).float().sum(dim=1) / num_points
        early_stopped_pairs = ratio_confident > self.depth_confidence
    else:
        # If the current layer is the last layer, we stop the forward pass through the transformer layers for
        # all pairs of images.
        # NOTE(review): this branch returns `batch_size` entries while the branch above returns
        # `batch_size // 2` (one per pair) — confirm the intended shape.
        early_stopped_pairs = torch.ones(batch_size, dtype=torch.bool)
    return early_stopped_pairs
def _get_keypoint_matching(self, descriptors, mask, layer_index, early_stops=None):
    """Run the match assignment head at `layer_index`, optionally restricted to early-stopped pairs."""
    if early_stops is not None:
        descriptors = descriptors[early_stops]
        mask = mask[early_stops]
    scores = self.match_assignment_layers[layer_index](descriptors, mask)
    return get_matches_from_scores(scores, self.filter_threshold)
def _get_pruning_mask(self, confidences: torch.Tensor, scores: torch.Tensor, layer_index: int) -> torch.Tensor:
    """Boolean mask of keypoints to keep: matchable enough, or not yet confidently classified."""
    keep = scores > (1 - self.width_confidence)
    if confidences is not None:
        # Low-confidence points are never pruned.
        keep = keep | (confidences <= self._get_confidence_threshold(layer_index))
    return keep
def _do_layer_keypoint_pruning(
    self,
    descriptors: torch.Tensor,
    keypoints: torch.Tensor,
    mask: torch.Tensor,
    indices: torch.Tensor,
    prune_output: torch.Tensor,
    keypoint_confidences: torch.Tensor,
    layer_index: int,
):
    """
    For a given layer, prune keypoints based on the confidence of the keypoints and the matchability of the
    descriptors.
    """
    batch_size, _, _ = descriptors.shape
    # Matchability probability from this layer's assignment head.
    descriptors_matchability = self.match_assignment_layers[layer_index].get_matchability(descriptors)
    # `pruned_keypoints_mask` marks keypoints that survive (see `keep` in _get_pruning_mask);
    # padded positions never survive.
    pruned_keypoints_mask = self._get_pruning_mask(keypoint_confidences, descriptors_matchability, layer_index)
    pruned_keypoints_mask = pruned_keypoints_mask.masked_fill(mask == 0, torch.tensor(False))

    # For each image, we extract the pruned indices and the corresponding descriptors and keypoints.
    # `keypoints` is the (cos, sin) pair from the positional encoder, hence keypoints[0]/keypoints[1].
    pruned_descriptors, pruned_keypoints_0, pruned_keypoints_1, pruned_mask, pruned_indices = (
        [t[mask] for t, mask in zip(tensor, pruned_keypoints_mask)]
        for tensor in [descriptors, keypoints[0], keypoints[1], pruned_keypoints_mask, indices]
    )
    # Track, per original keypoint, how many layers it survived (reported as `prune` output).
    for i in range(batch_size):
        prune_output[i, pruned_indices[i]] += 1

    # Pad the pruned descriptors, keypoints, indices and mask to have the same shape across the batch.
    pruned_descriptors, pruned_keypoints_0, pruned_keypoints_1, pruned_mask = (
        pad_sequence(pruned_tensor, batch_first=True)
        for pruned_tensor in [pruned_descriptors, pruned_keypoints_0, pruned_keypoints_1, pruned_mask]
    )
    pruned_keypoints = (pruned_keypoints_0, pruned_keypoints_1)
    pruned_indices = pad_sequence(pruned_indices, batch_first=True, padding_value=-1)
    return pruned_descriptors, pruned_keypoints, pruned_indices, pruned_mask, prune_output
def _concat_early_stopped_outputs(
    self,
    early_stops_indices,
    final_pruned_keypoints_indices,
    final_pruned_keypoints_iterations,
    matches,
    matching_scores,
):
    """Stack per-pair early-stopped results and reorder them back to the input batch order."""
    early_stops_indices = torch.stack(early_stops_indices)

    # Rearrange tensors to have the same order as the input batch
    ids = torch.arange(early_stops_indices.shape[0])
    # NOTE(review): `early_stops_indices[ids]` with ids == 0..N-1 is an identity gather, so
    # `order_indices` equals `early_stops_indices` itself; an argsort may have been intended —
    # confirm against the reference implementation.
    order_indices = early_stops_indices[ids]
    early_stops_indices = early_stops_indices[order_indices]

    # Pad the ragged per-pair tensors to a common length (-1 marks "no match"/"no index").
    matches, final_pruned_keypoints_indices = (
        pad_sequence(tensor, batch_first=True, padding_value=-1)
        for tensor in [matches, final_pruned_keypoints_indices]
    )
    matching_scores, final_pruned_keypoints_iterations = (
        pad_sequence(tensor, batch_first=True, padding_value=0)
        for tensor in [matching_scores, final_pruned_keypoints_iterations]
    )
    matches, matching_scores, final_pruned_keypoints_indices, final_pruned_keypoints_iterations = (
        tensor[early_stops_indices]
        for tensor in [
            matches,
            matching_scores,
            final_pruned_keypoints_indices,
            final_pruned_keypoints_iterations,
        ]
    )
    return final_pruned_keypoints_indices, final_pruned_keypoints_iterations, matches, matching_scores
def _do_final_keypoint_pruning(
    self,
    indices: torch.Tensor,
    matches: torch.Tensor,
    matching_scores: torch.Tensor,
    num_keypoints: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Scatter matches computed in pruned-keypoint space back onto the original keypoint indexing."""
    # (batch_size, num_keypoints) -> (batch_size // 2, 2, num_keypoints): split each pair into
    # its image-0 and image-1 halves.
    batch_size, _ = indices.shape
    indices, matches, matching_scores = (
        tensor.reshape(batch_size // 2, 2, -1) for tensor in [indices, matches, matching_scores]
    )
    indices0 = indices[:, 0]
    indices1 = indices[:, 1]
    matches0 = matches[:, 0]
    matches1 = matches[:, 1]
    matching_scores0 = matching_scores[:, 0]
    matching_scores1 = matching_scores[:, 1]

    # Prepare final matches and matching scores: -1 ("no match") and 0.0 score by default.
    _matches = torch.full((batch_size // 2, 2, num_keypoints), -1, device=indices.device, dtype=matches.dtype)
    _matching_scores = torch.zeros(
        (batch_size // 2, 2, num_keypoints), device=indices.device, dtype=matching_scores.dtype
    )
    # Fill the matches and matching scores for each image pair
    for i in range(batch_size // 2):
        # Translate pruned-space match indices to original keypoint ids. clamp(min=0) keeps the
        # gather legal for -1 entries, which torch.where then maps back to -1.
        _matches[i, 0, indices0[i]] = torch.where(
            matches0[i] == -1, -1, indices1[i].gather(0, matches0[i].clamp(min=0))
        )
        _matches[i, 1, indices1[i]] = torch.where(
            matches1[i] == -1, -1, indices0[i].gather(0, matches1[i].clamp(min=0))
        )
        _matching_scores[i, 0, indices0[i]] = matching_scores0[i]
        _matching_scores[i, 1, indices1[i]] = matching_scores1[i]
    return _matches, _matching_scores
    def _match_image_pair(
        self,
        keypoints: torch.Tensor,
        descriptors: torch.Tensor,
        height: int,
        width: int,
        mask: torch.Tensor | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, tuple, tuple]:
        """Match keypoints between the two images of each pair.

        Runs the transformer layers over the keypoint descriptors, optionally stopping
        early for confident pairs (depth confidence) and pruning low-confidence keypoints
        between layers (width confidence).

        Args:
            keypoints: pixel-space coordinates, shape (batch_size, 2, num_keypoints, 2).
            descriptors: shape (batch_size, 2, num_keypoints, descriptor_dim).
            height, width: image size used to normalize the keypoint coordinates.
            mask: keypoint validity mask. NOTE(review): annotated Optional, but
                `torch.sum(mask...)` below dereferences it unconditionally — confirm
                every caller passes a mask.
            output_attentions, output_hidden_states: whether to collect per-layer
                attentions / hidden states.

        Returns:
            (matches, matching_scores, pruned-keypoint iteration counts,
            all_hidden_states, all_attentions).
        """
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        if keypoints.shape[2] == 0:  # no keypoints
            shape = keypoints.shape[:-1]
            # Degenerate case: empty outputs with the expected shapes and dtypes.
            return (
                keypoints.new_full(shape, -1, dtype=torch.int),
                keypoints.new_zeros(shape),
                keypoints.new_zeros(shape),
                all_hidden_states,
                all_attentions,
            )
        device = keypoints.device
        batch_size, _, initial_num_keypoints, _ = keypoints.shape
        # Valid keypoint count per image pair (assumes mask is not None — see docstring note).
        num_points_per_pair = torch.sum(mask.reshape(batch_size, -1), dim=1)
        # (batch_size, 2, num_keypoints, 2) -> (batch_size * 2, num_keypoints, 2)
        keypoints = keypoints.reshape(batch_size * 2, initial_num_keypoints, 2)
        mask = mask.reshape(batch_size * 2, initial_num_keypoints) if mask is not None else None
        descriptors = descriptors.reshape(batch_size * 2, initial_num_keypoints, self.keypoint_detector_descriptor_dim)
        # Tracks each image's original position so early-stopped results can be re-ordered.
        image_indices = torch.arange(batch_size * 2, device=device)
        # Keypoint normalization
        keypoints = normalize_keypoints(keypoints, height, width)
        descriptors, keypoint_encoding_output = self._keypoint_processing(
            descriptors, keypoints, output_hidden_states=output_hidden_states
        )
        # From here on, `keypoints` holds the keypoint-encoding output, which is later
        # indexed as a pair of tensors (see the early-stop filtering below), not raw coords.
        keypoints = keypoint_encoding_output[0]
        # Early stop consists of stopping the forward pass through the transformer layers when the confidence of the
        # keypoints is above a certain threshold.
        do_early_stop = self.depth_confidence > 0
        # Keypoint pruning consists of removing keypoints from the input of the transformer layers when the confidence of
        # the keypoints is below a certain threshold.
        do_keypoint_pruning = self.width_confidence > 0
        # Accumulators for pairs that finish early; re-assembled in input order at the end.
        early_stops_indices = []
        matches = []
        matching_scores = []
        final_pruned_keypoints_indices = []
        final_pruned_keypoints_iterations = []
        # Initially every keypoint survives and has passed through one iteration.
        pruned_keypoints_indices = torch.arange(0, initial_num_keypoints, device=device).expand(batch_size * 2, -1)
        pruned_keypoints_iterations = torch.ones_like(pruned_keypoints_indices)
        for layer_index in range(self.num_layers):
            input_shape = descriptors.size()
            if mask is not None:
                extended_attention_mask = self.get_extended_attention_mask(mask, input_shape)
            else:
                # NOTE(review): this fallback sizes the mask with `batch_size` (pairs) rather
                # than `batch_size * 2` (images) — confirm whether this path is reachable.
                extended_attention_mask = torch.ones((batch_size, input_shape[-2]), device=keypoints.device)
            layer_output = self.transformer_layers[layer_index](
                descriptors,
                keypoints,
                attention_mask=extended_attention_mask,
                output_hidden_states=output_hidden_states,
                output_attentions=output_attentions,
            )
            descriptors, hidden_states, attention = layer_output
            if output_hidden_states:
                all_hidden_states = all_hidden_states + hidden_states
            if output_attentions:
                all_attentions = all_attentions + attention
            if do_early_stop:
                if layer_index < self.num_layers - 1:
                    # Get the confidence of the keypoints for the current layer
                    keypoint_confidences = self.token_confidence[layer_index](descriptors)
                    # Determine which pairs of images should be early stopped based on the confidence of the keypoints for
                    # the current layer.
                    early_stopped_pairs = self._get_early_stopped_image_pairs(
                        keypoint_confidences, layer_index, mask, num_points=num_points_per_pair
                    )
                else:
                    # Early stopping always occurs at the last layer
                    early_stopped_pairs = torch.ones(batch_size, dtype=torch.bool)
                if torch.any(early_stopped_pairs):
                    # If a pair of images is considered early stopped, we compute the matches for the remaining
                    # keypoints and stop the forward pass through the transformer layers for this pair of images.
                    early_stops = early_stopped_pairs.repeat_interleave(2)
                    early_stopped_image_indices = image_indices[early_stops]
                    early_stopped_matches, early_stopped_matching_scores = self._get_keypoint_matching(
                        descriptors, mask, layer_index, early_stops=early_stops
                    )
                    early_stops_indices.extend(list(early_stopped_image_indices))
                    matches.extend(list(early_stopped_matches))
                    matching_scores.extend(list(early_stopped_matching_scores))
                    if do_keypoint_pruning:
                        final_pruned_keypoints_indices.extend(list(pruned_keypoints_indices[early_stops]))
                        final_pruned_keypoints_iterations.extend(list(pruned_keypoints_iterations[early_stops]))
                    # Remove image pairs that have been early stopped from the forward pass.
                    # `keypoints` is a pair of tensors here and is filtered component-wise.
                    num_points_per_pair = num_points_per_pair[~early_stopped_pairs]
                    descriptors, keypoints_0, keypoint_1, mask, image_indices = tuple(
                        tensor[~early_stops]
                        for tensor in [descriptors, keypoints[0], keypoints[1], mask, image_indices]
                    )
                    keypoints = (keypoints_0, keypoint_1)
                    if do_keypoint_pruning:
                        pruned_keypoints_indices, pruned_keypoints_iterations, keypoint_confidences = tuple(
                            tensor[~early_stops]
                            for tensor in [
                                pruned_keypoints_indices,
                                pruned_keypoints_iterations,
                                keypoint_confidences,
                            ]
                        )
                    # If all pairs of images are early stopped, we stop the forward pass through the transformer
                    # layers for all pairs of images.
                    if torch.all(early_stopped_pairs):
                        break
            if do_keypoint_pruning:
                # Prune keypoints from the input of the transformer layers for the next iterations if the confidence of
                # the keypoints is below a certain threshold.
                # NOTE(review): `keypoint_confidences` is only assigned on the early-stop path;
                # confirm width_confidence > 0 always implies depth_confidence > 0.
                descriptors, keypoints, pruned_keypoints_indices, mask, pruned_keypoints_iterations = (
                    self._do_layer_keypoint_pruning(
                        descriptors,
                        keypoints,
                        mask,
                        pruned_keypoints_indices,
                        pruned_keypoints_iterations,
                        keypoint_confidences,
                        layer_index,
                    )
                )
        if do_early_stop and do_keypoint_pruning:
            # Concatenate early stopped outputs together and perform final keypoint pruning
            final_pruned_keypoints_indices, final_pruned_keypoints_iterations, matches, matching_scores = (
                self._concat_early_stopped_outputs(
                    early_stops_indices,
                    final_pruned_keypoints_indices,
                    final_pruned_keypoints_iterations,
                    matches,
                    matching_scores,
                )
            )
            matches, matching_scores = self._do_final_keypoint_pruning(
                final_pruned_keypoints_indices,
                matches,
                matching_scores,
                initial_num_keypoints,
            )
        else:
            matches, matching_scores = self._get_keypoint_matching(descriptors, mask, self.num_layers - 1)
            # Without pruning, every keypoint is considered to have gone through all layers.
            final_pruned_keypoints_iterations = torch.ones_like(matching_scores) * self.num_layers
        final_pruned_keypoints_iterations = final_pruned_keypoints_iterations.reshape(
            batch_size, 2, initial_num_keypoints
        )
        return (
            matches,
            matching_scores,
            final_pruned_keypoints_iterations,
            all_hidden_states,
            all_attentions,
        )
@can_return_tuple
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
labels: torch.LongTensor | None = None,
output_attentions: bool | None = None,
output_hidden_states: bool | None = None,
**kwargs,
) -> tuple | LightGlueKeypointMatchingOutput:
loss = None
if labels is not None:
raise ValueError("LightGlue is not trainable, no labels should be provided.")
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
if pixel_values.ndim != 5 or pixel_values.size(1) != 2:
raise ValueError("Input must be a 5D tensor of shape (batch_size, 2, num_channels, height, width)")
batch_size, _, channels, height, width = pixel_values.shape
pixel_values = pixel_values.reshape(batch_size * 2, channels, height, width)
keypoint_detections = self.keypoint_detector(pixel_values)
keypoints, _, descriptors, mask = keypoint_detections[:4]
keypoints = keypoints.reshape(batch_size, 2, -1, 2).to(pixel_values)
descriptors = descriptors.reshape(batch_size, 2, -1, self.keypoint_detector_descriptor_dim).to(pixel_values)
mask = mask.reshape(batch_size, 2, -1)
absolute_keypoints = keypoints.clone()
absolute_keypoints[:, :, :, 0] = absolute_keypoints[:, :, :, 0] * width
absolute_keypoints[:, :, :, 1] = absolute_keypoints[:, :, :, 1] * height
matches, matching_scores, prune, hidden_states, attentions = self._match_image_pair(
absolute_keypoints,
descriptors,
height,
width,
mask=mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
return LightGlueKeypointMatchingOutput(
loss=loss,
matches=matches,
matching_scores=matching_scores,
keypoints=keypoints,
prune=prune,
mask=mask,
hidden_states=hidden_states,
attentions=attentions,
)
# Public symbols of this module.
__all__ = [
    "LightGluePreTrainedModel",
    "LightGlueForKeypointMatching",
    "LightGlueConfig",
    "LightGlueImageProcessor",
    "LightGlueImageProcessorFast",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/lightglue/modular_lightglue.py",
"license": "Apache License 2.0",
"lines": 849,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/lightglue/test_image_processing_lightglue.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from tests.models.superglue.test_image_processing_superglue import (
SuperGlueImageProcessingTest,
SuperGlueImageProcessingTester,
)
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
if is_torch_available():
import numpy as np
import torch
from transformers.models.lightglue.modeling_lightglue import LightGlueKeypointMatchingOutput
if is_vision_available():
from transformers import LightGlueImageProcessor
if is_torchvision_available():
from transformers import LightGlueImageProcessorFast
def random_array(size):
    """Return an integer array of the given shape, values uniform over [0, 255) (255 exclusive)."""
    return np.random.randint(0, 255, size=size)
def random_tensor(size):
    """Return a float tensor of the given shape, values uniform over [0, 1)."""
    return torch.rand(size)
class LightGlueImageProcessingTester(SuperGlueImageProcessingTester):
    """Builds fake inputs and model outputs for the LightGlueImageProcessor tests."""
    def __init__(
        self,
        parent,
        batch_size=6,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_grayscale=True,
    ):
        super().__init__(
            parent, batch_size, num_channels, image_size, min_resolution, max_resolution, do_resize, size, do_grayscale
        )
    def prepare_keypoint_matching_output(self, pixel_values):
        """Build a fake keypoint-matching output with random matches among up to 50 keypoints per image."""
        max_keypoints = 50
        num_pairs = len(pixel_values)
        mask = torch.zeros((num_pairs, 2, max_keypoints), dtype=torch.int)
        keypoints = torch.zeros((num_pairs, 2, max_keypoints, 2))
        matches = torch.full((num_pairs, 2, max_keypoints), -1, dtype=torch.int)
        scores = torch.zeros((num_pairs, 2, max_keypoints))
        prune = torch.zeros((num_pairs, 2, max_keypoints), dtype=torch.int)
        for pair_idx in range(num_pairs):
            # Draw per-image keypoint counts first, then the mutual match count
            # (RNG call order kept identical to the reference implementation).
            num_kp0 = np.random.randint(10, max_keypoints)
            num_kp1 = np.random.randint(10, max_keypoints)
            num_matches = np.random.randint(5, min(num_kp0, num_kp1))
            mask[pair_idx, 0, :num_kp0] = 1
            mask[pair_idx, 1, :num_kp1] = 1
            keypoints[pair_idx, 0, :num_kp0] = torch.rand((num_kp0, 2))
            keypoints[pair_idx, 1, :num_kp1] = torch.rand((num_kp1, 2))
            match_indices0 = torch.randperm(num_kp1, dtype=torch.int)[:num_matches]
            match_indices1 = torch.randperm(num_kp0, dtype=torch.int)[:num_matches]
            # Symmetric matches: image0 keypoint -> image1 index, and vice versa.
            matches[pair_idx, 0, match_indices1] = match_indices0
            matches[pair_idx, 1, match_indices0] = match_indices1
            scores[pair_idx, 0, match_indices1] = torch.rand((num_matches,))
            scores[pair_idx, 1, match_indices0] = torch.rand((num_matches,))
        return LightGlueKeypointMatchingOutput(
            mask=mask, keypoints=keypoints, matches=matches, matching_scores=scores, prune=prune
        )
@require_torch
@require_vision
class LightGlueImageProcessingTest(SuperGlueImageProcessingTest, unittest.TestCase):
    """Runs the shared SuperGlue image-processing test suite against the LightGlue processors."""
    # Processor classes under test; None when the optional backend is unavailable
    # (the require_* decorators then skip the whole class).
    image_processing_class = LightGlueImageProcessor if is_vision_available() else None
    fast_image_processing_class = LightGlueImageProcessorFast if is_torchvision_available() else None
    def setUp(self) -> None:
        # Swap in the LightGlue-specific tester after the SuperGlue setup runs.
        super().setUp()
        self.image_processor_tester = LightGlueImageProcessingTester(self)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/lightglue/test_image_processing_lightglue.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/lightglue/test_modeling_lightglue.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import unittest
from functools import cached_property
from datasets import load_dataset
from transformers.models.lightglue.configuration_lightglue import LightGlueConfig
from transformers.testing_utils import get_device_properties, require_torch, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import LightGlueForKeypointMatching
if is_vision_available():
from transformers import AutoImageProcessor
class LightGlueModelTester:
    """Builds a tiny LightGlue configuration and random pixel inputs for the model tests."""

    def __init__(
        self,
        parent,
        batch_size=2,
        image_width=80,
        image_height=60,
        keypoint_detector_config=None,
        descriptor_dim: int = 64,
        num_layers: int = 2,
        num_heads: int = 4,
        depth_confidence: float = 1.0,
        width_confidence: float = 1.0,
        filter_threshold: float = 0.1,
        matching_threshold: float = 0.0,
    ):
        # Fix: the previous signature used a mutable dict literal as the default, which is
        # shared across all instances — a test mutating it would leak into every other test.
        # Build a fresh default per instance instead (behavior for callers is unchanged).
        if keypoint_detector_config is None:
            keypoint_detector_config = {
                "encoder_hidden_sizes": [32, 32, 64],
                "decoder_hidden_size": 64,
                "keypoint_decoder_dim": 65,
                "descriptor_decoder_dim": 64,
                "keypoint_threshold": 0.005,
                "max_keypoints": 256,
                "nms_radius": 4,
                "border_removal_distance": 4,
            }
        self.parent = parent
        self.batch_size = batch_size
        self.image_width = image_width
        self.image_height = image_height
        self.keypoint_detector_config = keypoint_detector_config
        self.descriptor_dim = descriptor_dim
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.depth_confidence = depth_confidence
        self.width_confidence = width_confidence
        self.filter_threshold = filter_threshold
        self.matching_threshold = matching_threshold

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values) with random image pairs of shape (batch, 2, 3, H, W)."""
        pixel_values = floats_tensor([self.batch_size, 2, 3, self.image_height, self.image_width])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        """Build a small LightGlueConfig (num_layers/num_heads map to the HF config names)."""
        return LightGlueConfig(
            keypoint_detector_config=self.keypoint_detector_config,
            descriptor_dim=self.descriptor_dim,
            num_hidden_layers=self.num_layers,
            num_attention_heads=self.num_heads,
            depth_confidence=self.depth_confidence,
            width_confidence=self.width_confidence,
            filter_threshold=self.filter_threshold,
            matching_threshold=self.matching_threshold,
            attn_implementation="eager",
        )

    def create_and_check_model(self, config, pixel_values):
        """Run a forward pass and verify all output shapes against the padded keypoint count."""
        model = LightGlueForKeypointMatching(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        maximum_num_matches = result.mask.shape[-1]
        self.parent.assertEqual(
            result.keypoints.shape,
            (self.batch_size, 2, maximum_num_matches, 2),
        )
        self.parent.assertEqual(
            result.matches.shape,
            (self.batch_size, 2, maximum_num_matches),
        )
        self.parent.assertEqual(
            result.matching_scores.shape,
            (self.batch_size, 2, maximum_num_matches),
        )
        self.parent.assertEqual(
            result.prune.shape,
            (self.batch_size, 2, maximum_num_matches),
        )

    def prepare_config_and_inputs_for_common(self):
        """ModelTesterMixin hook: return (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LightGlueModelTest(ModelTesterMixin, unittest.TestCase):
    """Common ModelTesterMixin checks for LightGlueForKeypointMatching."""
    all_model_classes = (LightGlueForKeypointMatching,) if is_torch_available() else ()
    # NOTE(review): both branches are the empty tuple (LightGlue is not generative).
    all_generative_model_classes = () if is_torch_available() else ()
    test_resize_embeddings = False
    has_attentions = True
    test_torch_exportable = False
    def setUp(self):
        self.model_tester = LightGlueModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LightGlueConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        # Exercise the standard config (de)serialization round-trips.
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_batching_equivalence(self, atol=1e-5, rtol=1e-5):
        device_properties = get_device_properties()
        if device_properties[0] == "cuda" and device_properties[1] == 8:
            # TODO: (ydshieh) fix this
            self.skipTest(reason="After switching to A10, this test always fails, but pass on CPU or T4.")
        super().test_batching_equivalence(atol=atol, rtol=rtol)
    @unittest.skip(reason="LightGlueForKeypointMatching does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="LightGlueForKeypointMatching does not support input and output embeddings")
    def test_model_get_set_embeddings(self):
        pass
    @unittest.skip(reason="LightGlueForKeypointMatching does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    @unittest.skip(reason="This module does not support standalone training")
    def test_training(self):
        pass
    @unittest.skip(reason="This module does not support standalone training")
    def test_training_gradient_checkpointing(self):
        pass
    @unittest.skip(reason="This module does not support standalone training")
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass
    @unittest.skip(reason="This module does not support standalone training")
    def test_training_gradient_checkpointing_use_reentrant_true(self):
        pass
    @unittest.skip(reason="LightGlue does not output any loss term in the forward pass")
    def test_retain_grad_hidden_states_attentions(self):
        pass
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            maximum_num_matches = outputs.mask.shape[-1]
            # Per-layer sequence of expected hidden-state feature sizes (descriptor_dim
            # and its doubled intermediate), repeated for every transformer layer.
            hidden_states_sizes = [
                self.model_tester.descriptor_dim,
                self.model_tester.descriptor_dim,
                self.model_tester.descriptor_dim * 2,
                self.model_tester.descriptor_dim,
                self.model_tester.descriptor_dim,
                self.model_tester.descriptor_dim * 2,
                self.model_tester.descriptor_dim,
            ] * self.model_tester.num_layers
            for i, hidden_states_size in enumerate(hidden_states_sizes):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [maximum_num_matches, hidden_states_size],
                )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_attention_outputs(self):
        def check_attention_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            maximum_num_matches = outputs.mask.shape[-1]
            expected_attention_shape = [self.model_tester.num_heads, maximum_num_matches, maximum_num_matches]
            for i, attention in enumerate(attentions):
                self.assertListEqual(
                    list(attention.shape[-3:]),
                    expected_attention_shape,
                )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            check_attention_output(inputs_dict, config, model_class)
            # check that output_attentions also works using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            check_attention_output(inputs_dict, config, model_class)
    @slow
    def test_model_from_pretrained(self):
        from_pretrained_ids = ["ETH-CVG/lightglue_superpoint"]
        for model_name in from_pretrained_ids:
            model = LightGlueForKeypointMatching.from_pretrained(model_name)
            self.assertIsNotNone(model)
    # Copied from tests.models.superglue.test_modeling_superglue.SuperGlueModelTest.test_forward_labels_should_be_none
    def test_forward_labels_should_be_none(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                model_inputs = self._prepare_for_class(inputs_dict, model_class)
                # Provide an arbitrary sized Tensor as labels to model inputs
                model_inputs["labels"] = torch.rand((128, 128))
                with self.assertRaises(ValueError) as cm:
                    model(**model_inputs)
                self.assertEqual(ValueError, cm.exception.__class__)
def prepare_imgs():
    """Load three test images and arrange them into two image pairs.

    The second pair is the same image twice on purpose, so the integration tests
    can exercise the model's early-stopping path.
    """
    dataset = load_dataset("hf-internal-testing/image-matching-test-dataset", split="train")
    images = [dataset[index]["image"] for index in range(3)]
    return [[images[2], images[0]], [images[1], images[1]]]
@require_torch
@require_vision
class LightGlueModelIntegrationTest(unittest.TestCase):
    """Slow integration tests running the pretrained ETH-CVG/lightglue_superpoint checkpoint."""
    @cached_property
    def default_image_processor(self):
        # Cached so every test shares one processor instance.
        return AutoImageProcessor.from_pretrained("ETH-CVG/lightglue_superpoint") if is_vision_available() else None
    @slow
    def test_inference(self):
        # Default settings: early stopping and keypoint pruning both active.
        model = LightGlueForKeypointMatching.from_pretrained(
            "ETH-CVG/lightglue_superpoint", attn_implementation="eager"
        ).to(torch_device)
        preprocessor = self.default_image_processor
        images = prepare_imgs()
        inputs = preprocessor(images=images, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs, output_hidden_states=True, output_attentions=True)
        predicted_number_of_matches0 = torch.sum(outputs.matches[0][0] != -1).item()
        predicted_matches_values0 = outputs.matches[0, 0, 10:30]
        predicted_matching_scores_values0 = outputs.matching_scores[0, 0, 10:30]
        predicted_number_of_matches1 = torch.sum(outputs.matches[1][0] != -1).item()
        predicted_matches_values1 = outputs.matches[1, 0, 10:30]
        predicted_matching_scores_values1 = outputs.matching_scores[1, 0, 10:30]
        expected_number_of_matches0 = 866
        expected_matches_values0 = torch.tensor(
            [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
            dtype=torch.int64,
            device=torch_device,
        )
        expected_matching_scores_values0 = torch.tensor(
            [
                0.6188,0.7817,0.5686,0.9353,0.9801,0.9193,0.8632,0.9111,0.9821,0.5496,
                0.9906,0.8682,0.9679,0.9914,0.9318,0.1910,0.9669,0.3240,0.9971,0.9923,
            ],
            device=torch_device
        ) # fmt:skip
        expected_number_of_matches1 = 140
        expected_matches_values1 = torch.tensor(
            [14, -1, -1, 15, 17, 13, -1, -1, -1, -1, -1, -1, 5, -1, -1, 19, -1, 10, -1, 11],
            dtype=torch.int64,
            device=torch_device,
        )
        expected_matching_scores_values1 = torch.tensor(
            [0.3796, 0, 0, 0.3772, 0.4439, 0.2411, 0, 0, 0.0032, 0, 0, 0, 0.2997, 0, 0, 0.6762, 0, 0.8826, 0, 0.5583],
            device=torch_device,
        )
        # expected_early_stopping_layer = 2
        # predicted_early_stopping_layer = torch.max(outputs.prune[1]).item()
        # self.assertEqual(predicted_early_stopping_layer, expected_early_stopping_layer)
        # self.assertEqual(predicted_number_of_matches, expected_second_number_of_matches)
        """
        Because of inconsistencies introduced between CUDA versions, the checks here are less strict. SuperGlue relies
        on SuperPoint, which may, depending on CUDA version, return different number of keypoints (866 or 867 in this
        specific test example). The consequence of having different number of keypoints is that the number of matches
        will also be different. In the 20 first matches being checked, having one keypoint less will result in 1 less
        match. The matching scores will also be different, as the keypoints are different. The checks here are less
        strict to account for these inconsistencies.
        Therefore, the test checks that the predicted number of matches, matches and matching scores are close to the
        expected values, individually. Here, the tolerance of the number of values changing is set to 2.
        This was discussed [here](https://github.com/huggingface/transformers/pull/29886#issuecomment-2482752787)
        Such CUDA inconsistencies can be found
        [here](https://github.com/huggingface/transformers/pull/33200/files#r1785980300)
        """
        # NOTE(review): the note above says the tolerance is 2, but the assertions allow up
        # to 3 mismatches (`< 4`) — confirm which is intended.
        self.assertTrue(abs(predicted_number_of_matches0 - expected_number_of_matches0) < 4)
        self.assertTrue(abs(predicted_number_of_matches1 - expected_number_of_matches1) < 4)
        self.assertTrue(
            torch.sum(~torch.isclose(predicted_matching_scores_values0, expected_matching_scores_values0, atol=1e-2))
            < 4
        )
        self.assertTrue(
            torch.sum(~torch.isclose(predicted_matching_scores_values1, expected_matching_scores_values1, atol=1e-2))
            < 4
        )
        self.assertTrue(torch.sum(predicted_matches_values0 != expected_matches_values0) < 4)
        self.assertTrue(torch.sum(predicted_matches_values1 != expected_matches_values1) < 4)
    @slow
    def test_inference_without_early_stop(self):
        # Same pipeline but with depth_confidence=1.0 (per the test name, disables early stop).
        model = LightGlueForKeypointMatching.from_pretrained(
            "ETH-CVG/lightglue_superpoint", attn_implementation="eager", depth_confidence=1.0
        ).to(torch_device)
        preprocessor = self.default_image_processor
        images = prepare_imgs()
        inputs = preprocessor(images=images, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs, output_hidden_states=True, output_attentions=True)
        predicted_number_of_matches0 = torch.sum(outputs.matches[0][0] != -1).item()
        predicted_matches_values0 = outputs.matches[0, 0, 10:30]
        predicted_matching_scores_values0 = outputs.matching_scores[0, 0, 10:30]
        predicted_number_of_matches1 = torch.sum(outputs.matches[1][0] != -1).item()
        predicted_matches_values1 = outputs.matches[1, 0, 10:30]
        predicted_matching_scores_values1 = outputs.matching_scores[1, 0, 10:30]
        expected_number_of_matches0 = 134
        expected_matches_values0 = torch.tensor(
            [-1, -1, 17, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 19, -1, 10, -1, 11], dtype=torch.int64
        ).to(torch_device)
        expected_matching_scores_values0 = torch.tensor(
            [0.0083, 0, 0.2022, 0.0621, 0, 0.0828, 0, 0, 0.0003, 0, 0, 0, 0.0960, 0, 0, 0.6940, 0, 0.7167, 0, 0.1512]
        ).to(torch_device)
        expected_number_of_matches1 = 862
        expected_matches_values1 = torch.tensor(
            [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29], dtype=torch.int64
        ).to(torch_device)
        expected_matching_scores_values1 = torch.tensor(
            [
                0.4772,
                0.3781,
                0.0631,
                0.9559,
                0.8746,
                0.9271,
                0.4882,
                0.5406,
                0.9439,
                0.1526,
                0.5028,
                0.4107,
                0.5591,
                0.9130,
                0.7572,
                0.0302,
                0.4532,
                0.0893,
                0.9490,
                0.4880,
            ]
        ).to(torch_device)
        # expected_early_stopping_layer = 2
        # predicted_early_stopping_layer = torch.max(outputs.prune[1]).item()
        # self.assertEqual(predicted_early_stopping_layer, expected_early_stopping_layer)
        # self.assertEqual(predicted_number_of_matches, expected_second_number_of_matches)
        """
        Because of inconsistencies introduced between CUDA versions, the checks here are less strict. SuperGlue relies
        on SuperPoint, which may, depending on CUDA version, return different number of keypoints (866 or 867 in this
        specific test example). The consequence of having different number of keypoints is that the number of matches
        will also be different. In the 20 first matches being checked, having one keypoint less will result in 1 less
        match. The matching scores will also be different, as the keypoints are different. The checks here are less
        strict to account for these inconsistencies.
        Therefore, the test checks that the predicted number of matches, matches and matching scores are close to the
        expected values, individually. Here, the tolerance of the number of values changing is set to 2.
        This was discussed [here](https://github.com/huggingface/transformers/pull/29886#issuecomment-2482752787)
        Such CUDA inconsistencies can be found
        [here](https://github.com/huggingface/transformers/pull/33200/files#r1785980300)
        """
        self.assertTrue(abs(predicted_number_of_matches0 - expected_number_of_matches0) < 4)
        self.assertTrue(abs(predicted_number_of_matches1 - expected_number_of_matches1) < 4)
        self.assertTrue(
            torch.sum(~torch.isclose(predicted_matching_scores_values0, expected_matching_scores_values0, atol=1e-2))
            < 4
        )
        self.assertTrue(
            torch.sum(~torch.isclose(predicted_matching_scores_values1, expected_matching_scores_values1, atol=1e-2))
            < 4
        )
        self.assertTrue(torch.sum(predicted_matches_values0 != expected_matches_values0) < 4)
        self.assertTrue(torch.sum(predicted_matches_values1 != expected_matches_values1) < 4)
    @slow
    def test_inference_without_early_stop_and_keypoint_pruning(self):
        # Both confidence thresholds set to 1.0 (per the test name, disables both mechanisms).
        model = LightGlueForKeypointMatching.from_pretrained(
            "ETH-CVG/lightglue_superpoint",
            attn_implementation="eager",
            depth_confidence=1.0,
            width_confidence=1.0,
        ).to(torch_device)
        preprocessor = self.default_image_processor
        images = prepare_imgs()
        inputs = preprocessor(images=images, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs, output_hidden_states=True, output_attentions=True)
        predicted_number_of_matches0 = torch.sum(outputs.matches[0][0] != -1).item()
        predicted_matches_values0 = outputs.matches[0, 0, 10:30]
        predicted_matching_scores_values0 = outputs.matching_scores[0, 0, 10:30]
        predicted_number_of_matches1 = torch.sum(outputs.matches[1][0] != -1).item()
        predicted_matches_values1 = outputs.matches[1, 0, 10:30]
        predicted_matching_scores_values1 = outputs.matching_scores[1, 0, 10:30]
        expected_number_of_matches0 = 143
        expected_matches_values0 = torch.tensor(
            [-1, -1, -1, -1, 17, 13, -1, -1, -1, -1, -1, -1, 5, -1, -1, 19, -1, 10, -1, 11], dtype=torch.int64
        ).to(torch_device)
        # fmt: off
        expected_matching_scores_values0 = torch.tensor(
            [0.0696, 0.0283, 0.0000, 0.0863, 0.2834, 0.2308, 0.0000, 0.0000, 0.0189, 0.0000, 0.0000, 0.0000, 0.1792, 0.0000, 0.0000, 0.8197, 0.0000, 0.8194, 0.0000, 0.3058]
        ).to(torch_device)
        # fmt: on
        expected_number_of_matches1 = 862
        expected_matches_values1 = torch.tensor(
            [10, 11, -1, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, -1, 26, -1, 28, 29], dtype=torch.int64
        ).to(torch_device)
        # fmt: off
        expected_matching_scores_values1 = torch.tensor(
            [0.4744, 0.3749, 0.0628, 0.9572, 0.8744, 0.9277, 0.4843, 0.5365, 0.9441, 0.1519, 0.5004, 0.4058, 0.5569, 0.9113, 0.7525, 0.0301, 0.4510, 0.0892, 0.9483, 0.4815]
        ).to(torch_device)
        # fmt: on
        # expected_early_stopping_layer = 2
        # predicted_early_stopping_layer = torch.max(outputs.prune[1]).item()
        # self.assertEqual(predicted_early_stopping_layer, expected_early_stopping_layer)
        # self.assertEqual(predicted_number_of_matches, expected_second_number_of_matches)
        """
        Because of inconsistencies introduced between CUDA versions, the checks here are less strict. SuperGlue relies
        on SuperPoint, which may, depending on CUDA version, return different number of keypoints (866 or 867 in this
        specific test example). The consequence of having different number of keypoints is that the number of matches
        will also be different. In the 20 first matches being checked, having one keypoint less will result in 1 less
        match. The matching scores will also be different, as the keypoints are different. The checks here are less
        strict to account for these inconsistencies.
        Therefore, the test checks that the predicted number of matches, matches and matching scores are close to the
        expected values, individually. Here, the tolerance of the number of values changing is set to 2.
        This was discussed [here](https://github.com/huggingface/transformers/pull/29886#issuecomment-2482752787)
        Such CUDA inconsistencies can be found
        [here](https://github.com/huggingface/transformers/pull/33200/files#r1785980300)
        """
        self.assertTrue(abs(predicted_number_of_matches0 - expected_number_of_matches0) < 4)
        self.assertTrue(abs(predicted_number_of_matches1 - expected_number_of_matches1) < 4)
        self.assertTrue(
            torch.sum(~torch.isclose(predicted_matching_scores_values0, expected_matching_scores_values0, atol=1e-2))
            < 4
        )
        self.assertTrue(
            torch.sum(~torch.isclose(predicted_matching_scores_values1, expected_matching_scores_values1, atol=1e-2))
            < 4
        )
        self.assertTrue(torch.sum(predicted_matches_values0 != expected_matches_values0) < 4)
        self.assertTrue(torch.sum(predicted_matches_values1 != expected_matches_values1) < 4)
    @slow
    def test_inference_order_with_early_stop(self):
        # Regression test: early stopping must not reorder the per-pair outputs.
        model = LightGlueForKeypointMatching.from_pretrained(
            "ETH-CVG/lightglue_superpoint", attn_implementation="eager"
        ).to(torch_device)
        preprocessor = self.default_image_processor
        images = prepare_imgs()
        # [[image2, image0], [image1, image1]] -> [[image2, image0], [image2, image0], [image1, image1]]
        images = [images[0]] + images  # adding a 3rd pair to test batching with early stopping
        inputs = preprocessor(images=images, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs, output_hidden_states=True, output_attentions=True)
        predicted_number_of_matches_pair0 = torch.sum(outputs.matches[0][0] != -1).item()
        predicted_number_of_matches_pair1 = torch.sum(outputs.matches[1][0] != -1).item()
        predicted_number_of_matches_pair2 = torch.sum(outputs.matches[2][0] != -1).item()
        # pair 0 and 1 are the same, so should have the same number of matches
        # pair 2 is [image1, image1] so should have more matches than first two pairs
        # This ensures that early stopping does not affect the order of the outputs
        # See : https://huggingface.co/ETH-CVG/lightglue_superpoint/discussions/6
        # The bug made the pairs switch order when early stopping was activated
        self.assertTrue(predicted_number_of_matches_pair0 == predicted_number_of_matches_pair1)
        self.assertTrue(predicted_number_of_matches_pair0 < predicted_number_of_matches_pair2)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/lightglue/test_modeling_lightglue.py",
"license": "Apache License 2.0",
"lines": 490,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/vjepa2/convert_vjepa2_classifier_to_hf.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import re
import numpy as np
import torch
from decord import VideoReader
from huggingface_hub import HfApi, hf_hub_download
from transformers import VJEPA2ForVideoClassification, VJEPA2VideoProcessor
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_video():
    """Download a sample bowling clip from the kinetics-mini dataset and return a decord reader for it."""
    video_path = hf_hub_download(
        repo_id="nateraw/kinetics-mini",
        filename="val/bowling/-WH-lxmGJVY_000005_000015.mp4",
        repo_type="dataset",
    )
    return VideoReader(video_path)
# Conversion recipes for each released attentive-probe classifier.
# Each entry records the HF backbone to load, the original checkpoint URL,
# the eval dataset name (used to fetch id2label), and `result` =
# (expected top-1 class id, probability, label) on the sample video, used to
# verify the converted model.
CLASSIFIERS = {
    # Something-Something-v2 dataset
    "vjepa2-vitl-fpc16-256-ssv2": {
        "base_model": "facebook/vjepa2-vitl-fpc64-256",
        "checkpoint": "https://dl.fbaipublicfiles.com/vjepa2/evals/ssv2-vitl-16x2x3.pt",
        "num_labels": 174,
        "frames_per_clip": 16,
        "dataset": "something-something-v2",
        "result": (145, 0.30867, "Stuffing [something] into [something]"),
    },
    "vjepa2-vitg-fpc64-384-ssv2": {
        "base_model": "facebook/vjepa2-vitg-fpc64-384",
        "checkpoint": "https://dl.fbaipublicfiles.com/vjepa2/evals/ssv2-vitg-384-64x2x3.pt",
        "frames_per_clip": 64,
        "num_labels": 174,
        "dataset": "something-something-v2",
        "result": (112, 0.26408, "Putting [something] onto [something]"),
    },
    # Diving48 dataset
    "vjepa2-vitl-fpc32-256-diving48": {
        "base_model": "facebook/vjepa2-vitl-fpc64-256",
        "checkpoint": "https://dl.fbaipublicfiles.com/vjepa2/evals/diving48-vitl-256.pt",
        "num_labels": 48,
        "frames_per_clip": 32,
        "dataset": "diving48",
        "result": (35, 0.32875, "['Inward', '35som', 'NoTwis', 'TUCK']"),
    },
    "vjepa2-vitg-fpc32-384-diving48": {
        "base_model": "facebook/vjepa2-vitg-fpc64-384",
        "checkpoint": "https://dl.fbaipublicfiles.com/vjepa2/evals/diving48-vitg-384-32x4x3.pt",
        "frames_per_clip": 32,
        "num_labels": 48,
        "dataset": "diving48",
        "result": (22, 0.35351, "['Forward', '25som', '2Twis', 'PIKE']"),
    },
}
# fmt: off
# Regex -> replacement rules renaming original probe checkpoint keys to the HF
# module layout (applied by convert_old_keys_to_new_keys below).
# NOTE(review): the "." in the patterns are unescaped regex dots, so each also
# matches an arbitrary character — harmless for these fixed key names.
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
    r"module.pooler.query_tokens": r"pooler.query_tokens",
    r"module.pooler.cross_attention_block.norm(\d+).": r"pooler.cross_attention_layer.layer_norm\1.",
    r"module.pooler.cross_attention_block.xattn.(q|k|v).": r"pooler.cross_attention_layer.cross_attn.\1_proj.",
    r"module.pooler.cross_attention_block.mlp.fc(\d+).": r"pooler.cross_attention_layer.mlp.fc\1.",
    r"module.pooler.blocks.(\d+).norm(\d+).": r"pooler.self_attention_layers.\1.layer_norm\2.",
    r"module.pooler.blocks.(\d+).attn.(q|k|v).": r"pooler.self_attention_layers.\1.self_attn.\2_proj.",
    r"module.pooler.blocks.(\d+).attn.proj.": r"pooler.self_attention_layers.\1.self_attn.out_proj.",
    r"module.pooler.blocks.(\d+).mlp.fc(\d+).": r"pooler.self_attention_layers.\1.mlp.fc\2.",
    r"module.linear.": r"classifier.",
}
# fmt: on
def get_id2label_mapping(dataset_name: str) -> dict[int, str]:
    """Fetch the id -> label mapping for `dataset_name` from the huggingface/label-files dataset."""
    mapping_path = hf_hub_download(
        repo_id="huggingface/label-files",
        filename=f"{dataset_name}-id2label.json",
        repo_type="dataset",
    )
    with open(mapping_path, "r") as f:
        raw_mapping = json.load(f)
    # JSON object keys are strings; callers expect integer class ids.
    return {int(idx): label for idx, label in raw_mapping.items()}
def split_qkv(state_dict):
    """Split fused qkv / kv projection tensors into separate q, k and v entries.

    Returns a shallow copy of `state_dict` in which every ".qkv." tensor is
    chunked into three equal parts along dim 0 (and every ".kv." tensor into
    two), stored under ".q.", ".k." and ".v." keys; other entries are untouched.
    """
    result = state_dict.copy()
    for name in list(result.keys()):
        if ".qkv." in name:
            fused = result.pop(name)
            q_part, k_part, v_part = torch.chunk(fused, 3, dim=0)
            result[name.replace(".qkv.", ".q.")] = q_part
            result[name.replace(".qkv.", ".k.")] = k_part
            result[name.replace(".qkv.", ".v.")] = v_part
        elif ".kv." in name:
            fused = result.pop(name)
            k_part, v_part = torch.chunk(fused, 2, dim=0)
            result[name.replace(".kv.", ".k.")] = k_part
            result[name.replace(".kv.", ".v.")] = v_part
    return result
def convert_old_keys_to_new_keys(state_dict):
    """Build a {old_key: new_key} mapping via ORIGINAL_TO_CONVERTED_KEY_MAPPING.

    All keys are joined into one newline-separated string so that each regex is
    applied once over the whole set instead of once per key.
    """
    joined_old = "\n".join(state_dict)
    joined_new = joined_old
    for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():
        # A None replacement means "drop the match", leaving an empty line.
        joined_new = re.sub(pattern, replacement if replacement is not None else "", joined_new)
    return dict(zip(joined_old.split("\n"), joined_new.split("\n")))
def main(args: argparse.Namespace):
    """Convert one original VJEPA2 classifier checkpoint to HF format.

    Loads the base backbone, converts and loads the attentive-probe classifier
    weights, optionally verifies predictions on a sample video, saves the
    result locally, and optionally pushes it to the Hub.
    """
    model_params = CLASSIFIERS[args.model_name]

    id2label = get_id2label_mapping(model_params["dataset"])
    if not len(id2label) == model_params["num_labels"]:
        raise ValueError(
            f"Number of labels in id2label mapping ({len(id2label)}) does not "
            f"match number of labels in model ({model_params['num_labels']})"
        )

    model = VJEPA2ForVideoClassification.from_pretrained(
        model_params["base_model"],
        num_labels=model_params["num_labels"],
        id2label=id2label,
        frames_per_clip=model_params["frames_per_clip"],
    )
    processor = VJEPA2VideoProcessor.from_pretrained(model_params["base_model"])

    # load and convert classifier checkpoint
    checkpoint = torch.hub.load_state_dict_from_url(model_params["checkpoint"])
    # Original eval checkpoints store a list of probes; index 0 is the released one.
    state_dict = checkpoint["classifiers"][0]
    state_dict_qkv_split = split_qkv(state_dict)
    key_mapping = convert_old_keys_to_new_keys(state_dict_qkv_split.keys())
    converted_state_dict2 = {key_mapping[k]: v for k, v in state_dict_qkv_split.items()}
    # strict=False: backbone weights already come from `from_pretrained`, only
    # classifier/pooler weights are loaded here. Unexpected keys would still
    # indicate a conversion bug, so they are treated as fatal.
    result = model.load_state_dict(converted_state_dict2, strict=False)
    if result.unexpected_keys:
        raise ValueError(f"Error loading state dict: {result.unexpected_keys}")

    if not args.skip_verification:
        # get inputs: evenly sample `frames_per_clip` frames from the first 128
        video_reader = get_video()
        frame_indexes = np.arange(0, 128, 128 / model_params["frames_per_clip"])
        video = video_reader.get_batch(frame_indexes).asnumpy()
        inputs = processor(video, return_tensors="pt").to(device)

        # run model
        model.to(device).eval()
        with torch.no_grad():
            outputs = model(**inputs)

        # compare results against the reference prediction recorded in CLASSIFIERS
        probs = torch.softmax(outputs.logits, dim=-1)
        top_prob, top_idx = probs.topk(1)
        top_prob, top_idx = top_prob.item(), top_idx.item()
        label = id2label[top_idx]

        expected_id, expected_prob, expected_label = model_params["result"]
        if not top_idx == expected_id:
            raise ValueError(f"Expected id {expected_id} but got {top_idx}")
        if not label == expected_label:
            raise ValueError(f"Expected label {expected_label} but got {label}")
        if not np.isclose(top_prob, expected_prob, atol=1e-3):
            raise ValueError(f"Expected prob {expected_prob} but got {top_prob}")
        print("Verification passed")

    output_dir = os.path.join(args.base_dir, args.model_name)
    model.save_pretrained(output_dir)
    processor.save_pretrained(output_dir)

    if args.push_to_hub:
        api = HfApi()
        repo_id = f"{args.repo_org}/{args.model_name}"
        if not api.repo_exists(repo_id):
            api.create_repo(repo_id, repo_type="model")
        api.upload_folder(folder_path=output_dir, repo_id=repo_id, repo_type="model")
if __name__ == "__main__":
    # CLI entry point: convert one classifier checkpoint (see CLASSIFIERS keys).
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True)
    parser.add_argument("--base_dir", type=str, default="converted_models/")
    parser.add_argument("--repo_org", type=str, default="qubvel-hf")
    parser.add_argument("--push_to_hub", action="store_true")
    parser.add_argument("--skip_verification", action="store_true")
    main(parser.parse_args())
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/vjepa2/convert_vjepa2_classifier_to_hf.py",
"license": "Apache License 2.0",
"lines": 186,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/vjepa2/configuration_vjepa2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VJEPA 2 model configuration"""
from ...configuration_utils import PreTrainedConfig
class VJEPA2Config(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`VJEPA2Model`]. It is used to instantiate an
    VJEPA2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the VJEPA2
    [facebook/vjepa2-vitl-fpc64-256](https://huggingface.co/facebook/vjepa2-vitl-fpc64-256) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        crop_size (`int`, *optional*, defaults to 256):
            Input resolution of the model
        frames_per_clip (`int`, *optional*, defaults to 64):
            The number of frames the model has been pretrained with. Does not impact inference.
        tubelet_size (`int`, *optional*, defaults to 2):
            The number of temporal frames used for a single rastor, check paper for more information.
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the encoder layers
        in_chans (`int`, *optional*, defaults to 3):
            The number of input channels
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Encoder
        num_hidden_layers (`int`, *optional*, defaults to 24):
            The number of hidden layers
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            Stochastic depth rate per sample (when applied in the main path of residual layers).
        mlp_ratio (`float`, *optional*, defaults to 4.0):
            Ratio of the hidden size of the MLPs used in Encoder relative to the `hidden_size`.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for attentions.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for attentions.
        num_pooler_layers (`int`, *optional*, defaults to 3):
            The number of self-attention layers in the pooler.
        pred_hidden_size (`int`, *optional*, defaults to 384):
            Dimensionality of the predictor layers
        pred_num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Predictor
        pred_num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Predictor
        pred_num_mask_tokens (`int`, *optional*, defaults to 10):
            Define the number of mask tokens to use in the Predictor
        pred_zero_init_mask_tokens (`bool`, *optional*, defaults to `True`):
            Initialize the mask tokens in the predictor with 0.
        pred_mlp_ratio (`float`, *optional*, defaults to 4.0):
            Ratio of the hidden size of the MLPs used in Predictor relative to the `pred_hidden_size`.

    Example:

    ```python
    >>> from transformers import VJEPA2Config, VJEPA2Model

    >>> # Initializing a VJEPA2 vjepa2-vitl-fpc64-256 style configuration
    >>> configuration = VJEPA2Config()

    >>> # Initializing a model (with random weights) from the vjepa2-vitl-fpc64-256 style configuration
    >>> model = VJEPA2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "vjepa2"

    def __init__(
        self,
        patch_size=16,
        crop_size=256,
        frames_per_clip=64,
        tubelet_size=2,
        hidden_size=1024,
        in_chans=3,
        num_attention_heads=16,
        num_hidden_layers=24,
        drop_path_rate=0.0,
        mlp_ratio=4.0,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_probs_dropout_prob=0.0,
        hidden_act="gelu",
        initializer_range=0.02,
        attention_dropout=0.0,
        num_pooler_layers=3,
        # predictor params
        pred_hidden_size=384,
        pred_num_attention_heads=12,
        pred_num_hidden_layers=12,
        pred_num_mask_tokens=10,
        pred_zero_init_mask_tokens=True,
        pred_mlp_ratio=4.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.crop_size = crop_size
        self.frames_per_clip = frames_per_clip
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.hidden_size = hidden_size
        self.in_chans = in_chans
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.drop_path_rate = drop_path_rate
        self.mlp_ratio = mlp_ratio
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        # `image_size` mirrors `crop_size`; presumably kept for utilities that
        # expect an `image_size` field — verify before relying on it.
        self.image_size = crop_size
        self.attention_dropout = attention_dropout
        self.num_pooler_layers = num_pooler_layers
        # predictor params
        self.pred_hidden_size = pred_hidden_size
        self.pred_num_attention_heads = pred_num_attention_heads
        self.pred_num_hidden_layers = pred_num_hidden_layers
        self.pred_num_mask_tokens = pred_num_mask_tokens
        self.pred_zero_init_mask_tokens = pred_zero_init_mask_tokens
        self.pred_mlp_ratio = pred_mlp_ratio
__all__ = ["VJEPA2Config"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/vjepa2/configuration_vjepa2.py",
"license": "Apache License 2.0",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/vjepa2/convert_vjepa2_to_hf.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import tempfile
from io import BytesIO
from pathlib import Path
import httpx
import numpy as np
import torch
from huggingface_hub import HfApi
from PIL import Image
from transformers import VJEPA2Config, VJEPA2Model, VJEPA2VideoProcessor
from transformers.models.vjepa2.modeling_vjepa2 import apply_masks
# Original repository, loaded through torch.hub to rebuild the reference models.
HUB_REPO = "https://github.com/facebookresearch/vjepa2"
HUB_SOURCE = "github"
# Destination HF Hub repos, keyed by architecture name.
HUB_MODELS = {
    "vit_large": "facebook/vjepa2-vitl-fpc64-256",
    "vit_huge": "facebook/vjepa2-vith-fpc64-256",
    "vit_giant": "facebook/vjepa2-vitg-fpc64-256",
    "vit_giant_384": "facebook/vjepa2-vitg-fpc64-384",
}
# Original checkpoint download URLs, keyed by architecture name.
S3_MODELS = {
    "vit_large": "https://dl.fbaipublicfiles.com/vjepa2/vitl.pt",
    "vit_huge": "https://dl.fbaipublicfiles.com/vjepa2/vith.pt",
    "vit_giant": "https://dl.fbaipublicfiles.com/vjepa2/vitg.pt",
    "vit_giant_384": "https://dl.fbaipublicfiles.com/vjepa2/vitg-384.pt",
}
# Hub auth token for uploads; read once at import time.
TOKEN = os.environ.get("HF_TOKEN", None)
def get_vjepa2_config(model_name):
    """Return the VJEPA2Config matching a released checkpoint architecture.

    Args:
        model_name: one of "vit_large", "vit_huge", "vit_giant", "vit_giant_384".

    Returns:
        A `VJEPA2Config` with the encoder fields of the requested size.

    Raises:
        ValueError: if `model_name` is not a known architecture.
    """
    # Only the encoder fields below differ between the released checkpoints;
    # the predictor configuration and frames_per_clip are shared by all sizes.
    size_specific = {
        "vit_large": dict(crop_size=256, hidden_size=1024, num_attention_heads=16, num_hidden_layers=24, mlp_ratio=4),
        "vit_huge": dict(crop_size=256, hidden_size=1280, num_attention_heads=16, num_hidden_layers=32, mlp_ratio=4),
        "vit_giant": dict(
            crop_size=256, hidden_size=1408, num_attention_heads=22, num_hidden_layers=40, mlp_ratio=48 / 11
        ),
        "vit_giant_384": dict(
            crop_size=384, hidden_size=1408, num_attention_heads=22, num_hidden_layers=40, mlp_ratio=48 / 11
        ),
    }
    if model_name not in size_specific:
        raise ValueError("Model not supported")
    return VJEPA2Config(
        frames_per_clip=64,
        pred_hidden_size=384,
        pred_num_attention_heads=12,
        pred_num_hidden_layers=12,
        pred_num_mask_tokens=10,
        **size_specific[model_name],
    )
def convert_encoder_keys(model_state_dict, og_encoder_state_dict, config):
    """Rename original VJEPA2 encoder keys in place to the HF naming scheme.

    Fused qkv projections are split into separate query/key/value tensors along
    dim 0 using `config.hidden_size`. The dict is mutated in place and also
    returned. `model_state_dict` is unused; it is kept for signature symmetry
    with `convert_predictor_keys`.
    """
    emb_dim = config.hidden_size
    for old_key in list(og_encoder_state_dict.keys()):
        tensor = og_encoder_state_dict.pop(old_key)
        new_key = old_key.replace("module.backbone.", "")
        if new_key.startswith("blocks."):
            new_key = new_key.replace("blocks.", "encoder.layer.")
        if "attn." in new_key:
            new_key = new_key.replace("attn.", "attention.")
        if new_key == "pos_embed":
            new_key = "encoder.embeddings.position_embeddings"
        if "patch_embed." in new_key:
            new_key = new_key.replace("patch_embed.", "encoder.embeddings.patch_embeddings.")
        if new_key.startswith("norm."):
            new_key = new_key.replace("norm.", "encoder.layernorm.")
        if "qkv." in new_key:
            # Fused projection: rows [0, D), [D, 2D), [2D, 3D) are q, k, v.
            prefix, suffix = new_key.split("qkv")
            og_encoder_state_dict[prefix + "query" + suffix] = tensor[0:emb_dim]
            og_encoder_state_dict[prefix + "key" + suffix] = tensor[emb_dim : emb_dim * 2]
            og_encoder_state_dict[prefix + "value" + suffix] = tensor[emb_dim * 2 :]
        else:
            og_encoder_state_dict[new_key] = tensor
    return og_encoder_state_dict
def convert_predictor_keys(model_state_dict, og_predictor_state_dict, config):
    """Rename original VJEPA2 predictor keys in place to the HF naming scheme.

    Fused qkv projections are split into query/key/value tensors, and the
    individually numbered mask tokens are stacked into one tensor stored under
    "predictor.embeddings.mask_tokens". The dict is mutated in place and
    returned. `model_state_dict` is unused here.
    """
    emb_dim = config.pred_hidden_size

    # The stored positional embedding is dropped (not loaded into the HF model).
    if "predictor_pos_embed" in og_predictor_state_dict:
        del og_predictor_state_dict["predictor_pos_embed"]

    # update predictor weights
    mask_tokens = {}  # string index -> mask token tensor, stacked below
    mask_token_keys_to_delete = []
    for key, val in og_predictor_state_dict.copy().items():
        val = og_predictor_state_dict.pop(key)
        key = key.replace("module.backbone.", "")
        if key.startswith("predictor_blocks."):
            key = key.replace("predictor_blocks.", "predictor.layer.")
        if "attn." in key:
            key = key.replace("attn.", "attention.")
        if key == "predictor_pos_embed":
            key = "predictor.embeddings.position_embeddings"
        if "predictor_embed." in key:
            key = key.replace("predictor_embed.", "predictor.embeddings.predictor_embeddings.")
        if "mask_tokens." in key:
            # Collect per-index mask tokens; the original stores one parameter
            # per token index. The key is re-added below (else branch) and then
            # deleted after the loop via mask_token_keys_to_delete.
            mask_tokens[key.split("mask_tokens.")[-1]] = val
            mask_token_keys_to_delete.append(key)
            # key = key.replace("mask_tokens.", "predictor.embeddings.mask_tokens.")
        if key.startswith("predictor_norm."):
            key = key.replace("predictor_norm.", "predictor.layernorm.")
        if key.startswith("predictor_proj."):
            key = key.replace("predictor_proj.", "predictor.proj.")
        if "qkv." in key:
            prefix, suffix = key.split("qkv")
            # Fused projection: rows [0, D), [D, 2D), [2D, 3D) are q, k, v.
            if "bias" in suffix:
                q_e, k_e, v_e = (
                    val[0:emb_dim],
                    val[emb_dim : emb_dim * 2],
                    val[emb_dim * 2 :],
                )
            else:
                q_e, k_e, v_e = (
                    val[0:emb_dim, :],
                    val[emb_dim : emb_dim * 2, :],
                    val[emb_dim * 2 :, :],
                )
            og_predictor_state_dict[prefix + "query" + suffix] = q_e
            og_predictor_state_dict[prefix + "key" + suffix] = k_e
            og_predictor_state_dict[prefix + "value" + suffix] = v_e
        else:
            og_predictor_state_dict[key] = val

    # Stack the per-index mask tokens (ordered by their numeric index) into one
    # (num_tokens, ...) tensor.
    mask_tokens = torch.stack([mask_tokens[f"{i}"] for i in range(len(mask_tokens))], dim=0)
    for k in mask_token_keys_to_delete:
        del og_predictor_state_dict[k]
    og_predictor_state_dict["predictor.embeddings.mask_tokens"] = mask_tokens

    return og_predictor_state_dict
def prepare_img():
    """Fetch the standard COCO test image (two cats) and return it as an RGB PIL image."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    with httpx.stream("GET", url) as response:
        raw = response.read()
    return Image.open(BytesIO(raw)).convert("RGB")
def upload_original_ckpts(model_name):
    """Mirror the original S3 checkpoint into the HF repo under original/model.pth."""
    hf_repo = HUB_MODELS[model_name]
    original_ckpt = S3_MODELS[model_name]

    print(f"Uploading original checkpoint for vjepa2 {model_name} to {hf_repo}/original/")

    # Download to a temp file that lives exactly as long as the upload needs it.
    with tempfile.NamedTemporaryFile() as tmp:
        torch.hub.download_url_to_file(original_ckpt, tmp.name)
        HfApi().upload_file(
            repo_id=hf_repo,
            path_or_fileobj=tmp.name,
            path_in_repo="original/model.pth",
            repo_type="model",
            token=TOKEN,
        )

    print("Uploading complete")
@torch.no_grad()
def convert_and_test_vjepa2_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our VJEPA2 structure.

    Loads the reference encoder/predictor from torch.hub, converts their state
    dicts into the HF `VJEPA2Model` layout, verifies that outputs match on a
    sample image (requires CUDA), and optionally saves/pushes the result.
    """
    config = get_vjepa2_config(model_name)

    # load original model from torch hub
    original_encoder, original_predictor = torch.hub.load(HUB_REPO, "vjepa2_" + model_name, source=HUB_SOURCE)
    original_encoder.eval()
    original_predictor.eval()
    original_preprocessor = torch.hub.load(
        HUB_REPO, "vjepa2_preprocessor", source=HUB_SOURCE, crop_size=config.crop_size
    )

    # load state_dict of original model, remove and rename some keys
    encoder_state_dict = original_encoder.state_dict()
    decoder_state_dict = original_predictor.state_dict()
    model = VJEPA2Model(config).eval()
    state_dict = model.state_dict()
    og_encoder_sd = convert_encoder_keys(state_dict, encoder_state_dict, config)
    og_predictor_sd = convert_predictor_keys(state_dict, decoder_state_dict, config)
    og_state_dict = og_encoder_sd
    og_state_dict.update(og_predictor_sd)
    model.load_state_dict(og_state_dict)

    # load image as a (batch, channels, height, width) tensor
    image = prepare_img()
    image = torch.Tensor(np.array(image)).unsqueeze(0).permute(0, 3, 1, 2)
    print("Input shape: ", image.shape)
    crop_size = config.crop_size
    processor = VJEPA2VideoProcessor(crop_size=crop_size)
    pr_out = processor(image, return_tensors="pt")
    pixel_values_videos = pr_out.pixel_values_videos

    # run original preprocessor and check both preprocessors agree
    # (the original uses a channels-before-frames layout, hence the permute)
    original_pixel_values = original_preprocessor(image)
    assert original_pixel_values[0].permute(1, 0, 2, 3).shape == pixel_values_videos[0].shape
    assert torch.allclose(original_pixel_values[0].permute(1, 0, 2, 3), pixel_values_videos[0], atol=1e-3)

    with torch.no_grad():
        # reshape and move to gpu
        if pixel_values_videos.size(1) == 1:
            # single image: repeat it to form a full clip
            pixel_values_videos = pixel_values_videos.repeat(1, config.frames_per_clip, 1, 1, 1)
        # pixel_values_videos = pixel_values_videos.permute(0, 2, 1, 3, 4)  # B x C x T x H x W
        pixel_values_videos = pixel_values_videos.to(device="cuda", dtype=torch.float32)
        original_encoder = original_encoder.to(device="cuda", dtype=torch.float32)
        original_predictor = original_predictor.to(device="cuda", dtype=torch.float32)
        model = model.to(device="cuda", dtype=torch.float32)

        # forward
        original_encoder_outputs = original_encoder(pixel_values_videos.permute(0, 2, 1, 3, 4))
        B, N, _ = original_encoder_outputs.shape

        # test full mask: context and target both cover every patch index
        context_mask = [torch.arange(N, device=pixel_values_videos.device).unsqueeze(0).repeat((B, 1))]
        predictor_mask = context_mask
        original_predictor_outputs = original_predictor(original_encoder_outputs, context_mask, predictor_mask)

        outputs = model(pixel_values_videos, context_mask=context_mask, target_mask=predictor_mask)
        assert torch.allclose(outputs.last_hidden_state, original_encoder_outputs, atol=1e-3)
        predictor_outputs = outputs.predictor_output
        assert torch.allclose(predictor_outputs.last_hidden_state, original_predictor_outputs, atol=1e-3)

        # test partial mask: context is the first window of patches, target the next
        window_size = 256
        mask = torch.arange(N, device=pixel_values_videos.device).unsqueeze(0)
        context_mask = [mask[:, :window_size].repeat((B, 1))]
        predictor_mask = [mask[:, window_size : window_size * 2].repeat((B, 1))]

        original_predictor_outputs = original_predictor(
            apply_masks(original_encoder_outputs, context_mask),
            context_mask,
            predictor_mask,
        )
        outputs = model(pixel_values_videos, context_mask=context_mask, target_mask=predictor_mask)
        assert torch.allclose(outputs.last_hidden_state, original_encoder_outputs, atol=1e-3)
        predictor_outputs = outputs.predictor_output
        assert torch.allclose(predictor_outputs.last_hidden_state, original_predictor_outputs, atol=1e-3)

    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        name = HUB_MODELS[model_name]
        model.push_to_hub(name, private=True)
        processor.push_to_hub(name, private=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="vit_large",
        type=str,
        choices=[
            "vit_large",
            "vit_huge",
            "vit_giant",
            "vit_giant_384",
        ],
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model to the Hugging Face hub.",
    )
    parser.add_argument("--upload_original", action="store_true", help="upload the original checkpoint")

    args = parser.parse_args()
    # Conversion (and verification) always runs; the original checkpoint is
    # additionally mirrored to the Hub only when requested.
    convert_and_test_vjepa2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
    if args.upload_original:
        upload_original_ckpts(args.model_name)
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/vjepa2/convert_vjepa2_to_hf.py",
"license": "Apache License 2.0",
"lines": 313,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/vjepa2/modeling_vjepa2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from dataclasses import dataclass
import torch
from torch import nn
from ... import initialization as init
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging
from .configuration_vjepa2 import VJEPA2Config
logger = logging.get_logger(__name__)
@dataclass
@auto_docstring(
    custom_intro="""
    VJEPA Predictor outputs that also contains the masked encoder outputs
    """
)
class VJEPA2WithMaskedInputPredictorOutput(ModelOutput):
    r"""
    masked_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*, returned when `context_mask` is provided which is applied on VJEPA2Encoder outputs):
        The masked hidden state of the model.
    target_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*, returned when `target_mask` is provided which is applied on VJEPA2Encoder outputs):
        The target hidden state of the model.
    """

    # Final hidden states of the predictor.
    last_hidden_state: torch.FloatTensor
    masked_hidden_state: torch.FloatTensor | None = None
    # Per-layer hidden states / attention maps; presumably populated only when
    # the corresponding output_* flags are set — verify against the model code.
    hidden_states: tuple[torch.FloatTensor, ...] | None = None
    attentions: tuple[torch.FloatTensor, ...] | None = None
    target_hidden_state: torch.FloatTensor | None = None
@dataclass
@auto_docstring(
    custom_intro="""
    VJEPA outputs that also contains the masked encoder outputs
    Optionally contains the predictor outputs
    """
)
class VJEPA2WithMaskedInputModelOutput(ModelOutput):
    r"""
    masked_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*, returned when `context_mask` is provided which is applied on VJEPA2Encoder outputs):
        The masked hidden state of the model.
    predictor_output (`VJEPA2WithMaskedInputPredictorOutput`, *optional*):
        The output from the Predictor module.
    """

    # Final hidden states of the encoder.
    last_hidden_state: torch.FloatTensor
    masked_hidden_state: torch.FloatTensor | None = None
    hidden_states: tuple[torch.FloatTensor, ...] | None = None
    attentions: tuple[torch.FloatTensor, ...] | None = None
    predictor_output: VJEPA2WithMaskedInputPredictorOutput | None = None

    def to_tuple(self):
        """Convert to a plain tuple, recursively tuple-izing the nested predictor output."""
        output = list(super().to_tuple())
        # The predictor output is the last declared field; unwrap it so the
        # whole structure becomes plain tuples.
        if isinstance(output[-1], VJEPA2WithMaskedInputPredictorOutput):
            output[-1] = output[-1].to_tuple()
        return tuple(output)
class VJEPA2PatchEmbeddings3D(nn.Module):
    """
    Image to Patch Embedding
    """

    def __init__(
        self,
        config: VJEPA2Config,
        hidden_size: int = 1024,
    ):
        super().__init__()
        self.patch_size = config.patch_size
        self.tubelet_size = config.tubelet_size
        self.hidden_size = hidden_size
        # A 3D conv whose kernel equals its stride maps each non-overlapping
        # (tubelet, patch, patch) cell of the video to one hidden_size-dim token.
        self.proj = nn.Conv3d(
            in_channels=config.in_chans,
            out_channels=hidden_size,
            kernel_size=(config.tubelet_size, config.patch_size, config.patch_size),
            stride=(config.tubelet_size, config.patch_size, config.patch_size),
        )

    @staticmethod
    def num_patches(config):
        """Number of tokens produced for a full clip at the configured crop size."""
        temporal_patches = config.frames_per_clip // config.tubelet_size
        spatial_patches = config.crop_size // config.patch_size
        return temporal_patches * spatial_patches * spatial_patches

    def forward(self, pixel_values_videos: torch.Tensor) -> torch.Tensor:
        embeddings = self.proj(pixel_values_videos)
        # (B, hidden, T', H', W') -> (B, T'*H'*W', hidden)
        return embeddings.flatten(2).transpose(1, 2)
class VJEPA2Embeddings(nn.Module):
    """
    Construct mask token, position and patch embeddings.
    """

    def __init__(self, config: VJEPA2Config, hidden_size: int = 1024):
        super().__init__()
        self.config = config
        self.hidden_size = hidden_size
        self.patch_embeddings = VJEPA2PatchEmbeddings3D(config, hidden_size=hidden_size)
        self.num_patches = self.patch_embeddings.num_patches
        self.patch_size = config.patch_size

    def forward(self, pixel_values_videos: torch.Tensor) -> torch.Tensor:
        num_frames = pixel_values_videos.shape[1]

        # (batch, frames, channels, H, W) -> (batch, channels, frames, H, W),
        # the layout expected by the Conv3d patch projection.
        pixel_values_videos = pixel_values_videos.permute(0, 2, 1, 3, 4)

        # Inputs shorter than one tubelet (e.g. a single image) would make the
        # Conv3d kernel not fit; duplicate frames so at least one tubelet exists.
        if num_frames < self.config.tubelet_size:
            pixel_values_videos = pixel_values_videos.repeat(1, 1, self.config.tubelet_size, 1, 1)

        # Match the projection weights' dtype (e.g. under half-precision).
        target_dtype = self.patch_embeddings.proj.weight.dtype
        return self.patch_embeddings(pixel_values_videos.to(dtype=target_dtype))
# Adapted from transformers.models.vit.modeling_vit.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: torch.Tensor | None,
scaling: float,
dropout: float = 0.0,
**kwargs,
):
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
# Normalize the attention scores to probabilities.
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
def rotate_queries_or_keys(x, pos):
    """Apply rotary position embeddings to `x` (batch, heads, seq, dim) using positions `pos`."""
    _, _, _, dim = x.size()

    # Per-channel inverse frequencies, recomputed on every call (the usual HF
    # style would cache an `inv_freq` buffer instead; kept as-is here).
    inv_freq = torch.arange(dim // 2, dtype=x.dtype, device=x.device)
    inv_freq /= dim / 2.0
    inv_freq = 1.0 / 10000**inv_freq  # (dim/2,)

    # Rotation angle per (position, channel): outer product -> (..., N, dim/2).
    angles = pos.unsqueeze(-1) * inv_freq

    # Sin/cos tables tiled over the full channel dimension.
    sin = angles.sin().repeat(1, 1, 1, 2)
    cos = angles.cos().repeat(1, 1, 1, 2)

    # Pair adjacent channels and rotate each pair 90 degrees: (a, b) -> (-b, a).
    pairs = x.unflatten(-1, (-1, 2))
    first, second = pairs.unbind(dim=-1)
    rotated = torch.stack((-second, first), dim=-1).flatten(-2)

    return (x * cos) + (rotated * sin)
class VJEPA2RopeAttention(nn.Module):
    """Multi-head self-attention with 3D rotary position embeddings.
    Each head's channel dimension is split into three equal even-sized sub-blocks,
    rotated by the token's frame, row, and column coordinate respectively; any
    leftover channels are passed through unrotated.
    """
    def __init__(
        self,
        config: VJEPA2Config,
        hidden_size: int = 1024,
        num_attention_heads: int = 16,
    ):
        super().__init__()
        self.config = config
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        if hidden_size % num_attention_heads != 0:
            raise ValueError(
                f"The hidden size {(hidden_size,)} is not a multiple of the number of attention "
                f"heads {num_attention_heads}."
            )
        self.attention_head_size = int(hidden_size / num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.proj = nn.Linear(hidden_size, hidden_size)
        self.dropout_prob = config.attention_probs_dropout_prob
        self.dropout = nn.Dropout(self.dropout_prob)
        # Token grid implied by the configured crop/clip sizes (spatial side, temporal depth).
        self.grid_size = self.config.crop_size // self.config.patch_size
        self.grid_depth = self.config.frames_per_clip // self.config.tubelet_size
        # Per-axis rotary sub-dimension: the largest even number <= head_size / 3.
        self.d_dim = int(2 * ((self.attention_head_size // 3) // 2))
        self.h_dim = int(2 * ((self.attention_head_size // 3) // 2))
        self.w_dim = int(2 * ((self.attention_head_size // 3) // 2))
        self.scaling = self.attention_head_size**-0.5
        self.is_causal = False
    def _get_frame_pos(self, ids):
        # Temporal (frame) coordinate of each flattened token id.
        tokens_per_frame = int(self.grid_size * self.grid_size)
        return ids // tokens_per_frame
    def _get_height_pos(self, ids):
        # Row coordinate of each flattened token id.
        # Remove frame component from ids
        tokens_per_frame = int(self.grid_size * self.grid_size)
        frame_ids = self._get_frame_pos(ids)
        ids = ids - tokens_per_frame * frame_ids
        # --
        tokens_per_row = self.grid_size
        return ids // tokens_per_row
    def get_position_ids(self, x, masks=None):
        # Decompose flattened token ids into (frame, height, width) coordinates.
        device = x.device
        token_size = x.size(1)
        # Note: when masks is none, we use a 1d id instead of Bxnum_attention_heads mask,
        # as 1d vector is broadcasted to the correct shapes.
        if masks is not None:
            ids = masks.unsqueeze(1).repeat(1, self.num_attention_heads, 1)
        else:
            ids = torch.arange(token_size, device=device)
        # change to allow for extrapolation
        tokens_per_frame = int(self.grid_size * self.grid_size)
        frame_ids = self._get_frame_pos(ids)
        # --
        tokens_per_row = self.grid_size
        height_ids = self._get_height_pos(ids)
        # --
        # Remove frame component from ids (1st term) and height component (2nd term)
        width_ids = (ids - tokens_per_frame * frame_ids) - tokens_per_row * height_ids
        return frame_ids, height_ids, width_ids
    def apply_rotary_embeddings(self, qk, pos_ids):
        # Rotate the first d_dim channels by frame position, the next h_dim by row,
        # the next w_dim by column; leftover channels (when head size is not a
        # multiple of 3) are concatenated back unrotated.
        d_mask, h_mask, w_mask = pos_ids
        s = 0
        qkd = rotate_queries_or_keys(qk[..., s : s + self.d_dim], pos=d_mask)
        s += self.d_dim
        qkh = rotate_queries_or_keys(qk[..., s : s + self.h_dim], pos=h_mask)
        s += self.h_dim
        qkw = rotate_queries_or_keys(qk[..., s : s + self.w_dim], pos=w_mask)
        s += self.w_dim
        # Combine rotated dimension
        if s < self.attention_head_size:
            qkr = qk[..., s:]
            qk = torch.cat([qkd, qkh, qkw, qkr], dim=-1)
        else:
            qk = torch.cat([qkd, qkh, qkw], dim=-1)
        return qk
    def forward(
        self,
        hidden_states,
        position_mask: torch.Tensor | None = None,
        output_attentions: bool = False,
    ) -> tuple[torch.Tensor, torch.Tensor] | tuple[torch.Tensor]:
        batch_size, seq_length, _ = hidden_states.shape
        # Project and reshape to (batch, heads, seq, head_dim).
        query_layer = (
            self.query(hidden_states)
            .view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
            .transpose(1, 2)
        )
        key_layer = (
            self.key(hidden_states)
            .view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
            .transpose(1, 2)
        )
        value_layer = (
            self.value(hidden_states)
            .view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
            .transpose(1, 2)
        )
        # Rotary embeddings are applied to queries and keys only (not values).
        pos_ids = self.get_position_ids(hidden_states, masks=position_mask)
        key_layer = self.apply_rotary_embeddings(key_layer, pos_ids)
        query_layer = self.apply_rotary_embeddings(query_layer, pos_ids)
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        context_layer, attention_probs = attention_interface(
            self,
            query_layer,
            key_layer,
            value_layer,
            None,
            is_causal=self.is_causal,
            scaling=self.scaling,
            dropout=0.0 if not self.training else self.dropout_prob,
        )
        # Merge heads back and apply the output projection.
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = self.proj(context_layer.reshape(new_context_layer_shape))
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
# Adapted from transformers.models.beit.modeling_dinov2.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Each sample in the batch is independently zeroed with probability `drop_prob`,
    and surviving samples are rescaled by 1 / keep_prob so the expectation is
    unchanged. Outside training (or with `drop_prob == 0`) the input is returned
    untouched.
    """
    if not training or drop_prob == 0.0:
        return input
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims
    # (works with tensors of any rank, not just 2D ConvNets).
    mask_shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    mask = keep_prob + torch.rand(mask_shape, dtype=input.dtype, device=input.device)
    mask.floor_()  # binarize to {0, 1}
    return input.div(keep_prob) * mask
# Adapted from transformers.models.beit.modeling_beit.BeitDropPath
class VJEPA2DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
    def __init__(self, drop_prob: float | None = None):
        super().__init__()
        self.drop_prob = drop_prob
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Delegates to the functional `drop_path`; only active while `self.training` is True.
        return drop_path(hidden_states, self.drop_prob, self.training)
    def extra_repr(self) -> str:
        # Shown in the module repr, e.g. "VJEPA2DropPath(p=0.1)".
        return f"p={self.drop_prob}"
class VJEPA2MLP(nn.Module):
    """Standard two-layer transformer feed-forward block: Linear -> activation -> Linear."""

    def __init__(self, config: VJEPA2Config, hidden_size: int = 1024, mlp_ratio: float = 4.0):
        super().__init__()
        # Expand to hidden_size * mlp_ratio, then project back to hidden_size.
        intermediate_size = int(hidden_size * mlp_ratio)
        self.fc1 = nn.Linear(hidden_size, intermediate_size, bias=True)
        self.activation = ACT2FN[config.hidden_act]
        self.fc2 = nn.Linear(intermediate_size, hidden_size, bias=True)

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        return self.fc2(self.activation(self.fc1(hidden_state)))
class VJEPA2Layer(GradientCheckpointingLayer):
    """This corresponds to the Block class in the original implementation:
    pre-norm self-attention and pre-norm MLP, each with a residual connection
    and optional stochastic depth."""

    def __init__(
        self,
        config: VJEPA2Config,
        drop_path_rate: float = 0.0,
        hidden_size: int = 1024,
        num_attention_heads: int = 16,
        mlp_ratio: float = 4.0,
    ):
        super().__init__()
        self.config = config
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.norm1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
        self.attention = VJEPA2RopeAttention(config, hidden_size, num_attention_heads)
        self.drop_path = VJEPA2DropPath(drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
        self.norm2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
        self.mlp = VJEPA2MLP(config, hidden_size=hidden_size, mlp_ratio=mlp_ratio)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_mask: torch.Tensor | None = None,
        output_attentions: bool = False,
    ) -> tuple[torch.Tensor, ...]:
        # Attention sub-block: pre-norm, attend, stochastic depth, residual.
        attn_outputs = self.attention(
            self.norm1(hidden_states),
            position_mask=position_mask,  # position mask for context/target selection
            output_attentions=output_attentions,
        )
        hidden_states = hidden_states + self.drop_path(attn_outputs[0])

        # MLP sub-block: pre-norm, MLP, stochastic depth, residual.
        hidden_states = hidden_states + self.drop_path(self.mlp(self.norm2(hidden_states)))

        # Prepend the updated hidden states; attention weights follow when requested.
        return (hidden_states,) + attn_outputs[1:]
class VJEPA2Encoder(nn.Module):
    """Stack of `VJEPA2Layer`s over patch embeddings with a final LayerNorm.

    Stochastic-depth rates grow linearly from 0 to `config.drop_path_rate`
    over the depth of the stack.
    """

    def __init__(self, config: VJEPA2Config):
        super().__init__()
        self.config = config
        self.embeddings = VJEPA2Embeddings(config, hidden_size=config.hidden_size)
        depth = config.num_hidden_layers
        drop_path_rates = [
            config.drop_path_rate * i / (depth - 1) if depth > 1 else 0.0 for i in range(depth)
        ]
        self.layer = nn.ModuleList(
            [
                VJEPA2Layer(
                    config,
                    drop_path_rate=rate,
                    hidden_size=config.hidden_size,
                    num_attention_heads=config.num_attention_heads,
                    mlp_ratio=config.mlp_ratio,
                )
                for rate in drop_path_rates
            ]
        )
        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.gradient_checkpointing = False

    @can_return_tuple
    def forward(
        self,
        pixel_values_videos: torch.Tensor | None = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        **kwargs,
    ) -> BaseModelOutput:
        collected_states = () if output_hidden_states else None
        collected_attentions = () if output_attentions else None

        hidden_states = self.embeddings(pixel_values_videos)
        for layer_module in self.layer:
            # Record the input to each layer (pre-layer hidden states).
            if output_hidden_states:
                collected_states = collected_states + (hidden_states,)
            layer_outputs = layer_module(hidden_states, None, output_attentions)
            hidden_states = layer_outputs[0]
            if output_attentions:
                collected_attentions = collected_attentions + (layer_outputs[1],)

        hidden_states = self.layernorm(hidden_states)
        if output_hidden_states:
            collected_states = collected_states + (hidden_states,)

        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=collected_states,
            attentions=collected_attentions,
        )
def apply_masks(tensor: torch.Tensor, masks: list[torch.Tensor]) -> torch.Tensor:
    """
    Gather the patches selected by each mask and stack the results along the batch dim.

    Args:
        tensor (`torch.Tensor`):
            Tensor of shape [batch_size, num_patches, feature_dim]
        masks (`List[torch.Tensor]`):
            List of tensors of shape [batch_size, num_patches] containing indices of patches to keep

    Returns:
        Tensor of shape [batch_size * len(masks), num_kept_patches, feature_dim].
    """
    feature_dim = tensor.size(-1)
    gathered = []
    for mask in masks:
        # Expand the indices over the feature dim so gather keeps whole patch vectors.
        index = mask.to(tensor.device).unsqueeze(-1).repeat(1, 1, feature_dim)
        gathered.append(torch.gather(tensor, dim=1, index=index))
    return torch.cat(gathered, dim=0)
class VJEPA2PredictorEmbeddings(nn.Module):
    """
    Construct mask token, position and patch embeddings.
    """
    def __init__(self, config: VJEPA2Config):
        super().__init__()
        self.config = config
        # Projects encoder outputs into the predictor's (typically smaller) hidden space.
        self.predictor_embeddings = nn.Linear(config.hidden_size, config.pred_hidden_size)
        self.num_mask_tokens = 0
        self.zero_init_mask_tokens = config.pred_zero_init_mask_tokens
        self.num_mask_tokens = config.pred_num_mask_tokens
        # One learnable mask token per bank; initialization happens in _init_weights.
        self.mask_tokens = nn.Parameter(torch.zeros(self.num_mask_tokens, 1, 1, config.pred_hidden_size))
        self.patch_size = config.patch_size
        self.config = config
    @staticmethod
    def num_patches(config):
        # Videos yield (T/tubelet) * (H/patch) * (W/patch) tokens; single frames drop the temporal term.
        if config.frames_per_clip > 1:
            return (
                (config.frames_per_clip // config.tubelet_size)
                * (config.crop_size // config.patch_size)
                * (config.crop_size // config.patch_size)
            )
        else:
            return (config.crop_size // config.patch_size) * (config.crop_size // config.patch_size)
    def forward(
        self,
        hidden_states: torch.Tensor,
        context_mask: list[torch.Tensor],
        target_mask: list[torch.Tensor],
        mask_index: int = 1,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        hidden_states : encoder outputs (context)
        context_mask: tokens of the context (outputs from the encoder)
        target_mask: tokens to predict
        mask_index: index of the target mask to choose (useful for multiclip?)
        """
        B = hidden_states.size(0)
        context = self.predictor_embeddings(hidden_states)
        # Make target tokens
        mask_index = mask_index % self.num_mask_tokens
        target = self.mask_tokens[mask_index]
        # Note: this is problematic if the config isn't initialized with the right frames_per_clip value,
        # e.g. for scenarios if we want to run predictor for more tokens than in the config.
        # target = target.repeat(B, self.num_patches(self.config), 1)
        # Remedy: use the provided target mask to get the max patch num
        max_patch_num = target_mask[0].max() + 1  # one extra to include the last patch
        target = target.repeat(B, max_patch_num, 1)
        target = apply_masks(target, target_mask)
        # Concatenate context & target tokens
        context = context.repeat(len(context_mask), 1, 1)
        embeddings = torch.cat([context, target], dim=1)
        # Positions of context & target tokens
        cm = torch.cat(context_mask, dim=0)
        tm = torch.cat(target_mask, dim=0)
        masks = torch.cat([cm, tm], dim=1)
        return embeddings, masks
class VJEPA2Predictor(nn.Module):
    """Predictor: a narrower transformer stack that maps context tokens (plus mask
    tokens) to predictions of the target tokens in the encoder's embedding space."""
    def __init__(self, config: VJEPA2Config):
        super().__init__()
        self.config = config
        self.gradient_checkpointing = False
        self.embeddings = VJEPA2PredictorEmbeddings(config)
        # Stochastic-depth rates grow linearly from 0 to drop_path_rate over depth.
        drop_path_rates = [
            (
                config.drop_path_rate * i / (config.pred_num_hidden_layers - 1)
                if config.pred_num_hidden_layers > 1
                else 0.0
            )
            for i in range(config.pred_num_hidden_layers)
        ]
        self.layer = nn.ModuleList(
            [
                VJEPA2Layer(
                    config,
                    drop_path_rate=drop_path_rates[i],
                    hidden_size=config.pred_hidden_size,
                    num_attention_heads=config.pred_num_attention_heads,
                    mlp_ratio=config.pred_mlp_ratio,
                )
                for i in range(config.pred_num_hidden_layers)
            ]
        )
        self.layernorm = nn.LayerNorm(config.pred_hidden_size, eps=config.layer_norm_eps)
        # Projects predictor outputs back to the encoder's hidden size for the loss.
        self.proj = nn.Linear(config.pred_hidden_size, config.hidden_size, bias=True)
    def sort_tokens(self, hidden_states, position_masks, argsort):
        # Reorder tokens (and their position ids) into ascending position order.
        # gather position masks
        argsort = argsort.to(position_masks.device)
        position_masks = torch.gather(position_masks, dim=1, index=argsort)
        # gather hidden states
        argsort = argsort.to(hidden_states.device)
        hidden_states_argsort = argsort.unsqueeze(-1).expand(-1, -1, hidden_states.size(-1))
        hidden_states = torch.gather(hidden_states, dim=1, index=hidden_states_argsort)
        return hidden_states, position_masks
    def unsort_tokens(self, hidden_states, argsort):
        # Invert the permutation applied by sort_tokens.
        argsort = argsort.to(hidden_states.device)
        reverse_argsort = torch.argsort(argsort, dim=1)
        reverse_argsort = reverse_argsort.unsqueeze(-1).expand(-1, -1, hidden_states.size(-1))
        hidden_states = torch.gather(hidden_states, dim=1, index=reverse_argsort)
        return hidden_states
    @can_return_tuple
    def forward(
        self,
        encoder_hidden_states: torch.Tensor,
        context_mask: list[torch.Tensor],
        target_mask: list[torch.Tensor],
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        **kwargs,
    ) -> BaseModelOutput:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        # mask out the encoder hidden states
        # this is implemented here as in VJEPA training a separate encoder is used for target
        encoder_hidden_states = apply_masks(encoder_hidden_states, context_mask)
        _, N_ctxt, D = encoder_hidden_states.shape
        hidden_states, position_masks = self.embeddings(encoder_hidden_states, context_mask, target_mask)
        # Put tokens in sorted order
        argsort = torch.argsort(position_masks, dim=1)  # [B, N]
        hidden_states, position_masks = self.sort_tokens(hidden_states, position_masks, argsort)
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(hidden_states, position_masks, output_attentions)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        hidden_states = self.layernorm(hidden_states)
        # unsort and extract the predicted tokens
        # (context tokens occupy the first N_ctxt positions after unsorting)
        hidden_states = self.unsort_tokens(hidden_states, argsort)
        hidden_states = hidden_states[:, N_ctxt:]
        # projection
        hidden_states = self.proj(hidden_states)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
class VJEPA2PoolerSelfAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: VJEPA2Config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout
        self.is_causal = False

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        output_attentions: bool | None = False,
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """Input shape: Batch x Time x Channel"""
        batch_size, seq_length, embed_dim = hidden_states.shape

        def split_heads(projected: torch.Tensor) -> torch.Tensor:
            # (B, T, E) -> (B, num_heads, T, head_dim)
            return projected.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)

        queries = split_heads(self.q_proj(hidden_states))
        keys = split_heads(self.k_proj(hidden_states))
        values = split_heads(self.v_proj(hidden_states))

        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )

        attn_output, attn_weights = attention_interface(
            self,
            queries,
            keys,
            values,
            attention_mask,
            is_causal=self.is_causal,
            scaling=self.scale,
            dropout=self.dropout if self.training else 0.0,
        )

        # Merge heads and apply the output projection.
        attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
        attn_output = self.out_proj(attn_output)

        return attn_output, (attn_weights if output_attentions else None)
class VJEPA2PoolerCrossAttention(nn.Module):
    """It's different from other cross-attention layers, doesn't have output projection layer (o_proj)"""

    # in case of modular refactoring - o_proj can be replaces with nn.Identity()
    def __init__(self, config: VJEPA2Config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout
        self.is_causal = False

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def forward(
        self,
        queries: torch.Tensor,
        keys: torch.Tensor,
        values: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        output_attentions: bool | None = False,
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """Input shape: Batch x Time x Channel"""
        batch_size, q_seq_length, embed_dim = queries.shape
        kv_seq_length = keys.shape[1]

        def split_heads(projected: torch.Tensor, length: int) -> torch.Tensor:
            # (B, T, E) -> (B, num_heads, T, head_dim)
            return projected.view(batch_size, length, self.num_heads, self.head_dim).transpose(1, 2)

        queries = split_heads(self.q_proj(queries), q_seq_length)
        keys = split_heads(self.k_proj(keys), kv_seq_length)
        values = split_heads(self.v_proj(values), kv_seq_length)

        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )

        attn_output, attn_weights = attention_interface(
            self,
            queries,
            keys,
            values,
            attention_mask,
            is_causal=self.is_causal,
            scaling=self.scale,
            dropout=self.dropout if self.training else 0.0,
        )

        # Merge heads; note there is deliberately no output projection here.
        attn_output = attn_output.reshape(batch_size, q_seq_length, embed_dim).contiguous()

        return attn_output, (attn_weights if output_attentions else None)
# Modified from SiglipEncoderLayer, but we have to propagate proper hidden_size to VJEPA2MLP
class VJEPA2PoolerSelfAttentionLayer(GradientCheckpointingLayer):
    def __init__(self, config: VJEPA2Config):
        super().__init__()
        self.layer_norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.self_attn = VJEPA2PoolerSelfAttention(config)
        self.layer_norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.mlp = VJEPA2MLP(config, hidden_size=config.hidden_size)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        output_attentions: bool | None = False,
    ) -> tuple[torch.Tensor, ...]:
        """
        Args:
            hidden_states (`torch.FloatTensor`):
                Input to the layer of shape `(batch, seq_len, embed_dim)`.
            attention_mask (`torch.FloatTensor`):
                Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        # Pre-norm self-attention with residual connection.
        attn_out, attn_weights = self.self_attn(
            hidden_states=self.layer_norm1(hidden_states),
            attention_mask=attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = hidden_states + attn_out

        # Pre-norm MLP with residual connection.
        hidden_states = hidden_states + self.mlp(self.layer_norm2(hidden_states))

        return (hidden_states, attn_weights) if output_attentions else (hidden_states,)
class VJEPA2PoolerCrossAttentionLayer(GradientCheckpointingLayer):
    """Cross-attention layer of the attentive pooler: query tokens attend over the token sequence."""

    def __init__(self, config: VJEPA2Config):
        super().__init__()
        self.layer_norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.cross_attn = VJEPA2PoolerCrossAttention(config)
        self.layer_norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.mlp = VJEPA2MLP(config, hidden_size=config.hidden_size)

    def forward(
        self,
        queries: torch.Tensor,
        hidden_state: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        output_attentions: bool = False,
    ) -> tuple[torch.Tensor, ...]:
        # Cross-attention: only the keys/values are normalized here; the
        # residual comes from the (un-normalized) queries.
        normed_kv = self.layer_norm1(hidden_state)
        attn_output, attn_weights = self.cross_attn(
            queries,
            normed_kv,
            normed_kv,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
        )
        hidden_state = queries + attn_output

        # Pre-norm MLP with residual connection.
        hidden_state = hidden_state + self.mlp(self.layer_norm2(hidden_state))

        return (hidden_state, attn_weights) if output_attentions else (hidden_state,)
class VJEPA2AttentivePooler(nn.Module):
    """Attentive Pooler"""

    def __init__(self, config: VJEPA2Config):
        super().__init__()
        self.query_tokens = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        self.cross_attention_layer = VJEPA2PoolerCrossAttentionLayer(config)
        self.self_attention_layers = nn.ModuleList(
            [VJEPA2PoolerSelfAttentionLayer(config) for _ in range(config.num_pooler_layers)]
        )

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        # Refine the token sequence with a stack of self-attention layers.
        for self_attn_layer in self.self_attention_layers:
            hidden_state = self_attn_layer(hidden_state, attention_mask=None)[0]
        # A single learned query pools the whole sequence via cross-attention.
        batch_queries = self.query_tokens.repeat(hidden_state.shape[0], 1, 1)
        pooled = self.cross_attention_layer(batch_queries, hidden_state)[0]
        return pooled.squeeze(1)
@auto_docstring
class VJEPA2PreTrainedModel(PreTrainedModel):
    # Base-class wiring: config type, checkpoint prefix, and capability flags.
    config: VJEPA2Config
    base_model_prefix = "vjepa2"
    main_input_name = "pixel_values_videos"
    input_modalities = "video"
    supports_gradient_checkpointing = True
    _no_split_modules = [
        "VJEPA2Layer",
        "VJEPA2PoolerSelfAttentionLayer",
        "VJEPA2PoolerCrossAttentionLayer",
        "VJEPA2PredictorEmbeddings",
    ]
    _supports_sdpa = True
    _supports_flash_attn = True
    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize the weights"""
        init_std = self.config.initializer_range
        if isinstance(module, VJEPA2AttentivePooler):
            init.trunc_normal_(module.query_tokens, std=init_std)
            # Residual-branch projections use a depth-scaled std (init_std / sqrt(layer_index)),
            # presumably matching the original attentive-probe code — confirm against upstream.
            for i, layer in enumerate(module.self_attention_layers, 1):
                std = init_std / (i**0.5)
                init.trunc_normal_(layer.self_attn.out_proj.weight, std=std)
                init.trunc_normal_(layer.mlp.fc2.weight, std=std)
            std = init_std / (len(module.self_attention_layers) + 1) ** 0.5
            init.trunc_normal_(module.cross_attention_layer.mlp.fc2.weight, std=std)
        elif isinstance(module, VJEPA2PredictorEmbeddings):
            # Mask tokens start at zero or truncated-normal, depending on the config flag.
            if module.zero_init_mask_tokens:
                init.zeros_(module.mask_tokens)
            else:
                init.trunc_normal_(module.mask_tokens, std=init_std)
        elif isinstance(module, (nn.Linear, nn.Conv2d, nn.Conv3d)):
            init.trunc_normal_(module.weight, std=init_std)
            if module.bias is not None:
                init.zeros_(module.bias)
        elif isinstance(module, nn.LayerNorm):
            init.zeros_(module.bias)
            init.ones_(module.weight)
@auto_docstring
class VJEPA2Model(VJEPA2PreTrainedModel):
    # Bare V-JEPA 2 model: encoder over video patches plus an optional predictor pass.
    def __init__(self, config: VJEPA2Config):
        super().__init__(config)
        self.config = config
        self.encoder = VJEPA2Encoder(config)
        self.predictor = VJEPA2Predictor(config)
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self) -> VJEPA2PatchEmbeddings3D:
        return self.encoder.embeddings.patch_embeddings
    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values_videos: torch.Tensor,
        context_mask: list[torch.Tensor] | None = None,
        target_mask: list[torch.Tensor] | None = None,
        skip_predictor: bool = False,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        **kwargs,
    ) -> VJEPA2WithMaskedInputModelOutput:
        r"""
        context_mask (`torch.Tensor` with shape `[batch_size, patch_size, 1]`, *optional*):
            The mask position ids indicating which encoder output patches are going to be exposed to the predictor.
            By default, this mask is created as torch.arange(N).unsqueeze(0).repeat(B,1), indicating full context
            available to the predictor.
        target_mask (`torch.Tensor` with shape `[batch_size, patch_size, 1]`, *optional*):
            The mask position ids indicating which encoder output patches are going to be used as a prediction target
            for the predictor. By default, this mask is created as torch.arange(N).unsqueeze(0).repeat(B,1), indicating
            that the predictor should predict all encoder patches.
        skip_predictor (bool):
            flag to skip the predictor forward, useful if you just need the encoder outputs
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        if pixel_values_videos is None:
            raise ValueError("You have to specify pixel_values_videos")
        # Encode the full video into patch-level representations.
        encoder_outputs: BaseModelOutput = self.encoder(
            pixel_values_videos=pixel_values_videos,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = encoder_outputs.last_hidden_state
        # Default masks: expose the full context and predict every patch.
        if context_mask is None and target_mask is None:
            B = pixel_values_videos.size(0)
            N = sequence_output.size(1)  # ensure we are using dynamic patch size
            context_mask = [torch.arange(N, device=pixel_values_videos.device).unsqueeze(0).repeat((B, 1))]
            target_mask = [torch.arange(N, device=pixel_values_videos.device).unsqueeze(0).repeat((B, 1))]
        if not skip_predictor:
            predictor_outputs: BaseModelOutput = self.predictor(
                encoder_hidden_states=sequence_output,
                context_mask=context_mask,
                target_mask=target_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
            )
            predictor_output = VJEPA2WithMaskedInputPredictorOutput(
                last_hidden_state=predictor_outputs.last_hidden_state,
                target_hidden_state=apply_masks(sequence_output, target_mask),
                hidden_states=predictor_outputs.hidden_states,
                attentions=predictor_outputs.attentions,
            )
        else:
            predictor_output = None
        encoder_output = VJEPA2WithMaskedInputModelOutput(
            last_hidden_state=sequence_output,
            masked_hidden_state=apply_masks(sequence_output, context_mask),
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            predictor_output=predictor_output,
        )
        return encoder_output
    def get_vision_features(self, pixel_values_videos) -> torch.Tensor:
        # Convenience helper: encoder-only forward (the predictor is skipped).
        encoder_output = self.forward(pixel_values_videos, skip_predictor=True)
        return encoder_output.last_hidden_state
@auto_docstring(
    custom_intro="""
    V-JEPA 2 Model transformer with a video classification head on top (a linear layer on top of the attentive pooler).
    """
)
class VJEPA2ForVideoClassification(VJEPA2PreTrainedModel):
    def __init__(self, config: VJEPA2Config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.vjepa2 = VJEPA2Model(config)
        # Classifier head
        self.pooler = VJEPA2AttentivePooler(config)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels, bias=True)
        # Initialize weights and apply final processing
        self.post_init()
    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values_videos: torch.Tensor,
        labels: torch.Tensor | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        **kwargs,
    ) -> tuple | ImageClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        Examples:
        ```python
        >>> import torch
        >>> import numpy as np
        >>> from transformers import AutoVideoProcessor, VJEPA2ForVideoClassification
        >>> device = "cuda"
        >>> video_processor = AutoVideoProcessor.from_pretrained("facebook/vjepa2-vitl-fpc16-256-ssv2")
        >>> model = VJEPA2ForVideoClassification.from_pretrained("facebook/vjepa2-vitl-fpc16-256-ssv2").to(device)
        >>> video = np.ones((64, 256, 256, 3))  # 64 frames, 256x256 RGB
        >>> inputs = video_processor(video, return_tensors="pt").to(device)
        >>> # For inference
        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        >>> logits = outputs.logits
        >>> predicted_label = logits.argmax(-1).item()
        >>> print(model.config.id2label[predicted_label])
        >>> # For training
        >>> labels = torch.ones(1, dtype=torch.long, device=device)
        >>> loss = model(**inputs, labels=labels).loss
        ```"""
        # Encoder-only forward; the predictor is not needed for classification.
        outputs = self.vjepa2(
            pixel_values_videos=pixel_values_videos,
            skip_predictor=True,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        last_hidden_state = outputs.last_hidden_state
        # Attentive pooling to one vector per clip, then a linear classifier.
        pooler_output = self.pooler(last_hidden_state)
        logits = self.classifier(pooler_output)
        loss = None
        if labels is not None:
            loss = self.loss_function(pooled_logits=logits, labels=labels, config=self.config)
        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
__all__ = [
    "VJEPA2Model",
    "VJEPA2PreTrainedModel",
    "VJEPA2ForVideoClassification",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/vjepa2/modeling_vjepa2.py",
"license": "Apache License 2.0",
"lines": 946,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/vjepa2/video_processing_vjepa2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Video processor class for VJEPA2."""
from ...image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
from ...processing_utils import Unpack, VideosKwargs
from ...video_processing_utils import BaseVideoProcessor
class VJEPA2VideoProcessor(BaseVideoProcessor):
    """Fast video processor for V-JEPA 2: resize (shortest edge), rescale, center-crop, normalize."""

    resample = PILImageResampling.BILINEAR
    image_mean = IMAGENET_DEFAULT_MEAN
    image_std = IMAGENET_DEFAULT_STD
    size = {"shortest_edge": int(256 * 256 / 224)}
    crop_size = 256
    do_resize = True
    do_rescale = True
    do_center_crop = True
    do_normalize = True

    def __init__(self, **kwargs: Unpack[VideosKwargs]):
        # Derive the resize shortest-edge from the requested crop size, preserving the
        # conventional 256/224 ratio between the resize and crop resolutions.
        requested_crop = kwargs.get("crop_size", 256)
        if not isinstance(requested_crop, int):
            if not isinstance(requested_crop, dict) or "height" not in requested_crop:
                raise ValueError("crop_size must be an integer or a dictionary with a 'height' key")
            requested_crop = requested_crop["height"]
        kwargs["size"] = {"shortest_edge": int(requested_crop * 256 / 224)}
        super().__init__(**kwargs)
__all__ = [
    "VJEPA2VideoProcessor",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/vjepa2/video_processing_vjepa2.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/vjepa2/test_modeling_vjepa2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch V-JEPA2 model."""
import unittest
from functools import cached_property
import numpy as np
from transformers import VJEPA2Config
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
from ...test_video_processing_common import (
prepare_video_inputs,
)
if is_torch_available():
import torch
from torch import nn
from transformers import VJEPA2ForVideoClassification, VJEPA2Model
if is_vision_available():
from PIL import Image
from transformers import AutoVideoProcessor
# Hub checkpoint used by the @slow integration tests below.
VJEPA_HF_MODEL = "facebook/vjepa2-vitl-fpc64-256"
class VJEPA2ModelTester:
    """Builds tiny V-JEPA 2 configs and random video inputs for the common model tests."""

    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=16,
        patch_size=16,
        num_channels=3,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        num_frames=2,
        mlp_ratio=1,
        pred_hidden_size=32,
        pred_num_attention_heads=2,
        pred_num_hidden_layers=2,
        pred_num_mask_tokens=10,
        is_training=False,
        attn_implementation="sdpa",
        mask_ratio=0.5,
    ):
        # Raw test hyper-parameters.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_frames = num_frames
        self.mlp_ratio = mlp_ratio
        self.pred_hidden_size = pred_hidden_size
        self.pred_num_attention_heads = pred_num_attention_heads
        self.pred_num_hidden_layers = pred_num_hidden_layers
        self.pred_num_mask_tokens = pred_num_mask_tokens
        self.attn_implementation = attn_implementation
        self.is_training = is_training
        self.mask_ratio = mask_ratio

        # One token per spatial patch, with a temporal tubelet depth of 2 frames.
        tokens_per_clip = ((image_size // patch_size) ** 2) * (num_frames // 2)
        self.seq_length = tokens_per_clip
        self.num_masks = int(mask_ratio * tokens_per_clip)
        self.mask_length = tokens_per_clip

    def prepare_config_and_inputs(self):
        video_shape = [
            self.batch_size,
            self.num_frames,
            self.num_channels,
            self.image_size,
            self.image_size,
        ]
        pixel_values_videos = floats_tensor(video_shape)
        config = self.get_config()
        return config, pixel_values_videos

    def get_config(self):
        config_kwargs = {
            "crop_size": self.image_size,
            "frames_per_clip": self.num_frames,
            "hidden_size": self.hidden_size,
            "num_attention_heads": self.num_attention_heads,
            "num_hidden_layers": self.num_hidden_layers,
            "mlp_ratio": self.mlp_ratio,
            "pred_hidden_size": self.pred_hidden_size,
            "pred_num_attention_heads": self.pred_num_attention_heads,
            "pred_num_hidden_layers": self.pred_num_hidden_layers,
            "pred_num_mask_tokens": self.pred_num_mask_tokens,
        }
        return VJEPA2Config(**config_kwargs)

    def create_and_check_model(self, config, pixel_values_videos):
        model = VJEPA2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values_videos)
        expected_shape = (self.batch_size, self.seq_length, self.hidden_size)
        self.parent.assertEqual(result.last_hidden_state.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values_videos = self.prepare_config_and_inputs()
        return config, {"pixel_values_videos": pixel_values_videos}
@require_torch
class VJEPA2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as VJEPA2 does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (VJEPA2Model, VJEPA2ForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = {}
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = VJEPA2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VJEPA2Config, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        self.model_tester.create_and_check_model(*self.model_tester.prepare_config_and_inputs())

    def test_model_get_set_embeddings(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), nn.Module)
            output_embeddings = model.get_output_embeddings()
            self.assertTrue(output_embeddings is None or isinstance(output_embeddings, nn.Linear))

    @unittest.skip(reason="VJEPA2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="VJEPA2 does not support feedforward chunking yet")
    def test_feed_forward_chunking(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        self.assertIsNotNone(VJEPA2Model.from_pretrained(VJEPA_HF_MODEL))
# We will verify our results on an image of cute cats
def prepare_img():
    """Load the COCO cats fixture as a PIL image."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
def prepare_random_video(image_size=256):
    """Build one random 16-frame RGB video at the requested square resolution."""
    return prepare_video_inputs(
        batch_size=1,
        num_frames=16,
        num_channels=3,
        min_resolution=image_size,
        max_resolution=image_size,
        equal_resolution=True,
        return_tensors="torch",
    )
@require_torch
@require_vision
class VJEPA2ModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end checks against the released V-JEPA 2 checkpoints."""

    @cached_property
    def default_video_processor(self):
        return AutoVideoProcessor.from_pretrained(VJEPA_HF_MODEL) if is_vision_available() else None

    @slow
    def test_inference_image(self):
        model = VJEPA2Model.from_pretrained(VJEPA_HF_MODEL).to(torch_device)
        processor = self.default_video_processor
        image = prepare_img()
        inputs = processor(torch.Tensor(np.array(image)), return_tensors="pt").to(torch_device)
        # Tile the single frame so the input matches the model's clip length.
        pixel_values_videos = inputs.pixel_values_videos.repeat(1, model.config.frames_per_clip, 1, 1, 1)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values_videos)

        # verify the last hidden states
        self.assertEqual(outputs.last_hidden_state.shape, torch.Size((1, 8192, 1024)))
        expected_slice = torch.tensor(
            [[-0.0061, -1.8365, 2.7343], [-2.5938, -2.7181, -0.1663], [-1.7993, -2.2430, -1.1388]],
            device=torch_device,
        )
        torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=8e-2, atol=8e-2)

    @slow
    def test_inference_video(self):
        model = VJEPA2Model.from_pretrained(VJEPA_HF_MODEL).to(torch_device)
        processor = self.default_video_processor
        inputs = processor(prepare_random_video(), return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values_videos)

        # verify the last hidden states
        self.assertEqual(outputs.last_hidden_state.shape, torch.Size((1, 2048, 1024)))

    @slow
    def test_predictor_outputs(self):
        model = VJEPA2Model.from_pretrained(VJEPA_HF_MODEL).to(torch_device)
        processor = self.default_video_processor
        inputs = processor(prepare_random_video(), return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values_videos)

        # verify the predictor's last hidden states
        self.assertEqual(outputs.predictor_output.last_hidden_state.shape, torch.Size((1, 2048, 1024)))

    @slow
    def test_predictor_full_mask(self):
        model = VJEPA2Model.from_pretrained(VJEPA_HF_MODEL).to(torch_device)
        processor = self.default_video_processor
        inputs = processor(prepare_random_video(), return_tensors="pt").to(torch_device)
        pixel_values_videos = inputs.pixel_values_videos

        # forward pass with context and target masks both covering every position
        with torch.no_grad():
            full_mask = [torch.arange(2048, device=pixel_values_videos.device).unsqueeze(0)]
            outputs = model(pixel_values_videos, context_mask=full_mask, target_mask=full_mask)

        # verify the predictor's last hidden states
        self.assertEqual(outputs.predictor_output.last_hidden_state.shape, torch.Size((1, 2048, 1024)))

    @slow
    def test_predictor_partial_mask(self):
        model = VJEPA2Model.from_pretrained(VJEPA_HF_MODEL).to(torch_device)
        processor = self.default_video_processor
        inputs = processor(prepare_random_video(), return_tensors="pt").to(torch_device)
        pixel_values_videos = inputs.pixel_values_videos
        num_patches, num_masks = 2048, 100

        # forward pass: context is everything but the tail, target is the masked tail
        with torch.no_grad():
            pos_ids = torch.arange(num_patches, device=pixel_values_videos.device)
            context_mask = [pos_ids[: num_patches - num_masks].unsqueeze(0)]
            predictor_mask = [pos_ids[num_patches - num_masks :].unsqueeze(0)]
            outputs = model(pixel_values_videos, context_mask=context_mask, target_mask=predictor_mask)

        # verify the predictor's last hidden states
        self.assertEqual(outputs.predictor_output.last_hidden_state.shape, torch.Size((1, num_masks, 1024)))

    @slow
    def test_video_classification(self):
        checkpoint = "facebook/vjepa2-vitl-fpc16-256-ssv2"
        model = VJEPA2ForVideoClassification.from_pretrained(checkpoint).to(torch_device)
        processor = AutoVideoProcessor.from_pretrained(checkpoint)
        inputs = processor(np.ones((16, 3, 256, 256)), return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertEqual(outputs.logits.shape, (1, 174))
        expected_logits = torch.tensor([0.8814, -0.1195, -0.6389], device=torch_device)
        torch.testing.assert_close(outputs.logits[0, 100:103], expected_logits, rtol=1e-2, atol=1e-2)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/vjepa2/test_modeling_vjepa2.py",
"license": "Apache License 2.0",
"lines": 269,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/zoedepth/image_processing_zoedepth_fast.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for ZoeDepth."""
from typing import (
Optional,
Union,
)
import numpy as np
import torch
import torchvision.transforms.v2.functional as tvF
from ...image_processing_utils import (
BatchFeature,
)
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
get_image_size,
)
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
logging,
requires_backends,
)
from .image_processing_zoedepth import ZoeDepthImageProcessorKwargs, get_resize_output_image_size
from .modeling_zoedepth import ZoeDepthDepthEstimatorOutput
logger = logging.get_logger(__name__)
@auto_docstring
class ZoeDepthImageProcessorFast(BaseImageProcessorFast):
    do_pad = True
    do_rescale = True
    do_normalize = True
    image_mean = IMAGENET_STANDARD_MEAN
    image_std = IMAGENET_STANDARD_STD
    do_resize = True
    size = {"height": 384, "width": 512}
    resample = PILImageResampling.BILINEAR
    keep_aspect_ratio = True
    # Output spatial dims are constrained to a multiple of this value, matching the slow
    # ZoeDepth processor's integer default. (Fixed: was `1 / 32`, a float that effectively
    # disabled the constraint — rounding to the nearest multiple of 0.03125 leaves sizes
    # unchanged, yielding arbitrary truncated output sizes instead of multiples of 32.)
    ensure_multiple_of = 32
    valid_kwargs = ZoeDepthImageProcessorKwargs

    def __init__(self, **kwargs: Unpack[ZoeDepthImageProcessorKwargs]) -> None:
        super().__init__(**kwargs)

    @auto_docstring
    def preprocess(
        self,
        images: ImageInput,
        **kwargs: Unpack[ZoeDepthImageProcessorKwargs],
    ) -> BatchFeature:
        return super().preprocess(images, **kwargs)

    def resize(
        self,
        images: "torch.Tensor",
        size: SizeDict,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        interpolation: Optional["tvF.InterpolationMode"] = None,
    ) -> "torch.Tensor":
        """
        Resize an image or batched images to target size `(size["height"], size["width"])`. If `keep_aspect_ratio` is `True`, the image
        is resized to the largest possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is
        set, the image is resized to a size that is a multiple of this value.

        Args:
            images (`torch.Tensor`):
                Images to resize.
            size (`dict[str, int]`):
                Target size of the output image.
            keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
                If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved.
            ensure_multiple_of (`int`, *optional*, defaults to 1):
                The image is resized to a size that is a multiple of this value.
            interpolation (`tvF.InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
                Defines the resampling filter to use if resizing the image. Otherwise, the image is resized to size
                specified in `size`.

        Returns:
            `torch.Tensor`: The resized images.

        Raises:
            ValueError: If `size` is missing a height or width entry.
        """
        if not size.height or not size.width:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size}")
        output_size = get_resize_output_image_size(
            images,
            output_size=(size.height, size.width),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
            input_data_format=ChannelDimension.FIRST,
        )
        height, width = output_size
        # NOTE(review): `interpolation` is dereferenced unconditionally, so callers must pass a
        # concrete InterpolationMode despite the `None` default — confirm against the base class.
        # `align_corners=True` mirrors the original ZoeDepth resizing behavior.
        resized_images = torch.nn.functional.interpolate(
            images, (int(height), int(width)), mode=interpolation.value, align_corners=True
        )
        return resized_images

    def _pad_images(
        self,
        images: "torch.Tensor",
    ) -> "torch.Tensor":
        """
        Reflect-pad images to reduce boundary artifacts in the predicted depth map.

        The padding on each side is `int(sqrt(dim / 2) * 3)` for the corresponding spatial
        dimension, matching the original ZoeDepth implementation.

        Args:
            images (`torch.Tensor`):
                Images to pad, in channels-first layout.
        """
        height, width = get_image_size(images, channel_dim=ChannelDimension.FIRST)
        pad_height = int(np.sqrt(height / 2) * 3)
        pad_width = int(np.sqrt(width / 2) * 3)
        return tvF.pad(images, padding=(pad_width, pad_height), padding_mode="reflect")

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_resize: bool,
        size: SizeDict,
        keep_aspect_ratio: bool | None,
        ensure_multiple_of: int | None,
        interpolation: Optional["tvF.InterpolationMode"],
        do_pad: bool,
        do_rescale: bool,
        rescale_factor: float | None,
        do_normalize: bool,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        disable_grouping: bool | None,
        return_tensors: str | TensorType | None = None,
        **kwargs,
    ) -> BatchFeature:
        """Apply rescale → pad → resize → normalize to each same-shape group of images."""
        # Group images by size for batched resizing
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        resized_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_rescale:
                stacked_images = self.rescale(stacked_images, rescale_factor)
            # Padding happens before resizing so the reflect border is scaled with the image.
            if do_pad:
                stacked_images = self._pad_images(images=stacked_images)
            if do_resize:
                stacked_images = self.resize(
                    stacked_images, size, keep_aspect_ratio, ensure_multiple_of, interpolation
                )
            if do_normalize:
                stacked_images = self.normalize(stacked_images, image_mean, image_std)
            resized_images_grouped[shape] = stacked_images
        processed_images = reorder_images(resized_images_grouped, grouped_images_index)
        return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)

    def post_process_depth_estimation(
        self,
        outputs: "ZoeDepthDepthEstimatorOutput",
        source_sizes: TensorType | list[tuple[int, int]] | None = None,
        target_sizes: TensorType | list[tuple[int, int]] | None = None,
        outputs_flipped: Optional["ZoeDepthDepthEstimatorOutput"] = None,
        do_remove_padding: bool | None = None,
    ) -> list[dict[str, TensorType]]:
        """
        Converts the raw output of [`ZoeDepthDepthEstimatorOutput`] into final depth predictions and depth PIL images.
        Only supports PyTorch.

        Args:
            outputs ([`ZoeDepthDepthEstimatorOutput`]):
                Raw outputs of the model.
            source_sizes (`TensorType` or `list[tuple[int, int]]`, *optional*):
                Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the source size
                (height, width) of each image in the batch before preprocessing. This argument should be dealt as
                "required" unless the user passes `do_remove_padding=False` as input to this function.
            target_sizes (`TensorType` or `list[tuple[int, int]]`, *optional*):
                Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
                (height, width) of each image in the batch. If left to None, predictions will not be resized.
            outputs_flipped ([`ZoeDepthDepthEstimatorOutput`], *optional*):
                Raw outputs of the model from flipped input (averaged out in the end).
            do_remove_padding (`bool`, *optional*):
                By default ZoeDepth adds padding equal to `int(√(height / 2) * 3)` (and similarly for width) to fix the
                boundary artifacts in the output depth map, so we need remove this padding during post_processing. The
                parameter exists here in case the user changed the image preprocessing to not include padding.

        Returns:
            `list[dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth
            predictions.
        """
        requires_backends(self, "torch")

        predicted_depth = outputs.predicted_depth

        if (outputs_flipped is not None) and (predicted_depth.shape != outputs_flipped.predicted_depth.shape):
            raise ValueError("Make sure that `outputs` and `outputs_flipped` have the same shape")
        if (target_sizes is not None) and (len(predicted_depth) != len(target_sizes)):
            raise ValueError(
                "Make sure that you pass in as many target sizes as the batch dimension of the predicted depth"
            )
        if do_remove_padding is None:
            do_remove_padding = self.do_pad
        if source_sizes is None and do_remove_padding:
            raise ValueError(
                "Either `source_sizes` should be passed in, or `do_remove_padding` should be set to False"
            )
        if (source_sizes is not None) and (len(predicted_depth) != len(source_sizes)):
            raise ValueError(
                "Make sure that you pass in as many source image sizes as the batch dimension of the logits"
            )

        # Average the depth from the original and horizontally-flipped passes when both are given.
        if outputs_flipped is not None:
            predicted_depth = (predicted_depth + torch.flip(outputs_flipped.predicted_depth, dims=[-1])) / 2
        predicted_depth = predicted_depth.unsqueeze(1)

        # Zoe Depth model adds padding around the images to fix the boundary artifacts in the output depth map
        # The padding length is `int(np.sqrt(img_h/2) * fh)` for the height and similar for the width
        # fh (and fw respectively) are equal to '3' by default
        # Check [here](https://github.com/isl-org/ZoeDepth/blob/edb6daf45458569e24f50250ef1ed08c015f17a7/zoedepth/models/depth_model.py#L57)
        # for the original implementation.
        # In this section, we remove this padding to get the final depth image and depth prediction
        padding_factor_h = padding_factor_w = 3

        results = []
        target_sizes = [None] * len(predicted_depth) if target_sizes is None else target_sizes
        source_sizes = [None] * len(predicted_depth) if source_sizes is None else source_sizes
        for depth, target_size, source_size in zip(predicted_depth, target_sizes, source_sizes):
            # depth.shape = [1, H, W]
            if source_size is not None:
                pad_h = pad_w = 0
                if do_remove_padding:
                    pad_h = int(np.sqrt(source_size[0] / 2) * padding_factor_h)
                    pad_w = int(np.sqrt(source_size[1] / 2) * padding_factor_w)
                # Upsample to the padded source resolution, then crop the padding off.
                depth = tvF.resize(
                    depth,
                    size=[source_size[0] + 2 * pad_h, source_size[1] + 2 * pad_w],
                    interpolation=tvF.InterpolationMode.BICUBIC,
                    antialias=False,
                )
                if pad_h > 0:
                    depth = depth[:, pad_h:-pad_h, :]
                if pad_w > 0:
                    depth = depth[:, :, pad_w:-pad_w]
            if target_size is not None:
                target_size = [target_size[0], target_size[1]]
                depth = tvF.resize(
                    depth,
                    size=target_size,
                    interpolation=tvF.InterpolationMode.BICUBIC,
                    antialias=False,
                )
            depth = depth.squeeze(0)
            # depth.shape = [H, W]
            results.append({"predicted_depth": depth})

        return results
__all__ = [
    "ZoeDepthImageProcessorFast",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/zoedepth/image_processing_zoedepth_fast.py",
"license": "Apache License 2.0",
"lines": 249,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/minimax/modular_minimax.py | # Copyright 2025 MiniMaxAI and HuggingFace Inc. teams. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch MiniMax model."""
import torch
import torch.nn.functional as F
from torch import nn
from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...configuration_utils import PreTrainedConfig, layer_type_validation
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import MoeModelOutputWithPast
from ...modeling_rope_utils import RopeParameters
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, logging
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import OutputRecorder, capture_outputs
from ..gemma2.modeling_gemma2 import Gemma2RotaryEmbedding
from ..mixtral.modeling_mixtral import (
MixtralAttention,
MixtralDecoderLayer,
MixtralForCausalLM,
MixtralForQuestionAnswering,
MixtralForSequenceClassification,
MixtralForTokenClassification,
MixtralModel,
MixtralPreTrainedModel,
MixtralRMSNorm,
MixtralSparseMoeBlock,
MixtralTopKRouter,
)
logger = logging.get_logger(__name__)
class MiniMaxConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`MiniMaxModel`]. It is used to instantiate an
MiniMax model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the MiniMax.
[MiniMaxAI/MiniMax-Text-01-hf](https://huggingface.co/MiniMaxAI/MiniMax-Text-01-hf)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the MiniMax model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`MiniMaxModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 14336):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 8):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):
The attention head dimension.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
The maximum sequence length that this model might ever be used with. MiniMax's sliding window attention
allows sequence of up to 4096*32 tokens.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*):
The id of the padding token.
bos_token_id (`int`, *optional*, defaults to 1):
The id of the "beginning-of-sequence" token.
eos_token_id (`int`, *optional*, defaults to 2):
The id of the "end-of-sequence" token.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
sliding_window (`int`, *optional*):
Sliding window attention window size. If not specified, will default to `4096`.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
num_experts_per_tok (`int`, *optional*, defaults to 2):
The number of experts to route per-token, can be also interpreted as the `top-k` routing
parameter
num_local_experts (`int`, *optional*, defaults to 8):
Number of experts per Sparse MLP layer.
output_router_logits (`bool`, *optional*, defaults to `False`):
Whether or not the router logits should be returned by the model. Enabling this will also
allow the model to output the auxiliary loss. See [here]() for more details
router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
The aux loss factor for the total loss.
router_jitter_noise (`float`, *optional*, defaults to 0.0):
Amount of noise to add to the router.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
layer_types (`list`, *optional*):
Attention pattern for each layer.
block_size (`int`, *optional*, defaults to 256):
The length of each attention block, determining how queries, keys, and values
are grouped and processed for intra- and inter-block attention.
full_attn_alpha_factor (`float`, *optional*, defaults to 1):
Weight for residual value in residual connection after normal attention.
full_attn_beta_factor (`float`, *optional*, defaults to 1):
Weight for hidden state value in residual connection after normal attention.
linear_attn_alpha_factor (`float`, *optional*, defaults to 1):
Weight for residual value in residual connection after lightning attention.
linear_attn_beta_factor (`float`, *optional*, defaults to 1):
Weight for hidden state value in residual connection after lightning attention.
mlp_alpha_factor (`float`, *optional*, defaults to 1):
Weight for residual value in residual connection after MLP.
mlp_beta_factor (`float`, *optional*, defaults to 1):
Weight for hidden state value in residual connection after MLP.
```python
>>> from transformers import MiniMaxModel, MiniMaxConfig
>>> # Initializing a MiniMax style configuration
>>> configuration = MiniMaxConfig()
>>> # Initializing a model from the MiniMax style configuration
>>> model = MiniMaxModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "minimax"
keys_to_ignore_at_inference = ["past_key_values"]
default_theta = 1000000.0
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.gate": "colwise_gather_output", # we need to replicate here to correctly route experts
"layers.*.mlp.experts.gate_up_proj": "packed_colwise",
"layers.*.mlp.experts.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
attribute_map = {
"num_experts": "num_local_experts",
}
    def __init__(
        self,
        vocab_size: int | None = 32000,
        hidden_size: int | None = 4096,
        intermediate_size: int | None = 14336,
        num_hidden_layers: int | None = 32,
        num_attention_heads: int | None = 32,
        num_key_value_heads: int | None = 8,
        head_dim: int | None = None,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 4096 * 32,
        initializer_range: float | None = 0.02,
        # NOTE: annotation corrected to `float` — the default (1e-5) and all uses are floats.
        rms_norm_eps: float | None = 1e-5,
        use_cache: bool | None = True,
        pad_token_id: int | None = None,
        bos_token_id: int | None = 1,
        eos_token_id: int | None = 2,
        tie_word_embeddings: bool | None = False,
        sliding_window: int | None = None,
        attention_dropout: float | None = 0.0,
        num_experts_per_tok: int | None = 2,
        num_local_experts: int | None = 8,
        output_router_logits: bool | None = False,
        router_aux_loss_coef: float | None = 0.001,
        router_jitter_noise: float | None = 0.0,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        layer_types: list[str] | None = None,
        block_size: int | None = 256,
        # The six residual-scaling factors below are documented as floats in the class
        # docstring; annotations corrected from `int` accordingly.
        full_attn_alpha_factor: float | None = 1,
        full_attn_beta_factor: float | None = 1,
        linear_attn_alpha_factor: float | None = 1,
        linear_attn_beta_factor: float | None = 1,
        mlp_alpha_factor: float | None = 1,
        mlp_beta_factor: float | None = 1,
        **kwargs,
    ):
        """Initialize the config; each argument is documented in the class docstring."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.sliding_window = sliding_window

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.head_dim = head_dim
        self.num_experts_per_tok = num_experts_per_tok
        self.num_local_experts = num_local_experts
        self.output_router_logits = output_router_logits
        self.router_aux_loss_coef = router_aux_loss_coef
        self.router_jitter_noise = router_jitter_noise
        self.tie_word_embeddings = tie_word_embeddings
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.layer_types = layer_types
        self.block_size = block_size
        self.full_attn_alpha_factor = full_attn_alpha_factor
        self.full_attn_beta_factor = full_attn_beta_factor
        self.linear_attn_alpha_factor = linear_attn_alpha_factor
        self.linear_attn_beta_factor = linear_attn_beta_factor
        self.mlp_alpha_factor = mlp_alpha_factor
        self.mlp_beta_factor = mlp_beta_factor

        if self.layer_types is None:
            # Default layout: alternate layers, starting with full attention at index 0,
            # so even indices are "full_attention" and odd indices "linear_attention".
            self.layer_types = [
                "full_attention" if bool((i + 1) % 2) else "linear_attention" for i in range(self.num_hidden_layers)
            ]
        layer_type_validation(self.layer_types, self.num_hidden_layers)

        self.rope_parameters = rope_parameters

        super().__init__(**kwargs)
class MiniMaxRMSNorm(MixtralRMSNorm):
    """RMS normalization for MiniMax; identical to Mixtral's implementation."""

    pass
class MiniMaxCache(DynamicCache):
    """Hybrid cache for MiniMax.

    Full-attention layers store key/value states through the parent ``DynamicCache``
    (``self.layers``), while lightning (linear) attention layers store a single cumulative
    state matrix (``K^T @ V``) per layer in ``self.linear_cache``. Layers that never set a
    linear state hold the ``[]`` placeholder.
    """

    def __init__(self):
        super().__init__()
        # Per-layer linear-attention states; `[]` marks a layer with no linear state.
        self.linear_cache: list[torch.Tensor] = []

    def set_linear_cache(self, layer_idx, linear_cache):
        """Store the linear-attention state for `layer_idx`."""
        # There may be skipped layers, fill them with empty lists
        for _ in range(len(self.linear_cache), layer_idx + 1):
            self.linear_cache.append([])
        self.linear_cache[layer_idx] = linear_cache

    def get_linear_cache(self, layer_idx: int):
        """Return the linear-attention state for `layer_idx`, or `None` if absent."""
        # Fix: bound by `len(self.linear_cache)` rather than `len(self)`. `len(self)` is the
        # max over both storage types, so an index valid for the key/value cache but beyond
        # `linear_cache` used to raise IndexError instead of returning None.
        if layer_idx < len(self.linear_cache):
            return self.linear_cache[layer_idx]
        return None

    def __len__(self):
        # The cache spans both storage types, so report the larger of the two lengths.
        return max(super().__len__(), len(self.linear_cache))

    def batch_repeat_interleave(self, repeats: int):
        """Repeat every cached state `repeats` times along the batch dimension."""
        for layer_idx in range(len(self)):
            # `linear_cache` may be shorter than `layers` (e.g. when the last layers use
            # full attention); treat missing entries like the `[]` placeholder.
            if layer_idx < len(self.linear_cache) and self.linear_cache[layer_idx] != []:
                self.linear_cache[layer_idx] = self.linear_cache[layer_idx].repeat_interleave(repeats, dim=0)
            else:
                self.layers[layer_idx].batch_repeat_interleave(repeats)

    def batch_select_indices(self, indices: torch.Tensor):
        """Keep only the given batch indices in every cached state."""
        for layer_idx in range(len(self)):
            if layer_idx < len(self.linear_cache) and self.linear_cache[layer_idx] != []:
                self.linear_cache[layer_idx] = self.linear_cache[layer_idx][indices, ...]
            else:
                self.layers[layer_idx].batch_select_indices(indices)

    def crop(self, max_length: int):
        # Linear-attention states are cumulative summaries and cannot be truncated.
        raise RuntimeError("MiniMaxCache does not support `crop` method")
class MiniMaxLightningAttention(nn.Module):
    """Lightning (linear) attention used by MiniMax's "linear_attention" layers.

    Queries/keys/values come from one fused projection; attention is computed block-wise
    with exponentially decaying weights (per-head slopes), and a cumulative ``K^T @ V``
    state is carried between blocks and across generation steps via ``MiniMaxCache``.
    """

    def __init__(self, config: MiniMaxConfig, layer_idx: int):
        super().__init__()
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
        self.num_attention_heads = config.num_attention_heads
        self.num_hidden_layers = config.num_hidden_layers
        self.block_size = config.block_size

        self.act_fn = ACT2FN[config.hidden_act]
        self.norm = MiniMaxRMSNorm(self.head_dim * self.num_attention_heads)
        # Fused Q/K/V projection (3 * heads * head_dim outputs).
        self.qkv_proj = nn.Linear(config.hidden_size, self.num_attention_heads * self.head_dim * 3, bias=False)
        self.out_proj = nn.Linear(self.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
        # Sigmoid gate applied to the attention output before the final projection.
        self.output_gate = nn.Linear(config.hidden_size, self.num_attention_heads * self.head_dim, bias=False)

        # Decay buffers are deterministic functions of (layer_idx, num heads, block size);
        # they are registered as buffers so they follow the module's device/dtype.
        slope_rate = self.get_slope_rate()
        query_decay, key_decay, diagonal_decay = self.decay_factors(slope_rate)

        self.register_buffer("slope_rate", slope_rate)
        self.register_buffer("query_decay", query_decay)
        self.register_buffer("key_decay", key_decay)
        self.register_buffer("diagonal_decay", diagonal_decay)

    def get_slope_rate(self):
        """Per-head decay slopes, scaled down for deeper layers; shape (heads, 1, 1)."""
        base = 1 / (2 ** (8 / self.num_attention_heads))
        exponent = torch.arange(self.num_attention_heads) + 1
        # Deeper layers decay more slowly (factor shrinks linearly with layer index).
        factor = 1 - self.layer_idx / (self.num_hidden_layers - 1 + 1e-5) + 1e-5

        rate = base**exponent
        rate = rate * factor
        rate = rate[:, None, None]
        return rate

    def decay_factors(self, slope_rate):
        """Precompute the intra-block decay tensors for a full block of `block_size` positions.

        Returns:
            query_decay: decay applied to queries by their in-block position.
            key_decay: decay applied to keys by distance to the block end.
            diagonal_decay: causal (lower-triangular) decay matrix; upper triangle is 0
                (exp of -inf), which also enforces causality within the block.
        """
        block_size_range = torch.arange(self.block_size) + 1

        query_decay = torch.exp(-slope_rate * block_size_range[:, None])
        key_decay = torch.exp(-slope_rate * (self.block_size - block_size_range[:, None]))

        diagonal_decay = block_size_range[:, None] - block_size_range[None, :]
        diagonal_decay = diagonal_decay[None, None, :, :]
        diagonal_decay = slope_rate * diagonal_decay
        diagonal_decay = torch.where(diagonal_decay >= 0, -diagonal_decay, float("-inf"))
        diagonal_decay = torch.exp(diagonal_decay)

        return query_decay, key_decay, diagonal_decay

    def forward(
        self,
        hidden_states: torch.Tensor,
        # Accepted for interface parity with full attention; not used in this module.
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        # NOTE: return annotation corrected — this returns (attn_output, attn_weights_inter).
        batch_size, seq_len, hidden_size = hidden_states.shape
        num_blocks = (seq_len + self.block_size - 1) // self.block_size

        qkv_states = self.act_fn(self.qkv_proj(hidden_states))
        qkv_states = qkv_states.reshape(batch_size, seq_len, self.num_attention_heads, 3 * self.head_dim)

        query_states, key_states, value_states = torch.split(qkv_states, self.head_dim, dim=3)

        # (batch, heads, seq, head_dim)
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        # calculated (K.T @ V) and saved as cache
        attn_weights_inter = None
        if past_key_values is not None:
            attn_weights_inter = past_key_values.get_linear_cache(self.layer_idx)

        if attn_weights_inter is None:
            # Prefill path: no cached state yet — process the sequence block by block.
            attn_weights_inter = torch.zeros(batch_size, self.num_attention_heads, self.head_dim, self.head_dim).to(
                value_states
            )

            # apply attention_mask: zero out padded positions' values so they never
            # contribute to the cumulative K^T V state.
            if attention_mask is not None:
                attention_mask = attention_mask.to(dtype=torch.bool)  # Ensure it's a boolean tensor
                value_states = value_states.masked_fill(~attention_mask.unsqueeze(1).unsqueeze(-1), 0)

            attn_output = []
            for i in range(num_blocks):
                start_idx = i * self.block_size
                end_idx = min(start_idx + self.block_size, seq_len)
                current_block_size = end_idx - start_idx

                current_query_states = query_states[:, :, start_idx:end_idx]
                current_key_states = key_states[:, :, start_idx:end_idx]
                current_value_states = value_states[:, :, start_idx:end_idx]

                # The last block may be shorter — slice the precomputed decay buffers.
                current_query_decay = self.query_decay[:, :current_block_size]
                current_key_decay = self.key_decay[:, -current_block_size:]
                current_diagonal_decay = self.diagonal_decay[:, :, :current_block_size, :current_block_size]
                block_decay = torch.exp(-self.slope_rate * current_block_size)

                # intra: ( Q @ K.T ) @ V -> QK * V
                attn_weights_intra = torch.matmul(current_query_states, current_key_states.transpose(-1, -2))
                attn_output_intra = torch.matmul(attn_weights_intra * current_diagonal_decay, current_value_states)

                # inter: Q @ ( K.T @ V ) -> Q * KV
                attn_output_inter = torch.matmul(current_query_states * current_query_decay, attn_weights_inter)

                # final attention output
                current_attn_output = attn_output_inter + attn_output_intra
                attn_output.append(current_attn_output)

                # calculate attn_weights_inter for next block or cache
                next_attn_weights_inter = torch.matmul(
                    (current_key_states * current_key_decay).transpose(-1, -2), current_value_states
                )
                attn_weights_inter = attn_weights_inter * block_decay + next_attn_weights_inter
        else:
            # Decode path: recurrently fold each new token into the cached K^T V state.
            ratio = torch.exp(-self.slope_rate)
            attn_output = []
            for i in range(seq_len):
                current_query_states = query_states[:, :, i : i + 1]
                current_key_states = key_states[:, :, i : i + 1]
                current_value_states = value_states[:, :, i : i + 1]

                current_attn_weights_inter = torch.matmul(current_key_states.transpose(-1, -2), current_value_states)
                attn_weights_inter = ratio * attn_weights_inter + current_attn_weights_inter
                current_attn_output = torch.matmul(current_query_states, attn_weights_inter)
                attn_output.append(current_attn_output)

        # concatenate attention outputs over all blocks
        attn_output = torch.cat(attn_output, dim=-2)

        # final output projection
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(batch_size, seq_len, self.num_attention_heads * self.head_dim)
        attn_output = self.norm(attn_output)
        attn_output = F.sigmoid(self.output_gate(hidden_states)) * attn_output
        attn_output = self.out_proj(attn_output)

        # update cache
        if past_key_values is not None:
            past_key_values.set_linear_cache(self.layer_idx, attn_weights_inter)

        return attn_output, attn_weights_inter
class MiniMaxRotaryEmbedding(Gemma2RotaryEmbedding):
    """Rotary position embedding for MiniMax; identical to Gemma2's implementation."""

    pass
class MiniMaxAttention(MixtralAttention):
    """Standard (full) attention for MiniMax; identical to Mixtral's implementation."""

    pass
class MiniMaxTopKRouter(MixtralTopKRouter):
    """Top-k expert router for MiniMax's MoE layers; identical to Mixtral's implementation."""

    pass
class MiniMaxSparseMoeBlock(MixtralSparseMoeBlock):
    """Sparse mixture-of-experts MLP block; identical to Mixtral's implementation."""

    pass
class MiniMaxDecoderLayer(MixtralDecoderLayer, GradientCheckpointingLayer):
    """MiniMax decoder layer: MoE MLP plus either full or lightning attention.

    Unlike a plain residual connection, both residual and transformed branches are
    scaled by configurable alpha/beta factors.
    """

    def __init__(self, config: MiniMaxConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        self.layer_idx = layer_idx
        self.layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None
        self.mlp_alpha_factor = config.mlp_alpha_factor
        self.mlp_beta_factor = config.mlp_beta_factor

        # Replace the MLP installed by the Mixtral parent with MiniMax's MoE block.
        del self.mlp
        self.mlp = MiniMaxSparseMoeBlock(config)

        # Choose the attention implementation (and its residual factors) per layer type.
        if self.layer_type == "linear_attention":
            self.self_attn = MiniMaxLightningAttention(config, layer_idx)
            self.attn_alpha_factor = config.linear_attn_alpha_factor
            self.attn_beta_factor = config.linear_attn_beta_factor
        else:
            self.self_attn = MiniMaxAttention(config, layer_idx)
            self.attn_alpha_factor = config.full_attn_alpha_factor
            self.attn_beta_factor = config.full_attn_beta_factor

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        use_cache: bool | None = False,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.FloatTensor, tuple[torch.FloatTensor, torch.FloatTensor] | None]:
        # NOTE: the residual is taken AFTER the layernorm (not before), then recombined
        # with the weighted sum `residual * alpha + f(hidden) * beta`.
        hidden_states = self.input_layernorm(hidden_states)
        residual = hidden_states
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = residual * self.attn_alpha_factor + hidden_states * self.attn_beta_factor

        hidden_states = self.post_attention_layernorm(hidden_states)
        residual = hidden_states
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual * self.mlp_alpha_factor + hidden_states * self.mlp_beta_factor

        return hidden_states
class MiniMaxPreTrainedModel(MixtralPreTrainedModel):
    _can_compile_fullgraph = False  # uses a non-compilable custom cache class MiniMaxCache
    # Maps output fields to the submodules whose results are recorded into them.
    _can_record_outputs = {
        "router_logits": OutputRecorder(MiniMaxTopKRouter, layer_name="mlp.gate", index=0),
        "hidden_states": MiniMaxDecoderLayer,
        "attentions": [MiniMaxAttention, MiniMaxLightningAttention],
    }

    def _init_weights(self, module):
        """Default weight init, plus recomputation of lightning-attention decay buffers.

        The decay buffers are deterministic functions of the layer index and config, so
        they are regenerated here instead of relying on checkpoint contents.
        """
        super()._init_weights(module)
        if isinstance(module, MiniMaxLightningAttention):
            slope_rate = module.get_slope_rate()
            query_decay, key_decay, diagonal_decay = module.decay_factors(slope_rate)
            init.copy_(module.slope_rate, slope_rate)
            init.copy_(module.query_decay, query_decay)
            init.copy_(module.key_decay, key_decay)
            init.copy_(module.diagonal_decay, diagonal_decay)
class MiniMaxModel(MixtralModel):
    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: MiniMaxCache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | MoeModelOutputWithPast:
        """Run the MiniMax decoder stack.

        Full-attention layers receive the prepared causal mask, while lightning-attention
        layers receive the raw `attention_mask` (used only to zero padded values).
        Requires a `MiniMaxCache` (not a generic cache) when `use_cache` is enabled.
        """
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if use_cache and past_key_values is None:
            past_key_values = MiniMaxCache()
        elif use_cache and not isinstance(past_key_values, MiniMaxCache):
            raise ValueError(
                f"MiniMax uses cache of its own and is not compatible with `past_key_values` of type {type(past_key_values)}."
            )

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # Build the (possibly sliding-window) causal mask once for all full-attention layers.
        mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
        causal_mask = mask_function(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers:
            if decoder_layer.layer_type == "full_attention":
                input_attention_mask = causal_mask
            else:
                # lightning attention uses original attention_mask, and uses it only for the first step
                input_attention_mask = attention_mask

            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=input_attention_mask,
                position_embeddings=position_embeddings,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)

        return MoeModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )
class MiniMaxForCausalLM(MixtralForCausalLM):
    # Thin wrapper over Mixtral's causal-LM forward; overridden only to attach the
    # MiniMax-specific docstring/example used by the generated documentation.
    def forward(self, **super_kwargs):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, MiniMaxForCausalLM

        >>> model = MiniMaxForCausalLM.from_pretrained("MiniMaxAI/MiniMax-Text-01-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("MiniMaxAI/MiniMax-Text-01-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        return super().forward(**super_kwargs)
class MiniMaxForSequenceClassification(MixtralForSequenceClassification):
    """Sequence classification head on MiniMax; identical to Mixtral's implementation."""

    pass
class MiniMaxForTokenClassification(MixtralForTokenClassification):
    """Token classification head on MiniMax; identical to Mixtral's implementation."""

    pass
class MiniMaxForQuestionAnswering(MixtralForQuestionAnswering):
    """Extractive QA head on MiniMax; identical to Mixtral's implementation."""

    pass
# Public symbols re-exported by `transformers.models.minimax`.
__all__ = [
    "MiniMaxConfig",
    "MiniMaxPreTrainedModel",
    "MiniMaxModel",
    "MiniMaxForCausalLM",
    "MiniMaxForSequenceClassification",
    "MiniMaxForTokenClassification",
    "MiniMaxForQuestionAnswering",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/minimax/modular_minimax.py",
"license": "Apache License 2.0",
"lines": 551,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/minimax/test_modeling_minimax.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch MiniMax model."""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import (
Expectations,
is_flaky,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
if is_torch_available():
import torch
from transformers import (
MiniMaxForCausalLM,
MiniMaxModel,
)
from transformers.models.minimax.modeling_minimax import MiniMaxCache
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
class MiniMaxModelTester(CausalLMModelTester):
    """Builds tiny MiniMax configs/models for the shared causal-LM test suite."""

    if is_torch_available():
        base_model_class = MiniMaxModel

    def __init__(self, parent, layer_types=None, block_size=3):
        super().__init__(parent)
        # MiniMax-specific config overrides: the per-layer attention types and the
        # lightning-attention block size (kept tiny so tests exercise multiple blocks).
        self.layer_types = layer_types
        self.block_size = block_size
@require_torch
class MiniMaxModelTest(CausalLMModelTest, unittest.TestCase):
    """Common causal-LM test suite specialized for MiniMax.

    Fix: `_check_caches_are_equal` used to compare `cache1` against itself for the
    key/value states, making that part of the check vacuous; it now compares
    `cache1` against `cache2` as intended.
    """

    model_tester_class = MiniMaxModelTester

    # TODO (ydshieh): Check this. See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146
    def is_pipeline_test_to_skip(
        self,
        pipeline_test_case_name,
        config_class,
        model_architecture,
        tokenizer_name,
        image_processor_name,
        feature_extractor_name,
        processor_name,
    ):
        return True

    @is_flaky(max_attempts=2)
    def test_load_balancing_loss(self):
        r"""
        Let's make sure we can actually compute the loss and do a backward on it.
        """
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.num_local_experts = 3
        config.output_router_logits = True
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MiniMaxForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask)
        self.assertEqual(result.router_logits[0].shape, (91, config.num_local_experts))
        torch.testing.assert_close(result.aux_loss.cpu(), torch.tensor(2, dtype=torch.float32), rtol=1e-2, atol=1e-2)

        # First, we make sure that adding padding tokens doesn't change the loss
        # loss(input_ids, attention_mask=None) == loss(input_ids + padding, attention_mask=attention_mask_with_padding)
        pad_length = input_ids.shape[1] * 4
        # Add padding tokens (assume that pad_token_id=1) to input_ids
        padding_block = torch.ones(input_ids.shape[0], pad_length, dtype=torch.int32).to(torch_device)
        padded_input_ids = torch.cat((padding_block, input_ids), dim=1)  # this is to simulate padding to the left
        padded_attention_mask = padded_input_ids.ne(1).to(torch_device)

        padded_result = model(padded_input_ids, attention_mask=padded_attention_mask)
        torch.testing.assert_close(result.aux_loss.cpu(), padded_result.aux_loss.cpu(), rtol=1e-4, atol=1e-4)

        # We make sure that the loss of including padding tokens != the loss without padding tokens
        # if attention_mask=None --> we don't exclude padding tokens
        include_padding_result = model(padded_input_ids, attention_mask=None)

        # This is to mimic torch.testing.assert_not_close
        self.assertNotAlmostEqual(include_padding_result.aux_loss.item(), result.aux_loss.item())

    def _check_attentions_for_generate(
        self, batch_size, attentions, prompt_length, output_length, config, decoder_past_key_values
    ):
        """Validate generated attention shapes; only full-attention layers are checked."""
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (output_length - prompt_length))
        use_cache = decoder_past_key_values is not None

        for generated_length, iter_attentions in enumerate(attentions):
            # regardless of using cache, the first forward pass will have the full prompt as input
            if use_cache and generated_length > 0:
                model_input_length = 1
            else:
                model_input_length = prompt_length + generated_length
            expected_shape = (
                batch_size,
                config.num_attention_heads,
                model_input_length,
                prompt_length + generated_length,
            )
            for layer_idx, layer_attention in enumerate(iter_attentions):
                # Lightning-attention layers do not expose standard attention weights.
                if config.layer_types[layer_idx] == "full_attention":
                    self.assertEqual(layer_attention.shape, expected_shape)

    def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
        """Validate cache shapes per layer type: K/V for full attention, state matrix for linear."""
        self.assertIsInstance(past_key_values, MiniMaxCache)

        # (batch, head, seq_length, head_features)
        key_value_cache_expected_shape = (
            batch_size,
            config.num_key_value_heads,
            seq_length,
            config.hidden_size // config.num_attention_heads,
        )
        # (batch, head, head_features, head_features)
        linear_cache_expected_shape = (
            batch_size,
            config.num_attention_heads,
            config.hidden_size // config.num_attention_heads,
            config.hidden_size // config.num_attention_heads,
        )

        for layer_idx in range(config.num_hidden_layers):
            if config.layer_types[layer_idx] == "full_attention":
                self.assertEqual(past_key_values.layers[layer_idx].keys.shape, key_value_cache_expected_shape)
                self.assertEqual(past_key_values.layers[layer_idx].values.shape, key_value_cache_expected_shape)
            else:
                self.assertEqual(past_key_values.linear_cache[layer_idx].shape, linear_cache_expected_shape)

    def _check_caches_are_equal(self, cache1: MiniMaxCache, cache2: MiniMaxCache):
        """Assert two MiniMax caches hold identical key/value and linear-attention states."""
        if not isinstance(cache1, MiniMaxCache) or not isinstance(cache2, MiniMaxCache):
            raise ValueError("The wrong cache is being used!")
        if not len(cache1) == len(cache2):
            raise ValueError("Both caches do not have the same number of layers.")

        num_layers = len(cache1)
        for idx in range(num_layers):
            # We need this as MiniMaxCache uses the max between attention and linear caches for len...
            if idx < len(cache1.layers):
                # Fixed: compare against `cache2` (previously compared `cache1` to itself).
                torch.testing.assert_close(cache1.layers[idx].keys, cache2.layers[idx].keys)
                torch.testing.assert_close(cache1.layers[idx].values, cache2.layers[idx].values)
            # Guarded: `linear_cache` can be shorter than the overall cache length.
            if idx < len(cache1.linear_cache):
                torch.testing.assert_close(cache1.linear_cache[idx], cache2.linear_cache[idx])

    @unittest.skip(reason="MiniMaxCache does not support `crop()` method")
    def test_prompt_lookup_decoding_matches_greedy_search(self):
        pass

    @unittest.skip(reason="MiniMaxCache does not support `crop()` method")
    def test_assisted_decoding_sample(self):
        pass

    @unittest.skip(reason="MiniMaxCache does not support `crop()` method")
    def test_assisted_decoding_matches_greedy_search_0_random(self):
        pass

    @unittest.skip(reason="MiniMaxCache does not support `crop()` method")
    def test_assisted_decoding_matches_greedy_search_1_same(self):
        pass

    @unittest.skip("Model needs refactor")
    def test_attention_outputs(self):
        pass

    @unittest.skip("MiniMax is special")
    def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
        pass

    @unittest.skip("MiniMax is special")
    def test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs(self):
        pass

    @unittest.skip("MiniMax is special")
    def test_eager_padding_matches_padding_free_with_position_ids(self):
        pass

    @unittest.skip("MiniMax is special")
    def test_sdpa_padding_matches_padding_free_with_position_ids(self):
        pass
@require_torch
@require_torch_accelerator
@slow
class MiniMaxIntegrationTest(unittest.TestCase):
    """Slow integration tests running the tiny MiniMax checkpoint on an accelerator."""

    def test_small_model_logits(self):
        """Check a slice of the logits against per-device reference values."""
        model_id = "hf-internal-testing/MiniMax-tiny"
        dummy_input = torch.LongTensor([[0, 1, 0], [0, 1, 0]]).to(torch_device)

        model = MiniMaxForCausalLM.from_pretrained(
            model_id,
            dtype=torch.bfloat16,
        ).to(torch_device)

        with torch.no_grad():
            logits = model(dummy_input).logits
        # Compare in float32 to keep the tolerances meaningful for bfloat16 outputs.
        logits = logits.float()

        expectations = Expectations(
            {
                (None, None): [[1.0312, -0.5156, -0.3262], [-0.1152, 0.4336, 0.2412], [1.2188, -0.5898, -0.0381]],
                ("cuda", 8): [[1.0312, -0.5156, -0.3203], [-0.1201, 0.4375, 0.2402], [1.2188, -0.5898, -0.0396]],
            }
        )
        expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
        # Both batch rows receive identical inputs, so both must match the same slice.
        torch.testing.assert_close(logits[0, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
        torch.testing.assert_close(logits[1, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)

    def test_small_model_generation(self):
        """Greedy generation must reproduce the reference token ids exactly."""
        model_id = "hf-internal-testing/MiniMax-tiny"
        dummy_input = torch.LongTensor([[0, 1, 0], [0, 1, 0]]).to(torch_device)

        model = MiniMaxForCausalLM.from_pretrained(
            model_id,
            dtype=torch.bfloat16,
        ).to(torch_device)

        expected_slice = (
            torch.tensor([[0, 1, 0, 933, 307, 3102, 2457, 1208], [0, 1, 0, 933, 307, 3102, 2457, 1208]])
            .to(torch.int64)
            .to(torch_device)
        )
        outputs = model.generate(dummy_input, max_new_tokens=5, do_sample=False)
        torch.testing.assert_close(outputs, expected_slice, atol=1e-3, rtol=1e-3)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/minimax/test_modeling_minimax.py",
"license": "Apache License 2.0",
"lines": 207,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/colqwen2/configuration_colqwen2.py | # Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from typing import Any
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class ColQwen2Config(PreTrainedConfig):
    r"""
    Configuration class to store the configuration of a [`ColQwen2ForRetrieval`]. It is used to instantiate an instance
    of `ColQwen2ForRetrieval` according to the specified arguments, defining the model architecture following the methodology
    from the "ColPali: Efficient Document Retrieval with Vision Language Models" paper.

    Instantiating a configuration with the defaults will yield a similar configuration to the vision encoder used by the pre-trained
    ColQwen2-v1.0 model, e.g. [vidore/colqwen2-v1.0-hf](https://huggingface.co/vidore/colqwen2-v1.0-hf).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vlm_config (`PreTrainedConfig`, *optional*):
            Configuration of the VLM backbone model.
        embedding_dim (`int`, *optional*, defaults to 128):
            Dimension of the multi-vector embeddings produced by the model.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

    Example:

    ```python
    from transformers.models.colqwen2 import ColQwen2Config, ColQwen2ForRetrieval

    config = ColQwen2Config()
    model = ColQwen2ForRetrieval(config)
    ```
    """

    model_type = "colqwen2"
    sub_configs: dict[str, Any] = {"vlm_config": PreTrainedConfig}

    def __init__(
        self,
        vlm_config=None,
        embedding_dim: int = 128,
        initializer_range: float = 0.02,
        **kwargs,
    ):
        if vlm_config is None:
            # Default backbone: Qwen2-VL with its default hyperparameters.
            vlm_config = CONFIG_MAPPING["qwen2_vl"]()
            logger.info(
                "`vlm_config` is `None`. Initializing `vlm_config` with the `Qwen2VLConfig` with default values."
            )
        elif isinstance(vlm_config, dict):
            # Deep-copy so the caller's dict is not mutated, then rebuild the typed config.
            vlm_config = deepcopy(vlm_config)
            if "model_type" not in vlm_config:
                raise KeyError(
                    "The `model_type` key is missing in the `vlm_config` dictionary. Please provide the model type."
                )
            vlm_config = CONFIG_MAPPING[vlm_config["model_type"]](**vlm_config)
        elif not isinstance(vlm_config, PreTrainedConfig):
            raise TypeError(
                f"Invalid type for `vlm_config`. Expected `PreTrainedConfig`, `dict`, or `None`, but got {type(vlm_config)}."
            )

        # Mirror the text sub-config's vocab size on the VLM config when it is absent.
        if not hasattr(vlm_config, "vocab_size"):
            vlm_config.vocab_size = vlm_config.get_text_config().vocab_size
        self.vlm_config = vlm_config
        self.embedding_dim = embedding_dim
        self.initializer_range = initializer_range

        super().__init__(**kwargs)

    def get_text_config(self, *args, **kwargs) -> PreTrainedConfig:
        # Delegate to the wrapped VLM config so generic utilities find the text settings.
        return self.vlm_config.get_text_config(*args, **kwargs)
__all__ = ["ColQwen2Config"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/colqwen2/configuration_colqwen2.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/colqwen2/convert_colqwen2_weights_to_hf.py | # Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Convert ColQwen2 weights from the original repository to the HF model format.
Don't forget to manually upload the processor-related files to the HF model repository
after running this script.
Original repository: https://github.com/illuin-tech/colqwen2.
NOTE: This script was originally run using `torch==2.5.1` and with:
```bash
python src/transformers/models/colqwen2/convert_colqwen2_weights_to_hf.py \
--model_id vidore/colqwen2-v1.0-merged \
--revision eeccbae1d44bdcb0c83b1788127a2b2cad7d718e \
--original_vlm_name_or_path Qwen/Qwen2-VL-2B-Instruct \
--output_dir vidore/colqwen2-v1.0-hf-internal \
--push_to_hub
```
"""
import argparse
import glob
from pathlib import Path
from typing import Any
import torch
from huggingface_hub import snapshot_download
from peft import PeftModel
from safetensors import safe_open
from transformers import AutoConfig, AutoModel
from transformers.models.colqwen2 import ColQwen2ForRetrieval
from transformers.models.colqwen2.configuration_colqwen2 import ColQwen2Config
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
ORIGINAL_DTYPE = torch.bfloat16
def load_original_state_dict(model_id: str, revision: str | None = None) -> dict[str, torch.Tensor]:
    """Download the original checkpoint and merge all safetensors shards into one state dict.

    When the LM head is tied to the input embeddings it is absent from the checkpoint, so a
    cloned copy of the embedding weights is inserted under `lm_head.weight`.
    """
    snapshot_dir = snapshot_download(
        repo_id=model_id,
        revision=revision,
        allow_patterns=["*.safetensors"],
    )

    state_dict: dict[str, torch.Tensor] = {}
    for shard_path in glob.glob(f"{snapshot_dir}/*.safetensors"):
        with safe_open(shard_path, framework="pt", device="cpu") as shard:
            for tensor_name in shard.keys():
                state_dict[tensor_name] = shard.get_tensor(tensor_name)

    # Tied weights are not serialized: recreate `lm_head.weight` from the embeddings.
    if "lm_head.weight" not in state_dict and "model.embed_tokens.weight" in state_dict:
        state_dict["lm_head.weight"] = state_dict["model.embed_tokens.weight"].clone()

    return state_dict
def rename_state_dict_keys(state_dict: dict[str, Any]) -> dict[str, Any]:
    """Map original ColQwen2 checkpoint keys onto the HF `ColQwen2ForRetrieval` layout.

    - `custom_text_proj.*` becomes `embedding_proj_layer.*` (no `vlm.` prefix).
    - Every other key is prefixed with `vlm.`; the `model.` / `visual.` backbone prefixes are
      first rerouted under the composite VLM submodules (`model.language_model.` / `model.visual.`).

    Args:
        state_dict: The original checkpoint's state dict.

    Returns:
        A new dict with renamed keys and the original values.
    """
    new_state_dict: dict[str, Any] = {}
    for key, value in state_dict.items():
        if key.startswith("custom_text_proj"):
            new_key = key.replace("custom_text_proj", "embedding_proj_layer")
        else:
            # The original ColQwen2 inherits from Qwen2VL, so we simply need to add the `vlm.` prefix
            # to all remaining keys. Rewrite the prefix only: a plain `str.replace` would also mangle
            # `model.` / `visual.` substrings appearing later in a key.
            if key.startswith("model."):
                key = "model.language_model." + key[len("model.") :]
            elif key.startswith("visual."):
                key = "model.visual." + key[len("visual.") :]
            new_key = "vlm." + key
        new_state_dict[new_key] = value
    return new_state_dict
@torch.no_grad()
def convert_colqwen2_weights_to_hf(
    model_id: str,
    output_dir: str,
    push_to_hub: bool,
    revision: str | None = None,
    original_vlm_name_or_path: str | None = None,
):
    """Convert an original ColQwen2 checkpoint from the Hub into the HF `ColQwen2ForRetrieval` format.

    Args:
        model_id: Hub repo id of the original (merged) ColQwen2 model to convert.
        output_dir: Local directory to save to, or Hub repo id when `push_to_hub` is True.
        push_to_hub: If True, push the converted model to the Hub (private) instead of saving locally.
        revision: Optional git revision of `model_id` to download.
        original_vlm_name_or_path: Optional name/path of the VLM backbone to record in the config.

    Raises:
        ValueError: If the converted model's state dict keys do not match the original's.
    """
    # Load the original model data
    original_config = AutoConfig.from_pretrained(
        model_id,
        revision=revision,
    )
    if original_vlm_name_or_path is not None:
        original_config._name_or_path = original_vlm_name_or_path
    if hasattr(original_config, "architectures"):
        # Drop the original `architectures` entry so the converted config doesn't advertise
        # the non-HF class name.
        delattr(original_config, "architectures")

    original_state_dict = load_original_state_dict(model_id, revision=revision)

    # Format the state_dict keys
    original_state_dict = rename_state_dict_keys(original_state_dict)

    # Create the new config
    config = ColQwen2Config(
        vlm_config=original_config,
        embedding_dim=128,  # hardcoded in the original model
    )
    config.model_type = "colqwen2"
    config.is_composition = False

    # Load the untrained model
    vlm_name_or_path = getattr(config.vlm_config, "_name_or_path", None)
    if vlm_name_or_path and "2.5" in str(vlm_name_or_path):
        # ColQwen2.5 checkpoints ship as PEFT adapters on top of a Qwen2.5-VL base: load the base
        # model, apply the adapters, then merge them into plain weights before conversion.
        print(
            "Detected colqwen2.5 adapters in vlm_config; loading base model %s and merging PEFT weights."
            % vlm_name_or_path
        )
        base_model = AutoModel.from_pretrained(
            vlm_name_or_path,
            device_map="cpu",
            trust_remote_code=True,
        )
        peft_model = PeftModel.from_pretrained(base_model, model_id)
        model = peft_model.merge_and_unload()
    else:
        model = ColQwen2ForRetrieval(config=config).to("cpu").eval()
        print("Created model with new config and randomly initialized weights")

    # NOTE: The new model was initialized with float32 weights. We need to convert it to the desired precision.
    # There are two ways to set the model's dtype:
    # - Using `model.from_pretrained(..., dtype=dtype_precision)` doesn't convert the hyperparameters to the desired precision.
    # - Using `model.to(dtype_precision)` converts all values - including the hyperparameters - to the desired precision.
    # The following snippet allows a fine-grained control over the model's dtype, making sure that all
    # the new weights' dtypes match the original model.
    for param in model.parameters():
        param.data = param.data.to(ORIGINAL_DTYPE)
    print(f"Converted the new model weights to `{ORIGINAL_DTYPE}`")

    # Load the original weights
    model.load_state_dict(original_state_dict)
    print("Loaded original model weights")

    # Sanity check: ensure all keys are the same
    state_dict_keys_old = set(original_state_dict.keys())
    state_dict_keys_new = set(model.state_dict().keys())
    disjoint_keys = state_dict_keys_old.symmetric_difference(state_dict_keys_new)
    if disjoint_keys:
        raise ValueError(f"Incompatible keys: {disjoint_keys}")

    # Save the model
    if push_to_hub:
        model.push_to_hub(output_dir, private=True)
        print(f"Model pushed to the hub at `{output_dir}`")
    else:
        Path(output_dir).mkdir(exist_ok=True, parents=True)
        model.save_pretrained(output_dir)
        print(f"Model saved to `{output_dir}`")
if __name__ == "__main__":
    # CLI entry point: parse the conversion options and run the weight conversion.
    parser = argparse.ArgumentParser(
        description="""
    This script converts the original ColQwen2 model to the HF model format.
    Don't forget to manually upload the processor-related files to the HF model repository
    after running this script.
    Example usage:
    ```bash
    python src/transformers/models/colqwen2/convert_colqwen2_weights_to_hf.py \
        --model_id vidore/colqwen2-v1.0-merged \
        --revision eeccbae1d44bdcb0c83b1788127a2b2cad7d718e \
        --original_vlm_name_or_path Qwen/Qwen2-VL-2B-Instruct \
        --output_dir vidore/colqwen2-v1.0-hf-internal \
        --push_to_hub
    ```
    """
    )
    parser.add_argument(
        "--model_id",
        help="Model ID of the original model to convert",
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument(
        "--push_to_hub",
        help="Whether or not to push the model to the hub at `output_dir` instead of saving it locally",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--revision",
        help="Revision of the model to download",
        default=None,
    )
    parser.add_argument(
        "--original_vlm_name_or_path",
        help="Name or path of the original VLM backbone model",
        default=None,
    )
    args = parser.parse_args()

    convert_colqwen2_weights_to_hf(
        model_id=args.model_id,
        output_dir=args.output_dir,
        push_to_hub=args.push_to_hub,
        revision=args.revision,
        original_vlm_name_or_path=args.original_vlm_name_or_path,
    )
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/colqwen2/convert_colqwen2_weights_to_hf.py",
"license": "Apache License 2.0",
"lines": 193,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/colqwen2/modular_colqwen2.py | # Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from ...cache_utils import Cache
from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput, is_valid_image
from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import ModelOutput, auto_docstring, can_return_tuple, is_torch_available, logging
from ..colpali.modeling_colpali import ColPaliForRetrieval, ColPaliPreTrainedModel
from ..colpali.processing_colpali import ColPaliProcessor
from .configuration_colqwen2 import ColQwen2Config
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class ColQwen2ProcessorKwargs(ProcessingKwargs, total=False):
    """Typed kwargs accepted by `ColQwen2Processor.__call__`, with ColQwen2-specific defaults."""

    # Defaults merged by `ProcessorMixin._merge_kwargs`; callers can override any entry per call.
    _defaults = {
        "text_kwargs": {
            "padding": "longest",
        },
        "images_kwargs": {
            "data_format": "channels_first",
            "do_convert_rgb": True,
        },
        "common_kwargs": {"return_tensors": "pt"},
    }
class ColQwen2Processor(ColPaliProcessor):
    """
    Processor for ColQwen2 retrieval: document images are routed through the Qwen2-VL image
    processor with a fixed visual prompt, while queries are tokenized with a query prefix and an
    augmentation suffix. A single call processes either images or text, never both.
    """

    def __init__(
        self,
        image_processor=None,
        tokenizer=None,
        chat_template=None,
        visual_prompt_prefix: str | None = None,
        query_prefix: str | None = None,
        **kwargs,
    ):
        r"""
        visual_prompt_prefix (`str`, *optional*, defaults to `"<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>Describe the image.<|im_end|><|endoftext|>"`):
            A string that gets tokenized and prepended to the image tokens.
        query_prefix (`str`, *optional*, defaults to `"Query: "`):
            A prefix to be used for the query.
        """
        # NOTE(review): calls ProcessorMixin.__init__ directly instead of super().__init__ —
        # presumably to bypass ColPaliProcessor-specific init; confirm if that parent gains logic.
        ProcessorMixin.__init__(self, image_processor, tokenizer, chat_template=chat_template)
        # Prefer the tokenizer's declared special tokens; fall back to the Qwen2-VL defaults.
        self.image_token = "<|image_pad|>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token
        self.video_token = "<|video_pad|>" if not hasattr(tokenizer, "video_token") else tokenizer.video_token

        self.visual_prompt_prefix = visual_prompt_prefix or (
            "<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>Describe the image.<|im_end|><|endoftext|>"
        )
        self.query_prefix = query_prefix or "Query: "

    def __call__(
        self,
        images: ImageInput | None = None,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] = None,
        **kwargs: Unpack[ColQwen2ProcessorKwargs],
    ) -> BatchFeature:
        r"""
        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        """
        output_kwargs = self._merge_kwargs(
            ColQwen2ProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        suffix = output_kwargs["text_kwargs"].pop("suffix", None)

        # `suffix` doubles as the signal for emitting token_type_ids (used below to build labels).
        return_token_type_ids = suffix is not None

        if text is None and images is None:
            raise ValueError("Either text or images must be provided")
        if text is not None and images is not None:
            raise ValueError("Only one of text or images can be processed at a time")

        if images is not None:
            # Normalize input to a list (or nested list) of images.
            if is_valid_image(images):
                images = [images]
            elif isinstance(images, list) and is_valid_image(images[0]):
                pass
            elif not (isinstance(images, list) and isinstance(images[0], list) and is_valid_image(images[0][0])):
                raise ValueError("images must be an image, list of images or list of list of images")

            # Every document image gets the same fixed visual prompt.
            texts_doc = [self.visual_prompt_prefix] * len(images)

            image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
            image_grid_thw = image_inputs["image_grid_thw"]

            if image_grid_thw is not None:
                # Expand each image placeholder into one token per merged patch
                # (merge_size**2 raw patches collapse into a single image token).
                merge_length = self.image_processor.merge_size**2
                index = 0
                for i in range(len(texts_doc)):
                    while self.image_token in texts_doc[i]:
                        # Use a temporary `<|placeholder|>` marker so freshly inserted image
                        # tokens are not re-matched by the `while` loop.
                        texts_doc[i] = texts_doc[i].replace(
                            self.image_token, "<|placeholder|>" * (image_grid_thw[index].prod() // merge_length), 1
                        )
                        index += 1
                    texts_doc[i] = texts_doc[i].replace("<|placeholder|>", self.image_token)

            text_inputs = self.tokenizer(
                texts_doc,
                return_token_type_ids=False,
                **output_kwargs["text_kwargs"],
            )

            return_data = BatchFeature(data={**text_inputs, **image_inputs})

            # NOTE: The following adjustment ensures correct behavior with DDP on multiple GPUs.
            offsets = return_data["image_grid_thw"][:, 1] * return_data["image_grid_thw"][:, 2]  # (batch_size,)

            # Split the pixel_values tensor into a list of tensors, one per image
            pixel_values = list(
                torch.split(return_data["pixel_values"], offsets.tolist())
            )  # [(num_patches_image_0, pixel_values), ..., (num_patches_image_n, pixel_values)]

            # Pad the list of pixel_value tensors to the same length along the sequence dimension
            return_data["pixel_values"] = torch.nn.utils.rnn.pad_sequence(
                pixel_values, batch_first=True
            )  # (batch_size, max_num_patches, pixel_values)

            if return_token_type_ids:
                # Mask prompt positions (token_type_id == 0) so the loss only covers the suffix.
                labels = return_data["input_ids"].masked_fill(return_data["token_type_ids"] == 0, -100)
                return_data.update({"labels": labels})

            return return_data

        elif text is not None:
            if isinstance(text, str):
                text = [text]
            elif not (isinstance(text, list) and isinstance(text[0], str)):
                raise ValueError("Text must be a string or a list of strings")

            if suffix is None:
                # Default query augmentation: repeat the augmentation token 10 times.
                suffix = self.query_augmentation_token * 10

            texts_query: list[str] = []
            for query in text:
                augmented_query = self.query_prefix + query + suffix
                texts_query.append(augmented_query)

            batch_query = self.tokenizer(
                texts_query,
                return_token_type_ids=False,
                **output_kwargs["text_kwargs"],
            )

            return batch_query

    def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
        """
        Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.

        Args:
            image_sizes (`list[list[int]]`, *optional*):
                The input sizes formatted as (height, width) per each image.

        Returns:
            `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
            input modalities, along with other useful data.
        """
        vision_data = {}
        if image_sizes is not None:
            images_kwargs = ColQwen2ProcessorKwargs._defaults.get("images_kwargs", {})
            images_kwargs.update(kwargs)

            merge_size = images_kwargs.get("merge_size", None) or self.image_processor.merge_size

            num_image_patches = [
                self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
                for image_size in image_sizes
            ]
            # One image token per merge_size**2 raw patches.
            num_image_tokens = [(num_patches // merge_size**2) for num_patches in num_image_patches]
            vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})

        return MultiModalData(**vision_data)

    @property
    def model_input_names(self):
        """Combined tokenizer + image-processor input names, with video-only keys removed."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names

        # ColQwen doesn't process videos. Make a copy of list when removing
        # otherwise `self.feature_extractor.model_input_names` is also modified
        image_processor_input_names = [
            name for name in image_processor_input_names if name not in ["pixel_values_videos", "video_grid_thw"]
        ]

        return tokenizer_input_names + image_processor_input_names
class ColQwen2PreTrainedModel(ColPaliPreTrainedModel):
    """Base pretrained-model class for ColQwen2; reuses ColPali's weight-init/loading plumbing unchanged."""

    pass
@dataclass
@auto_docstring(
    custom_intro="""
    Base class for ColQwen2 embeddings output.
    """
)
class ColQwen2ForRetrievalOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction).
    embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
        The embeddings of the model.
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    """

    loss: torch.FloatTensor | None = None
    embeddings: torch.Tensor | None = None
    past_key_values: Cache | None = None
    # Hidden states / attentions of the underlying VLM, populated only when the
    # corresponding `output_hidden_states` / `output_attentions` flags are set.
    hidden_states: tuple[torch.FloatTensor] | None = None
    attentions: tuple[torch.FloatTensor] | None = None
@auto_docstring(
    custom_intro="""
    Following the ColPali approach, ColQwen2 leverages VLMs to construct efficient multi-vector embeddings directly
    from document images (“screenshots”) for document retrieval. The model is trained to maximize the similarity
    between these document embeddings and the corresponding query embeddings, using the late interaction method
    introduced in ColBERT.

    Using ColQwen2 removes the need for potentially complex and brittle layout recognition and OCR pipelines with
    a single model that can take into account both the textual and visual content (layout, charts, ...) of a document.

    ColQwen2 is part of the ColVision model family, which was introduced with ColPali in the following paper:
    [*ColPali: Efficient Document Retrieval with Vision Language Models*](https://huggingface.co/papers/2407.01449).
    """
)
class ColQwen2ForRetrieval(ColPaliForRetrieval):
    # Converted checkpoints already use the HF key layout; no key remapping needed at load time.
    _checkpoint_conversion_mapping = {}

    def __init__(self, config: ColQwen2Config):
        super().__init__(config)
        # NOTE(review): drops the tied-weights bookkeeping inherited from ColPali — presumably
        # because this retrieval head does not tie/use an LM head; confirm against ColPali.
        del self._tied_weights_keys

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        labels: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        pixel_values: torch.Tensor | None = None,
        image_grid_thw: torch.LongTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs,
    ) -> ColQwen2ForRetrievalOutput:
        r"""
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        """
        # Handle the custom "pixel_values" input obtained with `ColQwen2Processor` through unpadding
        if pixel_values is not None and image_grid_thw is not None:
            # Rows of image_grid_thw are (temporal, height, width) patch counts per image (see
            # docstring above); height * width gives the number of valid patches for each image.
            offsets = image_grid_thw[:, 1] * image_grid_thw[:, 2]  # (batch_size,)
            arange = torch.arange(pixel_values.shape[1], device=offsets.device)  # (max_len,)
            mask = arange.unsqueeze(0) < offsets.unsqueeze(1)  # (batch_size, max_len)
            # Drop the padding patches the processor added, flattening back to one patch list.
            pixel_values = pixel_values[mask]  # (total_valid_patches, channels, height, width)

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Custom data preparation to fix an issue with the gradient flow when training with multiple GPUs.
        if inputs_embeds is None:
            inputs_embeds = self.vlm.get_input_embeddings()(input_ids)

            if pixel_values is not None:
                image_embeds = self.vlm.model.visual(
                    pixel_values, grid_thw=image_grid_thw, return_dict=True
                ).pooler_output
                # Scatter the visual embeddings into the positions of the image placeholder tokens.
                image_mask = (
                    (input_ids == self.config.vlm_config.image_token_id).unsqueeze(-1).expand_as(inputs_embeds)
                )
                image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
                inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)

        vlm_output = self.vlm.model(
            input_ids=None,
            position_ids=position_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        vlm_hidden_states = vlm_output.hidden_states if output_hidden_states else None

        last_hidden_states = vlm_output[0]  # (batch_size, sequence_length, hidden_size)
        # Project to the multi-vector embedding space in the projection layer's dtype.
        proj_dtype = self.embedding_proj_layer.weight.dtype
        embeddings = self.embedding_proj_layer(last_hidden_states.to(proj_dtype))  # (batch_size, sequence_length, dim)

        # L2 normalization
        embeddings = embeddings / embeddings.norm(dim=-1, keepdim=True)  # (batch_size, sequence_length, dim)

        if attention_mask is not None:
            # Zero embeddings at padding positions so they don't contribute to late-interaction scores.
            embeddings = embeddings * attention_mask.unsqueeze(-1)  # (batch_size, sequence_length, dim)

        return ColQwen2ForRetrievalOutput(
            embeddings=embeddings,
            past_key_values=vlm_output.past_key_values,
            hidden_states=vlm_hidden_states,
            attentions=vlm_output.attentions,
        )
# Public API of this module.
__all__ = [
    "ColQwen2ForRetrieval",
    "ColQwen2PreTrainedModel",
    "ColQwen2Processor",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/colqwen2/modular_colqwen2.py",
"license": "Apache License 2.0",
"lines": 294,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/colqwen2/test_modeling_colqwen2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch ColQwen2 model."""
import unittest
from typing import ClassVar
import pytest
import torch
from datasets import load_dataset
from tests.test_configuration_common import ConfigTester
from tests.test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from transformers import BitsAndBytesConfig, is_torch_available
from transformers.models.colqwen2.configuration_colqwen2 import ColQwen2Config
from transformers.models.colqwen2.modeling_colqwen2 import ColQwen2ForRetrieval, ColQwen2ForRetrievalOutput
from transformers.models.colqwen2.processing_colqwen2 import ColQwen2Processor
from transformers.testing_utils import (
Expectations,
cleanup,
require_bitsandbytes,
require_torch,
require_vision,
slow,
torch_device,
)
if is_torch_available():
import torch
class ColQwen2ForRetrievalModelTester:
    """Builds a tiny ColQwen2 configuration and synthetic inputs for the common model tests."""

    def __init__(
        self,
        parent,
        ignore_index=-100,
        pad_token_id=2,
        projector_hidden_act="gelu",
        seq_length=11,
        vision_feature_select_strategy="default",
        vision_feature_layer=-1,
        projection_dim=32,
        is_training=False,
        use_cache=False,
        vlm_config={
            "_name_or_path": "Qwen/Qwen2-VL-2B-Instruct",
            "bos_token_id": 0,
            "eos_token_id": 1,
            "vision_start_token_id": 3,
            "image_token_id": 4,
            "video_token_id": 5,
            "hidden_size": 64,
            "intermediate_size": 2,
            "max_window_layers": 2,
            "model_type": "qwen2_vl",
            "num_attention_heads": 2,
            "num_hidden_layers": 2,
            "num_key_value_heads": 2,
            "rms_norm_eps": 1e-06,
            "rope_parameters": {"mrope_section": [4, 6, 6], "rope_type": "default", "type": "default"},
            "sliding_window": 32768,
            "tie_word_embeddings": True,
            "vision_config": {
                "depth": 2,
                "embed_dim": 32,
                "hidden_act": "quick_gelu",
                "hidden_size": 64,
                "mlp_ratio": 4,
                "num_heads": 4,
                "patch_size": 14,
                "in_chans": 3,
                "spatial_merge_size": 1,
                "temporal_patch_size": 2,
            },
            "vision_end_token_id": 151653,
            "vision_token_id": 151654,
            "vocab_size": 99,
        },
        embedding_dim=32,
        initializer_range=0.02,
    ):
        # NOTE(review): `vlm_config` is a mutable default argument; this is only safe as long
        # as the tester reads it without mutating it (which is the case here).
        self.parent = parent
        self.ignore_index = ignore_index
        self.pad_token_id = pad_token_id
        # `image_token_index` is set to 0 to pass "resize_embeddings" test, do not modify
        self.image_token_index = 0
        self.image_token_id = vlm_config["image_token_id"]
        self.video_token_id = vlm_config["video_token_id"]
        # NOTE(review): this overwrites the `pad_token_id` parameter above with the eos id —
        # presumably intentional (eos doubles as pad), but worth confirming.
        self.pad_token_id = vlm_config["eos_token_id"]
        self.vision_start_token_id = vlm_config["vision_start_token_id"]
        self.projector_hidden_act = projector_hidden_act
        self.vision_feature_select_strategy = vision_feature_select_strategy
        self.vision_feature_layer = vision_feature_layer
        self.image_size = 56
        self.num_image_tokens = 4
        self.seq_length = seq_length + self.num_image_tokens
        self.projection_dim = projection_dim
        self.num_hidden_layers = vlm_config["num_hidden_layers"]
        self.vocab_size = vlm_config["vocab_size"]
        self.hidden_size = vlm_config["hidden_size"]
        self.num_attention_heads = vlm_config["num_attention_heads"]
        self.is_training = is_training
        self.batch_size = 3
        self.num_channels = vlm_config["vision_config"]["in_chans"]
        self.encoder_seq_length = self.seq_length
        self.use_cache = use_cache
        self.vlm_config = vlm_config
        self.embedding_dim = embedding_dim
        self.initializer_range = initializer_range

    def get_config(self):
        """Return a small `ColQwen2Config` built from the tester's settings."""
        return ColQwen2Config(
            vlm_config=self.vlm_config,
            embedding_dim=self.embedding_dim,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs(self):
        """Create the config plus random pixel values / image grid matching the patch geometry."""
        config = self.get_config()
        patch_size = config.vlm_config.vision_config.patch_size
        temporal_patch_size = config.vlm_config.vision_config.temporal_patch_size

        # NOTE: Assume all inputs are square images of the same size.
        num_patches = (self.image_size // patch_size) ** 2
        pixel_values = floats_tensor(
            [
                self.batch_size * num_patches,
                self.num_channels * (patch_size**2) * temporal_patch_size,
            ]
        )

        # Hardcoded image grid size: do not change unless you modified image size or patch size!
        image_grid_thw = torch.tensor([1, 4, 4]).repeat(self.batch_size, 1)

        # NOTE: The following adjustment ensures correct behavior with DDP on multiple GPUs.
        # Line is copied from `src/transformers/models/colqwen2/processing_colqwen2.py`
        offsets = image_grid_thw[:, 1] * image_grid_thw[:, 2]  # (batch_size,)
        pixel_values = list(
            torch.split(pixel_values, offsets.tolist())
        )  # [(num_patches_image_0, pixel_values), ..., (num_patches_image_n, pixel_values)]
        pixel_values = torch.nn.utils.rnn.pad_sequence(
            pixel_values, batch_first=True
        )  # (batch_size, max_num_patches, pixel_values)

        return config, pixel_values, image_grid_thw

    def prepare_config_and_inputs_for_common(self):
        """Return `(config, inputs_dict)` in the format expected by `ModelTesterMixin`."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, image_grid_thw = config_and_inputs
        input_ids = (
            ids_tensor(
                shape=[self.batch_size, self.seq_length],
                vocab_size=config.vlm_config.vocab_size - 1,
            )
            + 1
        )
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        input_ids[:, -1] = self.pad_token_id
        input_ids[:, : self.num_image_tokens] = self.image_token_id
        # Scrub special vision-token ids from the random sequence.
        # NOTE(review): this also replaces the image tokens written on the line above
        # (image_token_id -> pad_token_id) — looks intentional upstream, but worth confirming.
        input_ids[input_ids == self.video_token_id] = self.pad_token_id
        input_ids[input_ids == self.image_token_id] = self.pad_token_id
        input_ids[input_ids == self.vision_start_token_id] = self.pad_token_id

        inputs_dict = {
            "input_ids": input_ids,
            "pixel_values": pixel_values,
            "image_grid_thw": image_grid_thw,
            "attention_mask": attention_mask,
            "labels": input_ids,
        }
        return config, inputs_dict
@require_torch
class ColQwen2ForRetrievalModelTest(ModelTesterMixin, unittest.TestCase):
    """
    Model tester for `ColQwen2ForRetrieval`.
    """

    all_model_classes = (ColQwen2ForRetrieval,) if is_torch_available() else ()
    test_resize_embeddings = True
    test_torch_exportable = False

    def setUp(self):
        self.model_tester = ColQwen2ForRetrievalModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ColQwen2Config, has_text_modality=False)

    def test_inputs_embeds(self):
        # Overridden from the mixin: `pixel_values` must be dropped so the model runs
        # on precomputed text embeddings alone.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = self._prepare_for_class(inputs_dict, model_class)

            input_ids = inputs["input_ids"]
            del inputs["input_ids"]
            del inputs["pixel_values"]

            wte = model.get_input_embeddings()
            inputs["inputs_embeds"] = wte(input_ids)

            with torch.no_grad():
                model(**inputs)

    # overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs
    # while some other models require pixel_values to be present
    def test_inputs_embeds_matches_input_ids(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = self._prepare_for_class(inputs_dict, model_class)
            input_ids = inputs["input_ids"]
            del inputs["input_ids"]
            del inputs["pixel_values"]

            inputs_embeds = model.get_input_embeddings()(input_ids)

            with torch.no_grad():
                out_ids = model(input_ids=input_ids, **inputs)[0]
                out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
            # Feeding ids or their embeddings must produce identical outputs.
            self.assertTrue(torch.allclose(out_embeds, out_ids))

    @slow
    @require_vision
    def test_colqwen2_forward_inputs(self):
        # Smoke test: a full forward pass returns a `ColQwen2ForRetrievalOutput`.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = self._prepare_for_class(inputs_dict, model_class)

            with torch.no_grad():
                outputs = model(**inputs, return_dict=True)

            self.assertIsInstance(outputs, ColQwen2ForRetrievalOutput)

    @unittest.skip(reason="Some undefined behavior encountered with test versions of Qwen2-VL. Skip for now.")
    def test_model_parallelism(self):
        pass

    @unittest.skip(reason="Pass because ColQwen2 requires `attention_mask is not None`")
    def test_sdpa_can_dispatch_on_flash(self):
        pass

    @unittest.skip(reason="Pass because ColQwen2 requires `attention_mask is not None`")
    @pytest.mark.torch_compile_test
    def test_sdpa_can_compile_dynamic(self):
        pass

    @unittest.skip(reason="This architecture doesn't support weight tying/untying.")
    def test_load_save_without_tied_weights(self):
        pass
@require_torch
class ColQwen2ModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end retrieval tests against real ColQwen2 checkpoints from the Hub."""

    model_name: ClassVar[str] = "vidore/colqwen2-v1.0-hf"

    def setUp(self):
        self.processor = ColQwen2Processor.from_pretrained(self.model_name)

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    @require_bitsandbytes
    @slow
    def test_model_integration_test(self):
        """
        Test if the model is able to retrieve the correct pages for a small and easy dataset.
        """
        model = ColQwen2ForRetrieval.from_pretrained(
            self.model_name,
            dtype=torch.float16,
            quantization_config=BitsAndBytesConfig(load_in_8bit=True),
        ).eval()

        # Load the test dataset
        ds = load_dataset("hf-internal-testing/document-visual-retrieval-test", split="test")

        # Preprocess the examples
        batch_images = self.processor(images=ds["image"]).to(torch_device)
        batch_queries = self.processor(text=ds["query"]).to(torch_device)

        # Run inference
        with torch.inference_mode():
            image_embeddings = model(**batch_images).embeddings
            query_embeddings = model(**batch_queries).embeddings

        # Compute retrieval scores
        scores = self.processor.score_retrieval(
            query_embeddings=query_embeddings,
            passage_embeddings=image_embeddings,
        )  # (num_queries, num_passages)

        assert scores.ndim == 2, f"Expected 2D tensor, got {scores.ndim}"
        assert scores.shape == (len(ds), len(ds)), f"Expected shape {(len(ds), len(ds))}, got {scores.shape}"

        # Check if the maximum scores per row are in the diagonal of the matrix score
        self.assertTrue((scores.argmax(axis=1) == torch.arange(len(ds), device=scores.device)).all())

        # Further validation: fine-grained check, with a hardcoded score from the original Hf implementation.
        expectations = Expectations(
            {
                ("cuda", 7): [
                    [15.0938, 8.3203, 15.0391],
                    [9.6328, 16.9062, 10.5312],
                    [15.6562, 12.2656, 20.2969],
                ],
                ("cuda", 8): [
                    [16.2812, 8.3672, 14.5703],
                    [9.4922, 17.1875, 10.3281],
                    [15.0312, 11.3984, 20.1719],
                ],
            }
        )
        expected_scores = torch.tensor(expectations.get_expectation(), dtype=scores.dtype)
        assert torch.allclose(scores, expected_scores, atol=1e-3), f"Expected scores {expected_scores}, got {scores}"

    @slow
    def test_model_integration_test_2(self):
        """
        Test if the model is able to retrieve the correct pages for a small and easy dataset.
        This test uses a ColQwen2.5 checkpoint that is compatible with the ColQwen2 architecture.
        """
        model = ColQwen2ForRetrieval.from_pretrained(
            "Sahil-Kabir/colqwen2.5-v0.2-hf",
            device_map=torch_device,
            dtype=torch.bfloat16,
        ).eval()
        processor = ColQwen2Processor.from_pretrained("Sahil-Kabir/colqwen2.5-v0.2-hf", trust_remote_code=True)

        # Load the test dataset
        ds = load_dataset("hf-internal-testing/document-visual-retrieval-test", split="test")

        # Preprocess the examples
        batch_images = processor(images=list(ds["image"])).to(torch_device)
        batch_queries = processor(text=list(ds["query"])).to(torch_device)

        with torch.inference_mode():
            image_embeddings = model(**batch_images).embeddings
            query_embeddings = model(**batch_queries).embeddings

        # Compute retrieval scores
        scores = processor.score_retrieval(
            query_embeddings=query_embeddings,
            passage_embeddings=image_embeddings,
        )

        assert scores.ndim == 2, f"Expected 2D tensor, got {scores.ndim}"
        assert scores.shape == (len(ds), len(ds)), f"Expected shape {(len(ds), len(ds))}, got {scores.shape}"

        # Check if the maximum scores per row are in the diagonal of the matrix score
        self.assertTrue((scores.argmax(axis=1) == torch.arange(len(ds), device=scores.device)).all())

        # Further validation: fine-grained check, with a hardcoded score from the original Hf implementation.
        expectations = Expectations(
            {
                ("cuda", 8): [
                    [16.3750, 10.9375, 14.7500],
                    [11.3750, 16.8750, 12.0625],
                    [15.3125, 13.1250, 21.5000],
                ]
            }
        )
        expected_scores = torch.tensor(expectations.get_expectation(), dtype=scores.dtype)
        assert torch.allclose(scores, expected_scores, atol=0.15), f"Expected scores {expected_scores}, got {scores}"
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/colqwen2/test_modeling_colqwen2.py",
"license": "Apache License 2.0",
"lines": 329,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/colqwen2/test_processing_colqwen2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the ColQwen2 processor."""
import unittest
import torch
from parameterized import parameterized
from transformers.models.colqwen2.processing_colqwen2 import ColQwen2Processor
from transformers.testing_utils import get_tests_dir, require_torch, require_vision
from transformers.utils import is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import (
ColQwen2Processor,
)
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_torch
@require_vision
class ColQwen2ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    """Test suite for `ColQwen2Processor`.

    ColQwen2's processor accepts either text (queries) or images (documents),
    but not both in a single call. Several `ProcessorTesterMixin` tests that
    pass text and images together are therefore overridden below to exercise a
    single modality, and the inapplicable ones are skipped.
    """

    processor_class = ColQwen2Processor
    # Hub checkpoint used by the mixin to download the processor components.
    model_id = "vidore/colqwen2-v1.0-hf"

    @parameterized.expand([(1, "pt"), (2, "pt")])
    @unittest.skip("Not tested before, to investigate")
    def test_apply_chat_template_image(self, batch_size, return_tensors):
        pass

    @unittest.skip("ColQwen2Processor can only process one of text or images at a time")
    def test_processor_with_multiple_inputs(self):
        pass

    @unittest.skip("ColQwen2Processor adds a prefix and suffix to the text")
    def test_tokenizer_defaults(self):
        pass

    def test_get_num_vision_tokens(self):
        """Tests general functionality of the helper used internally in vLLM."""
        processor = self.get_processor()
        output = processor._get_num_multimodal_tokens(image_sizes=[(100, 100), (300, 100), (500, 30)])
        # One token count and one patch count are expected per input image size.
        self.assertTrue("num_image_tokens" in output)
        self.assertEqual(len(output["num_image_tokens"]), 3)
        self.assertTrue("num_image_patches" in output)
        self.assertEqual(len(output["num_image_patches"]), 3)

    def test_process_images(self):
        """`process_images` returns batched `pixel_values` with the expected shape."""
        # Processor configuration
        image_input = self.prepare_image_inputs()
        image_processor = self.get_component("image_processor")
        tokenizer = self.get_component("tokenizer", max_length=112, padding="max_length")
        image_processor.image_seq_length = 14
        # Get the processor
        processor = self.processor_class(
            tokenizer=tokenizer,
            image_processor=image_processor,
        )
        # Process the image
        batch_feature = processor.process_images(images=image_input, return_tensors="pt")
        # Assertions
        self.assertIn("pixel_values", batch_feature)
        self.assertEqual(batch_feature["pixel_values"].shape, torch.Size([1, 56, 1176]))

    def test_process_queries(self):
        """`process_queries` tokenizes a batch of text queries into `input_ids`."""
        # Inputs
        queries = [
            "Is attention really all you need?",
            "Are Benjamin, Antoine, Merve, and Jo best friends?",
        ]
        # Processor configuration
        image_processor = self.get_component("image_processor")
        tokenizer = self.get_component("tokenizer", max_length=112, padding="max_length")
        image_processor.image_seq_length = 14
        # Get the processor
        processor = self.processor_class(
            tokenizer=tokenizer,
            image_processor=image_processor,
        )
        # Process the image
        batch_feature = processor.process_queries(text=queries, return_tensors="pt")
        # Assertions
        self.assertIn("input_ids", batch_feature)
        self.assertIsInstance(batch_feature["input_ids"], torch.Tensor)
        self.assertEqual(batch_feature["input_ids"].shape[0], len(queries))

    # The following tests override the parent tests because ColQwen2Processor can only take one of images or text as input at a time.
    def test_tokenizer_defaults_preserved_by_kwargs(self):
        if "image_processor" not in self.processor_class.get_attributes():
            self.skipTest(f"image_processor attribute not present in {self.processor_class}")
        processor_components = self.prepare_components()
        processor_components["tokenizer"] = self.get_component("tokenizer", max_length=117, padding="max_length")
        processor = self.processor_class(**processor_components)
        self.skip_processor_without_typed_kwargs(processor)
        input_str = self.prepare_text_inputs()
        inputs = processor(text=input_str, return_tensors="pt")
        # Tokenizer-level defaults (padding to max_length=117) must survive processor wrapping.
        self.assertEqual(inputs[self.text_input_name].shape[-1], 117)

    def test_image_processor_defaults_preserved_by_image_kwargs(self):
        """
        We use do_rescale=True, rescale_factor=-1.0 to ensure that image_processor kwargs are preserved in the processor.
        We then check that the mean of the pixel_values is less than or equal to 0 after processing.
        Since the original pixel_values are in [0, 255], this is a good indicator that the rescale_factor is indeed applied.
        """
        if "image_processor" not in self.processor_class.get_attributes():
            self.skipTest(f"image_processor attribute not present in {self.processor_class}")
        processor_components = self.prepare_components()
        processor_components["image_processor"] = self.get_component(
            "image_processor", do_rescale=True, rescale_factor=-1.0
        )
        processor_components["tokenizer"] = self.get_component("tokenizer", max_length=117, padding="max_length")
        processor = self.processor_class(**processor_components)
        self.skip_processor_without_typed_kwargs(processor)
        image_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, return_tensors="pt")
        self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0)

    def test_kwargs_overrides_default_tokenizer_kwargs(self):
        if "image_processor" not in self.processor_class.get_attributes():
            self.skipTest(f"image_processor attribute not present in {self.processor_class}")
        processor_components = self.prepare_components()
        processor_components["tokenizer"] = self.get_component("tokenizer", padding="longest")
        processor = self.processor_class(**processor_components)
        self.skip_processor_without_typed_kwargs(processor)
        input_str = self.prepare_text_inputs()
        # Call-time kwargs must take precedence over the tokenizer's own defaults.
        inputs = processor(text=input_str, return_tensors="pt", max_length=112, padding="max_length")
        self.assertEqual(inputs[self.text_input_name].shape[-1], 112)

    def test_kwargs_overrides_default_image_processor_kwargs(self):
        if "image_processor" not in self.processor_class.get_attributes():
            self.skipTest(f"image_processor attribute not present in {self.processor_class}")
        processor_components = self.prepare_components()
        processor_components["image_processor"] = self.get_component(
            "image_processor", do_rescale=True, rescale_factor=1
        )
        processor_components["tokenizer"] = self.get_component("tokenizer", max_length=117, padding="max_length")
        processor = self.processor_class(**processor_components)
        self.skip_processor_without_typed_kwargs(processor)
        image_input = self.prepare_image_inputs()
        # The negative rescale_factor passed at call time must win over the default (1).
        inputs = processor(images=image_input, do_rescale=True, rescale_factor=-1.0, return_tensors="pt")
        self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0)

    def test_unstructured_kwargs(self):
        if "image_processor" not in self.processor_class.get_attributes():
            self.skipTest(f"image_processor attribute not present in {self.processor_class}")
        processor_components = self.prepare_components()
        processor = self.processor_class(**processor_components)
        self.skip_processor_without_typed_kwargs(processor)
        input_str = self.prepare_text_inputs()
        # Flat (unstructured) kwargs must be routed to the right sub-processor; the image
        # kwargs are simply unused here since only text is passed.
        inputs = processor(
            text=input_str,
            return_tensors="pt",
            do_rescale=True,
            rescale_factor=-1.0,
            padding="max_length",
            max_length=76,
        )
        self.assertEqual(inputs[self.text_input_name].shape[-1], 76)

    def test_unstructured_kwargs_batched(self):
        if "image_processor" not in self.processor_class.get_attributes():
            self.skipTest(f"image_processor attribute not present in {self.processor_class}")
        processor_components = self.prepare_components()
        processor = self.processor_class(**processor_components)
        self.skip_processor_without_typed_kwargs(processor)
        image_input = self.prepare_image_inputs(batch_size=2)
        inputs = processor(
            images=image_input,
            return_tensors="pt",
            do_rescale=True,
            rescale_factor=-1.0,
            padding="longest",
            max_length=76,
        )
        self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0)

    def test_doubly_passed_kwargs(self):
        if "image_processor" not in self.processor_class.get_attributes():
            self.skipTest(f"image_processor attribute not present in {self.processor_class}")
        processor_components = self.prepare_components()
        processor = self.processor_class(**processor_components)
        self.skip_processor_without_typed_kwargs(processor)
        image_input = self.prepare_image_inputs()
        # Passing the same kwarg both flat and inside `images_kwargs` is ambiguous and must raise.
        with self.assertRaises(ValueError):
            _ = processor(
                images=image_input,
                images_kwargs={"do_rescale": True, "rescale_factor": -1.0},
                do_rescale=True,
                return_tensors="pt",
            )

    def test_structured_kwargs_nested(self):
        if "image_processor" not in self.processor_class.get_attributes():
            self.skipTest(f"image_processor attribute not present in {self.processor_class}")
        processor_components = self.prepare_components()
        processor = self.processor_class(**processor_components)
        self.skip_processor_without_typed_kwargs(processor)
        input_str = self.prepare_text_inputs()
        # Define the kwargs for each modality
        all_kwargs = {
            "common_kwargs": {"return_tensors": "pt"},
            "images_kwargs": {"do_rescale": True, "rescale_factor": -1.0},
            "text_kwargs": {"padding": "max_length", "max_length": 76},
        }
        inputs = processor(text=input_str, **all_kwargs)
        self.skip_processor_without_typed_kwargs(processor)
        self.assertEqual(inputs[self.text_input_name].shape[-1], 76)

    def test_structured_kwargs_nested_from_dict(self):
        if "image_processor" not in self.processor_class.get_attributes():
            self.skipTest(f"image_processor attribute not present in {self.processor_class}")
        processor_components = self.prepare_components()
        processor = self.processor_class(**processor_components)
        self.skip_processor_without_typed_kwargs(processor)
        image_input = self.prepare_image_inputs()
        # Define the kwargs for each modality
        all_kwargs = {
            "common_kwargs": {"return_tensors": "pt"},
            "images_kwargs": {"do_rescale": True, "rescale_factor": -1.0},
            "text_kwargs": {"padding": "max_length", "max_length": 76},
        }
        # NOTE(review): images are passed but the text key is asserted — presumably the image
        # path also emits tokenized `input_ids` padded via `text_kwargs`; confirm against the processor.
        inputs = processor(images=image_input, **all_kwargs)
        self.assertEqual(inputs[self.text_input_name].shape[-1], 76)

    # Can process only text or images at a time
    def test_model_input_names(self):
        processor = self.get_processor()
        image_input = self.prepare_image_inputs()
        inputs = processor(images=image_input)
        self.assertSetEqual(set(inputs.keys()), set(processor.model_input_names))

    @unittest.skip("ColQwen2Processor can't process text+image inputs at the same time")
    def test_processor_text_has_no_visual(self):
        pass

    @unittest.skip("ColQwen2Processor adds a batch dimension to the pixel_values")
    def test_image_processor_defaults(self):
        pass

    @unittest.skip("ColQwen2Processor can't process text+image inputs at the same time")
    def test_get_num_multimodal_tokens_matches_processor_call(self):
        pass
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/colqwen2/test_processing_colqwen2.py",
"license": "Apache License 2.0",
"lines": 230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/causal_lm_tester.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
from inspect import signature
import pytest
from parameterized import parameterized
from transformers import AutoModelForCausalLM, PreTrainedConfig, set_seed
from transformers.models.auto.auto_factory import getattribute_from_module
from transformers.testing_utils import (
_COMMON_MODEL_NAMES_MAP,
is_flaky,
require_flash_attn,
require_torch_accelerator,
slow,
)
from .test_configuration_common import ConfigTester
from .test_modeling_common import (
GenerationTesterMixin,
ModelTesterMixin,
ids_tensor,
is_torch_available,
require_torch,
torch_device,
)
from .test_pipeline_mixin import PipelineTesterMixin
from .test_training_mixin import TrainingTesterMixin
if is_torch_available():
import torch
class CausalLMModelTester:
    """Shared tester for decoder-only (causal LM) model families.

    Subclasses normally only set `base_model_class`; the config class and the
    task-specific head classes are then inferred from the public classes in the
    model's module. The tester builds tiny random configs and inputs consumed by
    the common model/generation/pipeline tests.
    """

    # If the model follows the standard naming conventions, only `base_model_class` needs to be set (the others are
    # inferred from available public classes).
    base_model_class = None
    # ⚠️ Don't set these unless the model does NOT follow the standard naming conventions ⚠️
    config_class = None
    causal_lm_class = None
    question_answering_class = None
    sequence_classification_class = None
    token_classification_class = None
    # These attributes are required after the initialization phase of the tester.
    _required_attributes = ("base_model_class", "config_class", "causal_lm_class")
    # Arguments that should be passed to the config class even if not in its signature
    forced_config_args = ["pad_token_id"]

    @classmethod
    def _verify_and_infer_model_attributes(cls):
        """
        Verifies that the required tester attributes are set correctly, and infers unset tester attributes.
        Intentionally nitpicks the tester class attributes, to prevent human errors.
        """
        # `base_model_class` is mandatory, and it must be a valid model class.
        base_model_class = getattr(cls, "base_model_class")
        if base_model_class is None or "PreTrainedModel" not in str(base_model_class.__mro__):
            raise ValueError(
                f"You have inherited from `CausalLMModelTester` but did not set the `base_model_class` "
                f"attribute to a valid model class. (It's set to `{base_model_class}`)"
            )
        # Infers other model classes from the base class name and available public classes, if the corresponding
        # attributes are not set explicitly. If they are set, they must be set to a valid class (config or model).
        model_name = base_model_class.__name__.replace("Model", "")
        base_class_module = ".".join(base_model_class.__module__.split(".")[:-1])
        for tester_attribute_name, model_class_termination in _COMMON_MODEL_NAMES_MAP.items():
            if getattr(cls, tester_attribute_name) is None:
                try:
                    model_class = getattribute_from_module(base_class_module, model_name + model_class_termination)
                    setattr(cls, tester_attribute_name, model_class)
                except ValueError:
                    # The module simply doesn't define this task head — that's fine.
                    pass
            else:
                if tester_attribute_name == "config_class":
                    if "PreTrainedConfig" not in str(getattr(cls, tester_attribute_name).__mro__):
                        raise ValueError(
                            f"You have inherited from `CausalLMModelTester` but did not set the "
                            f"`{tester_attribute_name}` attribute to a valid config class. (It's set to "
                            f"`{getattr(cls, tester_attribute_name)}`). If the config class follows a standard "
                            f"naming convention, you should unset `{tester_attribute_name}`."
                        )
                else:
                    if "PreTrainedModel" not in str(getattr(cls, tester_attribute_name).__mro__):
                        raise ValueError(
                            f"You have inherited from `CausalLMModelTester` but did not set the "
                            f"`{tester_attribute_name}` attribute to a valid model class. (It's set to "
                            f"`{getattr(cls, tester_attribute_name)}`). If the model class follows a standard "
                            f"naming convention, you should unset `{tester_attribute_name}`."
                        )
        # After inferring, if we don't have the basic classes set, we raise an error.
        for required_attribute in cls._required_attributes:
            if getattr(cls, required_attribute) is None:
                raise ValueError(
                    f"You have inherited from `CausalLMModelTester` but did not set the `{required_attribute}` "
                    "attribute. It can't be automatically inferred either -- this means it is not following a "
                    "standard naming convention. If this is intentional, please set the attribute explicitly."
                )
        # To prevent issues with typos, no other attributes can be set to a model class
        for instance_attribute_name, instance_attribute in cls.__dict__.items():
            if (
                (
                    instance_attribute_name not in _COMMON_MODEL_NAMES_MAP
                    and instance_attribute_name != "base_model_class"
                )
                and isinstance(instance_attribute, type)
                and "PreTrainedModel" in str(instance_attribute.__mro__)
            ):
                raise ValueError(
                    f"You have inherited from `CausalLMModelTester` but set an unexpected attribute to a model class "
                    f"(`{instance_attribute_name}` is set to `{instance_attribute}`). "
                    f"Only the following attributes can be set to model classes: {_COMMON_MODEL_NAMES_MAP.keys()}."
                )

    @property
    def all_model_classes(self):
        """All non-None model classes for this family (base model + task heads)."""
        # Models that set `all_model_classes` in their `XXXModelTest` class must have a new class that doesn't fit
        # any of the common classes.
        return [
            model_class
            for model_class in (
                self.base_model_class,
                self.causal_lm_class,
                self.question_answering_class,
                self.sequence_classification_class,
                self.token_classification_class,
            )
            if model_class is not None
        ]

    @property
    def pipeline_model_mapping(self):
        """Default pipeline-task → model-class mapping, based on available task heads."""
        # This is the default pipeline mapping.
        mapping = {
            "feature-extraction": self.base_model_class,
            "text-generation": self.causal_lm_class,
        }
        if self.question_answering_class is not None:
            mapping["question-answering"] = self.question_answering_class
        if self.sequence_classification_class is not None:
            mapping["text-classification"] = self.sequence_classification_class
        if self.token_classification_class is not None:
            mapping["token-classification"] = self.token_classification_class
        if self.sequence_classification_class is not None:
            mapping["zero-shot"] = self.sequence_classification_class
        return mapping

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        num_key_value_heads=2,
        intermediate_size=32,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        is_decoder=False,
        scope=None,
        expert_interval=1,
        moe_layer_start_index=0,
        moe_intermediate_size=16,
        shared_expert_intermediate_size=36,
        shared_expert_gate=True,
        moe_num_shared_experts=2,
        num_experts_per_tok=2,
        num_experts=8,
        mamba_n_groups=1,
        mamba_n_heads=16,
        mamba_d_state=16,
        mamba_d_conv=4,
        mamba_expand=2,
        mamba_chunk_size=16,
    ):
        """Stores the tiny-model hyperparameters used to build test configs and inputs.

        `parent` is the `unittest.TestCase` driving the tester; the remaining
        arguments mirror config fields (including MoE- and Mamba-specific ones
        that only some architectures consume — see `get_config`).
        """
        self._verify_and_infer_model_attributes()
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.scope = scope
        self.head_dim = self.hidden_size // self.num_attention_heads
        self.is_decoder = is_decoder
        self.expert_interval = expert_interval
        self.moe_layer_start_index = moe_layer_start_index
        self.moe_intermediate_size = moe_intermediate_size
        self.shared_expert_intermediate_size = shared_expert_intermediate_size
        self.shared_expert_gate = shared_expert_gate
        self.moe_num_shared_experts = moe_num_shared_experts
        self.num_experts_per_tok = num_experts_per_tok
        self.num_experts = num_experts
        self.mamba_n_groups = mamba_n_groups
        self.mamba_n_heads = mamba_n_heads
        self.mamba_d_state = mamba_d_state
        self.mamba_d_conv = mamba_d_conv
        self.mamba_expand = mamba_expand
        self.mamba_chunk_size = mamba_chunk_size
        self.tie_word_embeddings = False

    def prepare_config_and_inputs(self):
        """Builds a tiny config plus random ids/masks/labels for a forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            # Lower-triangular mask: token i attends to positions <= i.
            input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device))
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    @property
    def config_args(self):
        """Parameter names accepted by `config_class.__init__`."""
        return list(signature(self.config_class.__init__).parameters.keys())

    def get_config(self):
        """Builds a config from the tester attributes whose names match the config
        signature, either directly or through the config's `attribute_map` aliases."""
        kwargs = {}
        model_name_to_common_name = {v: k for k, v in self.config_class.attribute_map.items()}
        for k in self.config_args + self.forced_config_args:
            if hasattr(self, k) and k != "self":
                kwargs[k] = getattr(self, k)
            elif k in model_name_to_common_name and hasattr(self, model_name_to_common_name[k]):
                kwargs[k] = getattr(self, model_name_to_common_name[k])
        return self.config_class(**kwargs)

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Runs the base model (with and without mask) and checks the output shape."""
        model = self.base_model_class(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        """Returns `(config, inputs_dict)` in the shape the common test mixins expect."""
        config, input_ids, _, input_mask, _, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class CausalLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, TrainingTesterMixin):
    """Common test case for causal LM models.

    Subclasses must set `model_tester_class` (a `CausalLMModelTester` subclass);
    `all_model_classes` and `pipeline_model_mapping` are derived from the tester
    in `setUp` unless explicitly overridden.
    """

    model_tester_class = None
    all_model_classes = None
    pipeline_model_mapping = None

    def setUp(self):
        if self.model_tester_class is None:
            raise ValueError(
                "You have inherited from CausalLMModelTest but did not set the model_tester_class attribute."
            )
        self.model_tester = self.model_tester_class(self)
        self.config_tester = ConfigTester(self, config_class=self.model_tester.config_class)
        if self.pipeline_model_mapping is None:
            # If `all_model_classes` is not the default, maybe there are more pipeline mappings to be set.
            # NOTE(review): "Testes" in the message below is a typo for "Tests".
            if self.all_model_classes is not None:
                raise ValueError(
                    "Testes that inherit from `CausalLMModelTest` and set `all_model_classes` must manually set "
                    "`pipeline_model_mapping`."
                )
            # Otherwise, we know the pipeline mapping is the default.
            else:
                self.pipeline_model_mapping = self.model_tester.pipeline_model_mapping
        if self.all_model_classes is None:
            self.all_model_classes = self.model_tester.all_model_classes

    def test_config(self):
        """Runs the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def test_model(self):
        """Builds a tiny base model and checks its forward pass output shape."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_sequence_classification_model(self):
        if self.model_tester.sequence_classification_class is None:
            self.skipTest("Model does not support sequence classification")
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = self.model_tester.sequence_classification_class(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_sequence_classification_model_for_single_label(self):
        if self.model_tester.sequence_classification_class is None:
            self.skipTest("Model does not support sequence classification")
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = self.model_tester.sequence_classification_class(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_sequence_classification_model_for_multi_label(self):
        if self.model_tester.sequence_classification_class is None:
            self.skipTest("Model does not support sequence classification")
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        # Multi-label targets are float multi-hot vectors, one entry per label.
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = self.model_tester.sequence_classification_class(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_token_classification_model(self):
        if self.model_tester.token_classification_class is None:
            self.skipTest("Model does not support token classification")
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        token_labels = ids_tensor([self.model_tester.batch_size, self.model_tester.seq_length], config.num_labels)
        model = self.model_tester.token_classification_class(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=token_labels)
        self.assertEqual(
            result.logits.shape,
            (self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.num_labels),
        )

    def test_question_answering_model(self):
        if self.model_tester.question_answering_class is None:
            self.skipTest("Model does not support question answering")
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = self.model_tester.question_answering_class(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask)
        self.assertEqual(
            result.start_logits.shape,
            (self.model_tester.batch_size, self.model_tester.seq_length),
        )
        self.assertEqual(
            result.end_logits.shape,
            (self.model_tester.batch_size, self.model_tester.seq_length),
        )

    @parameterized.expand([("linear",), ("dynamic",), ("yarn",)])
    def test_model_rope_scaling_from_config(self, scaling_type):
        """
        Tests that we can initialize a model with RoPE scaling in the config, that it can run a forward pass, and
        that a few basic model output properties are honored.
        """
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        if not _config_supports_rope_scaling(config):
            self.skipTest("This model does not support RoPE scaling")
        partial_rotary_factor = config.rope_parameters.get("partial_rotary_factor", 1.0)
        short_input = ids_tensor([1, 10], config.vocab_size)
        # 1.5x the maximum position embeddings: long enough to trigger dynamic scaling.
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        _set_config_rope_params(
            config,
            {
                "rope_type": "default",
                "rope_theta": 10_000.0,
                "partial_rotary_factor": partial_rotary_factor,
                "original_max_position_embeddings": 16384,
            },
        )
        original_model = self.model_tester_class.base_model_class(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        _set_config_rope_params(
            config,
            {
                "rope_type": scaling_type,
                "factor": 10.0,
                "rope_theta": 10_000.0,
                "partial_rotary_factor": partial_rotary_factor,
            },
        )
        scaled_model = self.model_tester_class.base_model_class(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5)
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))

    def test_model_rope_scaling_frequencies(self):
        """Tests the frequency properties of the different RoPE scaling types on the model RoPE layer."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        if not _config_supports_rope_scaling(config):
            self.skipTest("This model does not support RoPE scaling")
        # Retrieves the RoPE layer class from the base model class. Uses `.named_modules()` to avoid hardcoding the
        # named location of the RoPE layer class.
        base_model = self.model_tester.base_model_class(config)
        possible_rope_attributes = [
            "pos_emb",
            "rotary_emb",  # most common case
            "global_rotary_emb",
            "local_rotary_emb",
        ]
        # NOTE(review): if no module name matches, `rope_class` stays unbound and the first
        # instantiation below raises NameError instead of a clearer skip/assertion.
        for name, module in base_model.named_modules():
            if any(potential_name in name for potential_name in possible_rope_attributes):
                rope_class = type(module)
                break
        scaling_factor = 10
        short_input_length = 10
        partial_rotary_factor = config.rope_parameters.get("partial_rotary_factor", 1.0)
        long_input_length = int(config.max_position_embeddings * 1.5)
        # Inputs
        x = torch.randn(
            1, dtype=torch.float32, device=torch_device
        )  # used exclusively to get the dtype and the device
        position_ids_short = torch.arange(short_input_length, dtype=torch.long, device=torch_device)
        position_ids_short = position_ids_short.unsqueeze(0)
        position_ids_long = torch.arange(long_input_length, dtype=torch.long, device=torch_device)
        position_ids_long = position_ids_long.unsqueeze(0)
        # Sanity check original RoPE
        _set_config_rope_params(
            config, {"rope_type": "default", "rope_theta": 10_000.0, "partial_rotary_factor": partial_rotary_factor}
        )
        original_rope = rope_class(config=config).to(torch_device)
        original_cos_short, original_sin_short = original_rope(x, position_ids_short)
        original_cos_long, original_sin_long = original_rope(x, position_ids_long)
        torch.testing.assert_close(original_cos_short, original_cos_long[:, :short_input_length, :])
        torch.testing.assert_close(original_sin_short, original_sin_long[:, :short_input_length, :])
        # Sanity check linear RoPE scaling
        # New position "x" should match original position with index "x/scaling_factor"
        _set_config_rope_params(
            config,
            {
                "rope_type": "linear",
                "factor": scaling_factor,
                "rope_theta": 10_000.0,
                "partial_rotary_factor": partial_rotary_factor,
            },
        )
        linear_scaling_rope = rope_class(config=config).to(torch_device)
        linear_cos_short, linear_sin_short = linear_scaling_rope(x, position_ids_short)
        linear_cos_long, linear_sin_long = linear_scaling_rope(x, position_ids_long)
        torch.testing.assert_close(linear_cos_short, linear_cos_long[:, :short_input_length, :])
        torch.testing.assert_close(linear_sin_short, linear_sin_long[:, :short_input_length, :])
        for new_position in range(0, long_input_length, scaling_factor):
            original_position = int(new_position // scaling_factor)
            torch.testing.assert_close(linear_cos_long[:, new_position, :], original_cos_long[:, original_position, :])
            torch.testing.assert_close(linear_sin_long[:, new_position, :], original_sin_long[:, original_position, :])
        # Sanity check Dynamic NTK RoPE scaling
        # Scaling should only be observed after a long input is fed. We can observe that the frequencies increase
        # with scaling_factor (or that `inv_freq` decreases)
        _set_config_rope_params(
            config,
            {
                "rope_type": "dynamic",
                "factor": scaling_factor,
                "rope_theta": 10_000.0,
                "partial_rotary_factor": partial_rotary_factor,
            },
        )
        ntk_scaling_rope = rope_class(config=config).to(torch_device)
        ntk_cos_short, ntk_sin_short = ntk_scaling_rope(x, position_ids_short)
        ntk_cos_long, ntk_sin_long = ntk_scaling_rope(x, position_ids_long)
        torch.testing.assert_close(ntk_cos_short, original_cos_short)
        torch.testing.assert_close(ntk_sin_short, original_sin_short)
        with self.assertRaises(AssertionError):
            torch.testing.assert_close(ntk_cos_long, original_cos_long)
        with self.assertRaises(AssertionError):
            torch.testing.assert_close(ntk_sin_long, original_sin_long)
        self.assertTrue((ntk_scaling_rope.inv_freq <= original_rope.inv_freq).all())
        # Sanity check Yarn RoPE scaling
        # Scaling should be over the entire input
        _set_config_rope_params(
            config,
            {
                "rope_type": "yarn",
                "factor": scaling_factor,
                "rope_theta": 10_000.0,
                "partial_rotary_factor": partial_rotary_factor,
            },
        )
        yarn_scaling_rope = rope_class(config=config).to(torch_device)
        yarn_cos_short, yarn_sin_short = yarn_scaling_rope(x, position_ids_short)
        yarn_cos_long, yarn_sin_long = yarn_scaling_rope(x, position_ids_long)
        torch.testing.assert_close(yarn_cos_short, yarn_cos_long[:, :short_input_length, :])
        torch.testing.assert_close(yarn_sin_short, yarn_sin_long[:, :short_input_length, :])
        with self.assertRaises(AssertionError):
            torch.testing.assert_close(yarn_cos_short, original_cos_short)
        with self.assertRaises(AssertionError):
            torch.testing.assert_close(yarn_sin_short, original_sin_short)
        with self.assertRaises(AssertionError):
            torch.testing.assert_close(yarn_cos_long, original_cos_long)
        with self.assertRaises(AssertionError):
            torch.testing.assert_close(yarn_sin_long, original_sin_long)

    @require_flash_attn
    @require_torch_accelerator
    @pytest.mark.flash_attn_test
    @is_flaky()
    @slow
    def test_flash_attn_2_equivalence(self):
        """Hidden states from eager and Flash Attention 2 should match within tolerance."""
        for model_class in self.all_model_classes:
            if not model_class._supports_flash_attn:
                self.skipTest(reason="Model does not support Flash Attention 2")
            # Set seed for deterministic test - ensures reproducible model initialization and inputs
            set_seed(42)
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model_fa = model_class.from_pretrained(
                    tmpdirname, dtype=torch.bfloat16, attn_implementation="flash_attention_2"
                )
                model_fa.to(torch_device)
                model = model_class.from_pretrained(tmpdirname, dtype=torch.bfloat16, attn_implementation="eager")
                model.to(torch_device)
                dummy_input = inputs_dict[model_class.main_input_name]
                dummy_input = dummy_input.to(torch_device)
                outputs = model(dummy_input, output_hidden_states=True)
                outputs_fa = model_fa(dummy_input, output_hidden_states=True)
                logits = outputs.hidden_states[-1]
                logits_fa = outputs_fa.hidden_states[-1]
                torch.testing.assert_close(logits_fa, logits, atol=3e-2, rtol=3e-2)

    def test_causal_lm_can_accept_training_kwargs(self):
        """The forward pass must accept training-only kwargs such as `num_items_in_batch`."""
        if not getattr(self.model_tester, "is_training", False):
            self.skipTest(reason="ModelTester is not configured to run training tests")
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        with tempfile.TemporaryDirectory() as tmpdir:
            with torch.device(torch_device):
                model_eager = AutoModelForCausalLM.from_config(config, dtype=torch.float32)
            model_eager.save_pretrained(tmpdir)
            model = AutoModelForCausalLM.from_pretrained(tmpdir, dtype=torch.float32, device_map=torch_device)
            inputs_dict["num_items_in_batch"] = torch.tensor(inputs_dict["input_ids"].shape[0])
            inputs_dict["labels"] = inputs_dict["input_ids"]
            _ = model(**inputs_dict, return_dict=False)
def _config_supports_rope_scaling(config: PreTrainedConfig) -> bool:
"""Returns whether a certain model config supports RoPE scaling parameterization."""
# Has rope_scaling -> model was designed with rope scaling in mind
# Has rope_theta (and no rope_scaling) -> probably an older model, but should support rope scaling as well
main_config_has_rope = hasattr(config, "rope_parameters")
sub_config_has_rope = any(
hasattr(getattr(config, sub_config), "rope_parameters") for sub_config in config.sub_configs.keys()
)
return main_config_has_rope or sub_config_has_rope
def _set_config_rope_params(config: PreTrainedConfig, rope_params: dict) -> bool:
"""Recursively sets RoPE parameters on configs and subconfigs, by duplicating the same RoPE values."""
config.rope_parameters = getattr(config, "rope_parameters", {}) or {}
config.rope_parameters.update(rope_params)
if any(name in config.__class__.__name__.lower() for name in ["gemma3", "modernbert"]):
config.rope_parameters = {layer_type: config.rope_parameters.copy() for layer_type in config.layer_types}
for sub_config in config.sub_configs.keys():
_set_config_rope_params(getattr(config, sub_config), rope_params)
return config
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/causal_lm_tester.py",
"license": "Apache License 2.0",
"lines": 598,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:examples/metrics-monitoring/metrics_example.py | # Example usage of the trace and attach_tracer decorators
from transformers.utils.metrics import attach_tracer, traced
@attach_tracer()
class ExampleClass:
def __init__(self, name):
# The attach_tracer decorator has already created self.tracer for us
self.name = name
@traced # This method will use the tracer from the class instance
def process_data(self, data):
# This method is traced and can use self.tracer
return f"Processed {data} with {self.name}"
@traced(span_name="custom_operation") # With custom span name
def special_operation(self, value):
# Also traced, with a custom span name
return value * 2
@traced(
additional_attributes=[
("name", "object.name", lambda x: x.upper()), # Using a transform function
("name", "object.fixed_value", "static_value"), # Using a fixed value
]
)
def operation_with_attributes(self):
# This will add the specified attributes to the span
return "Operation completed"
# For functions without a class, the traced decorator still works
@traced
def standalone_function(arg1, arg2):
# For functions, a tracer is created based on the module name
return arg1 + arg2
# Usage:
if __name__ == "__main__":
# With OpenTelemetry configured, these will produce traces
example = ExampleClass("test_object")
example.process_data("sample")
example.special_operation(42)
example.operation_with_attributes()
result = standalone_function(1, 2)
| {
"repo_id": "huggingface/transformers",
"file_path": "examples/metrics-monitoring/metrics_example.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/transformers:examples/pytorch/continuous_batching.py | # Copyright 2025 The HuggingFace Inc. team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import contextlib
import json
import logging
import os
import time
from itertools import cycle
import datasets
import torch
from torch.profiler import ProfilerActivity, profile
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer, CompileConfig
from transformers.generation import GenerationConfig
from transformers.generation.continuous_batching.requests import logger
def generate_without_cb(
model_id: str, sliding_window: int, attn_impl: str, batched_inputs: list[int], generation_config: GenerationConfig
) -> dict[str, str]:
# Setup model and tokenizer
model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16, attn_implementation=attn_impl)
model = model.cuda().eval()
if sliding_window > 0 and getattr(model.config, "sliding_window", None) is not None:
model.config.sliding_window = sliding_window
tokenizer = AutoTokenizer.from_pretrained(model_id)
# Generate one by one
decoded_outputs = {}
for input_ids in tqdm(batched_inputs, desc="Generating outputs without CB"):
key = " ".join(map(str, input_ids)) # This will be used to identify the output after batched generation
input_ids = torch.tensor([input_ids]).to("cuda")
attention_mask = torch.ones_like(input_ids)
outputs = model.generate(input_ids, attention_mask=attention_mask, generation_config=generation_config)
generated_tokens = outputs[0][input_ids.shape[1] :]
decoded_outputs[key] = tokenizer.decode(generated_tokens, skip_special_tokens=False)
return decoded_outputs
def maybe_setup_metrics(use_metrics: bool) -> None:
if not use_metrics:
return
try:
from opentelemetry import metrics, trace
from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
resource = Resource.create({"service.name": "transformers"})
metrics_exporter = PeriodicExportingMetricReader(
OTLPMetricExporter(
endpoint="http://localhost:9090/api/v1/otlp/v1/metrics"
), # Uses OTEL_EXPORTER_OTLP_METRICS_ENDPOINT env var
export_interval_millis=1000,
)
meter_provider = MeterProvider(resource=resource, metric_readers=[metrics_exporter])
metrics.set_meter_provider(meter_provider)
trace_exporter = OTLPSpanExporter(
endpoint="http://localhost:4318/v1/traces"
) # Uses OTEL_EXPORTER_OTLP_TRACES_ENDPOINT env var
tracer_provider = TracerProvider(resource=resource)
tracer_provider.add_span_processor(BatchSpanProcessor(trace_exporter))
trace.set_tracer_provider(tracer_provider)
except Exception as e:
print(f"Error setting up metrics: {e}")
def batch_generate(
model: AutoModelForCausalLM,
simple_batch_inputs: list,
generation_config: GenerationConfig,
tokenizer: AutoTokenizer,
displayed_samples: int = 0, # -1: no display, 0: display stats, >0: display inputs and some outputs
output_file: str | None = None,
expected_outputs: list[str] | None = None,
use_async: bool | None = None,
) -> tuple[float, float]:
# Actual batch generation
if displayed_samples >= 0:
print("--- Running CB Generation Example ---")
start_time_simple = time.time()
batch_outputs = model.generate_batch(
inputs=simple_batch_inputs,
generation_config=generation_config,
use_async=use_async,
)
end_time_simple = time.time()
if displayed_samples >= 0:
print("Done with batch generation.")
# Decode outputs
token_count = 0
data = []
for i, request in enumerate(batch_outputs):
input_text = tokenizer.decode(batch_outputs[request].prompt_ids, skip_special_tokens=False)
# The key is used to tie back to the output of unbatched generation
key = " ".join(map(str, batch_outputs[request].prompt_ids))
data.append({"input": input_text, "key": key})
# Try to decode the output
try:
output_text = tokenizer.decode(batch_outputs[request].generated_tokens, skip_special_tokens=False)
token_count += len(batch_outputs[request].generated_tokens)
data[-1]["cb_outputs"] = output_text
except Exception as e:
print(f"Decoding failed for request {request}: {e}")
data[-1]["cb_outputs"] = "__ERROR__"
continue
# Display sample if asked
if i < displayed_samples:
print("-" * 20, f"{request} Input: {input_text}", f"{request} Output: {output_text}", sep="\n")
# Compare with classic generate if asked
if expected_outputs is not None:
expected_output = expected_outputs.pop(key)
matches = output_text == expected_output # TODO: rework this for a better distance metric
data[-1]["without_cb"] = expected_output
data[-1]["matches"] = matches
data[-1].pop("key")
print(f"Request {i} matches" if matches else f"Request {i} does NOT match!")
# Compute stats and maybe print them
gen_time = end_time_simple - start_time_simple
tok_per_sec = token_count / gen_time
if displayed_samples >= 0:
print("-" * 20)
print("--- Finished CB Generation Example ---\n")
print(f"CB generation took: {gen_time:.2f} seconds for {token_count} tokens. {tok_per_sec:.2f}tok/s")
stats = {
"num_blocks": generation_config.num_blocks,
"max_batch_tokens": generation_config.max_batch_tokens,
"gen_time": gen_time,
"token_count": token_count,
"tok_per_sec": tok_per_sec,
}
# If an output file is provided, save the reordered data to it
data.sort(key=lambda x: x["input"])
data = [stats] + data
if output_file is not None:
with open(output_file, "w") as f:
json.dump(data, f, indent=4)
return gen_time, tok_per_sec
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Continuous batching parameters
parser.add_argument("--num-blocks", "-n", type=int, default=None)
parser.add_argument("--max-batch-tokens", "-b", type=int, default=None)
# Model parameters
parser.add_argument("--sliding-window", type=int, default=0)
parser.add_argument("--attn", type=str, default=None, help="Attention implementation")
# Performance parameters
parser.add_argument("--matmul-precision", "-mp", type=str, default="high") # set to "none" to disable
parser.add_argument("--cuda-graph", "-cg", help="Use cuda graphs", type=str, default=None)
parser.add_argument("--compile", action="store_true", help="Compile the model using torch.compile")
parser.add_argument("--use-async", action="store_true", help="Use the asynchronous API for CB")
parser.add_argument("--do-sample", action="store_true", help="Activate sampling")
parser.add_argument("--num-return-sequences", type=int, default=1, help="Number of return sequences")
# Benchmark parameters
parser.add_argument("--samples", type=int, default=500, help="Number of samples to generate")
parser.add_argument(
"--input-length", type=int, default=None, help="Length of input sequences. Leave to None to mimic real eval."
)
parser.add_argument("--max-new-tokens", type=int, default=512, help="Maximum number of new tokens to generate")
parser.add_argument("--force-max-length", action="store_true", help="Force generation to stop at max length")
parser.add_argument("--add-prefix", action="store_true", help="Add a prefix to the samples")
parser.add_argument("--compare", action="store_true", help="Compare CB generation with classic generate")
parser.add_argument("--profile", type=str, default=None)
parser.add_argument("--metrics", action="store_true")
parser.add_argument("--seed", type=int, default=None, help="Random seed")
# Display parameters
parser.add_argument("--displayed", type=int, default=0, help="Number of samples to display")
parser.add_argument("--log-level", type=str, default="INFO")
parser.add_argument("--output-file", type=str, default=None)
args = parser.parse_args()
# Choose attention implementation
if args.attn is None:
if args.compile:
args.attn = "kernels-community/flash-attn3@fake-ops-return-probs"
logger.warning(
"No attention implementation was provided and compile is enabled. Using experimental kernel: "
"kernels-community/flash-attn3@fake-ops-return-probs because compile is not supported on main. Change "
"this when main supports it." # TODO: cf comment
)
else:
args.attn = "kernels-community/flash-attn3"
# Set seed
if args.seed is not None:
torch.manual_seed(args.seed)
# Create model
model_id = "google/gemma-2-2b-it" if args.sliding_window > 0 else "meta-llama/Llama-3.1-8B-Instruct"
has_system_role = args.sliding_window == 0
model = AutoModelForCausalLM.from_pretrained(model_id, attn_implementation=args.attn, dtype=torch.bfloat16)
model = model.cuda().eval()
if args.sliding_window > 0 and getattr(model.config, "sliding_window", None) is not None:
print(f"Setting sliding window from {model.config.sliding_window} to {args.sliding_window}")
model.config.sliding_window = args.sliding_window
# Set up diagnostics
logger.setLevel(args.log_level.upper())
maybe_setup_metrics(args.metrics)
# Set up performance
if args.matmul_precision != "none":
torch.set_float32_matmul_precision(args.matmul_precision)
cuda_graph_arg = args.cuda_graph.lower() if args.cuda_graph is not None else None
use_cuda_graph = {
"none": None, None: None,
"yes": True, "y": True, "true": True, "t": True, "1": True,
"no": False, "n": False, "false": False, "f": False, "0": False,
}[cuda_graph_arg] # fmt: skip
# Prepare tokenizer and dataset
tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left")
dataset = datasets.load_dataset("openai/gsm8k", "socratic", split="test")
dataset = dataset.select(range(args.samples))
if args.add_prefix:
possible_prefixes = [
None,
"You are a bot that solves math problems.",
"You are a bot who solves math problems. Try to make your answer clear and understandable, and include your stages of reasoning.",
"You are a bot with the aim to solves math problems. Try to make your answer clear and understandable, and include your stages of reasoning. No loud words or emojis, all responses must be readable by a child. Here is now the problem:",
] # fmt: skip
else:
possible_prefixes = [None]
tokenizer_kwargs = {"add_generation_prompt": True}
if args.input_length is not None:
tokenizer_kwargs["max_length"] = args.input_length
tokenizer_kwargs["truncation"] = True
tokenizer_kwargs["padding"] = True
tokenizer.pad_token_id = tokenizer.eos_token_id
batched_inputs = []
for item, prefix in zip(dataset, cycle(possible_prefixes)):
messages = []
question = item["question"]
if prefix is not None:
if has_system_role:
messages.append({"role": "system", "content": prefix})
else:
question = prefix + "\n\n" + question
messages.append({"role": "user", "content": question})
inputs = tokenizer.apply_chat_template(messages, **tokenizer_kwargs)
inputs = inputs if isinstance(inputs, list) else inputs["input_ids"]
batched_inputs.append(inputs)
# If num_return_sequences > 1, automatically enable do_sample with a warning
do_sample = args.do_sample
if args.num_return_sequences != 1 and not args.do_sample:
logger.warning(
f"num_return_sequences={args.num_return_sequences} > 1, automatically enabling do_sample=True. "
"Set --do-sample explicitly to suppress this warning."
)
do_sample = True
# Prepare generation config
generation_cfg = GenerationConfig(
max_new_tokens=args.max_new_tokens,
use_cuda_graph=use_cuda_graph,
eos_token_id=tokenizer.pad_token_id if args.force_max_length else tokenizer.eos_token_id,
pad_token_id=tokenizer.pad_token_id,
do_sample=do_sample,
temperature=0.8,
top_p=0.9,
num_blocks=args.num_blocks,
max_batch_tokens=args.max_batch_tokens,
num_return_sequences=args.num_return_sequences,
)
# Add a compile config if requested
if args.compile:
generation_cfg.compile_config = CompileConfig(
fullgraph=True,
mode="max-autotune-no-cudagraphs",
dynamic=True, # FIXME: if we warmup all graphs, this is not needed anymore
)
# If we need to compare, we need to generate the reference outputs
if args.compare:
expected_outputs = generate_without_cb(
model_id, args.sliding_window, args.attn, batched_inputs, generation_cfg
)
else:
expected_outputs = None
# If no output file is provided, we pick a name based on the args
if args.output_file is None:
os.makedirs("runs/cb", exist_ok=True)
attn = args.attn.replace("|", "_").replace("/", "_")
args.output_file = f"runs/cb/{attn}_{args.samples}_{args.cuda_graph}.json"
# Run warmup batch generation if log level is above DEBUG # TODO: understand why warmup incurs a large overhead during cache creation
if logger.level > logging.DEBUG:
batch_generate(
model,
batched_inputs[: min(5, args.samples)],
generation_cfg,
tokenizer,
displayed_samples=-1,
use_async=args.use_async,
)
if args.profile is not None:
cm = profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], record_shapes=True)
else:
cm = contextlib.nullcontext()
with cm as prof:
# Run batch generation
gen_time, tok_per_sec = batch_generate(
model,
batched_inputs,
generation_cfg,
tokenizer,
displayed_samples=args.displayed,
output_file=args.output_file,
expected_outputs=expected_outputs,
use_async=args.use_async,
)
if args.profile is not None:
filename = args.profile if args.profile.endswith(".json") else args.profile + ".json"
prof.export_chrome_trace(filename)
# Example usage:
# python examples/pytorch/continuous_batching.py --attn sdpa --add-prefix --samples 10 --compare
# python examples/pytorch/continuous_batching.py --attn flash_attention_2 -mp none --add-prefix --samples 500
# python examples/pytorch/continuous_batching.py -mp none -cg yes --samples 10 --max-new-tokens 32 --profile profile_wip.json
| {
"repo_id": "huggingface/transformers",
"file_path": "examples/pytorch/continuous_batching.py",
"license": "Apache License 2.0",
"lines": 317,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/integrations/eager_paged.py | import torch
from torch import nn
from ..generation.continuous_batching.cache import PagedAttentionCache
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_paged_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: torch.Tensor | None, # shape [seqlen_q, seqlen_k]
scaling: float,
**kwargs,
):
# Add KV cache to the key and value tensors
cache: PagedAttentionCache | None = kwargs.pop("cache", None)
if cache is not None:
# This changes the shape of k and v from [1, num_kv_heads, seqlen_kv, head_dim] to [-1, num_kv_heads, head_dim]
key, value = cache.update(
key_states=key,
value_states=value,
layer_idx=module.layer_idx,
read_index=kwargs["read_index"],
write_index=kwargs["write_index"],
)
key = key.transpose(0, 1).unsqueeze(0)
value = value.transpose(0, 1).unsqueeze(0)
# Repeat the key and value tensors for each group of key-value heads
if hasattr(module, "num_key_value_groups"):
key = repeat_kv(key, module.num_key_value_groups)
value = repeat_kv(value, module.num_key_value_groups)
# Get the right causal mask for the current layer
if isinstance(attention_mask, dict):
sliding_window = getattr(module, "sliding_window", 1)
layer_type = "full_attention" if sliding_window == 1 or sliding_window is None else "sliding_attention"
causal_mask = attention_mask[layer_type]
else:
causal_mask = attention_mask
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if causal_mask is not None:
attn_weights = attn_weights + causal_mask
# Handle attention sinks if the model has them
if hasattr(module, "sinks"):
# Retrieve the sink and add it to the attention weights
sinks = module.sinks.reshape(1, -1, 1, 1).expand(query.shape[0], -1, query.shape[-2], -1)
attn_weights = torch.cat([attn_weights, sinks], dim=-1)
# Normalize the attention weights for better numerical stability
attn_weights = attn_weights - attn_weights.max(dim=-1, keepdim=True).values
# Apply softmax and drop the sink. Not exactly the same code as eager w/ sink, but the same code does not produce the same results.
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = attn_weights[..., :-1]
else:
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/integrations/eager_paged.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/transformers:src/transformers/integrations/flash_paged.py | import torch
from ..generation.continuous_batching import PagedAttentionCache
from ..modeling_flash_attention_utils import lazy_import_paged_flash_attention
def paged_attention_forward(
module: torch.nn.Module,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
attention_mask: torch.Tensor | None = None,
cache: PagedAttentionCache = None,
cu_seq_lens_q=None,
cu_seq_lens_k=None,
max_seqlen_q=None,
max_seqlen_k=None,
**kwargs,
) -> torch.Tensor:
r"""Perform the forward pass of attention with paged key-value cache.
This function handles the cache updates and performs the attention computation
using the flash_attn_varlen_func for efficient processing.
Args:
q: (total_q, nheads, headdim), where total_q = total number of query tokens in the batch.
k: (total_k, nheads_k, headdim), where total_k = total number of key tokens in the batch. but if there is a block table it can be the full k
v: (total_k, nheads_k, headdim), where total_k = total number of key tokens in the batch. but if there is a block table it can be the full v
cu_seq_lens_q: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
of the sequences in the batch, used to index into q.
cu_seq_lens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
of the sequences in the batch, used to index into kv.
max_seqlen_q: int. Maximum query sequence length in the batch.
max_seqlen_k: int. Maximum key sequence length in the batch.
dropout_p: float. Dropout probability.
softmax_scale: float. The scaling of QK^T before applying softmax.
Default to 1 / sqrt(headdim).
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
window_size: (left, right). If not (-1, -1), implements sliding window local attention.
softcap: float. Anything > 0 activates softcapping attention.
"""
flash_attn_varlen_func = lazy_import_paged_flash_attention(module.config._attn_implementation)
sliding_window = (-1, -1) if not getattr(module, "sliding_window", False) else (module.sliding_window - 1, 0)
layer_type = "full_attention" if sliding_window == (-1, -1) else "sliding_attention"
# .update changes the shape of k and v from [1, num_kv_heads, seqlen_kv, head_dim] to [-1, num_kv_heads, head_dim]
if cache is not None:
k, v = cache.update(
key_states=k,
value_states=v,
layer_idx=module.layer_idx,
read_index=kwargs["read_index"],
write_index=kwargs["write_index"],
)
# Retrieve the cumulative sequence lengths for the current layer
if isinstance(cu_seq_lens_k, dict):
cu_seq_lens_k = cu_seq_lens_k[layer_type]
max_seqlen_k = max_seqlen_k[layer_type]
custom_kwargs = {"s_aux": kwargs.get("s_aux")} if "s_aux" in kwargs else {}
attn_output = flash_attn_varlen_func(
q.transpose(1, 2).squeeze(0).contiguous(),
k.contiguous(),
v.contiguous(),
cu_seq_lens_q.to(torch.int32),
cu_seq_lens_k.to(torch.int32).clone(),
max_seqlen_q,
max_seqlen_k,
softmax_scale=module.scaling,
causal=True, # kind of a must, it automatically aligns the mask for q < k
window_size=sliding_window, # -1 means infinite context window
**custom_kwargs,
)
if isinstance(attn_output, tuple):
attn_output = attn_output[0]
return attn_output, None
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/integrations/flash_paged.py",
"license": "Apache License 2.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/transformers:src/transformers/integrations/sdpa_paged.py | import torch
from ..generation.continuous_batching.cache import PagedAttentionCache
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def sdpa_attention_paged_forward(
module: torch.nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: torch.Tensor | None,
dropout: float = 0.0,
scaling: float | None = None,
**kwargs,
) -> tuple[torch.Tensor, None]:
# Add KV cache to the key and value tensors
cache: PagedAttentionCache | None = kwargs.pop("cache", None)
if cache is not None:
# This changes the shape of k and v from [1, num_kv_heads, seqlen_kv, head_dim] to [-1, num_kv_heads, head_dim]
key, value = cache.update(
key_states=key,
value_states=value,
layer_idx=module.layer_idx,
read_index=kwargs["read_index"],
write_index=kwargs["write_index"],
)
key = key.transpose(0, 1).unsqueeze(0)
value = value.transpose(0, 1).unsqueeze(0)
# Repeat the key and value tensors for each group of key-value heads
if hasattr(module, "num_key_value_groups"):
key = repeat_kv(key, module.num_key_value_groups)
value = repeat_kv(value, module.num_key_value_groups)
# Get the right causal mask for the current layer
causal_mask = attention_mask
# Run the actual attention
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
attn_output = torch.nn.functional.scaled_dot_product_attention(
query,
key,
value,
attn_mask=causal_mask,
dropout_p=dropout,
scale=scaling,
# Packed sequence format is used for input, so that it can never be causal.
is_causal=False,
)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, None
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/integrations/sdpa_paged.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/transformers:src/transformers/utils/metrics.py | import functools
import logging
import time
from collections.abc import Callable
from enum import Enum
from typing import Any
from .import_utils import is_opentelemetry_available
class RequestStatus(Enum):
    """Status of a generation request through its lifecycle.

    Used by the continuous-batching scheduler to track each request from
    submission to completion (or failure).
    """

    PENDING = "pending"  # queued, not yet scheduled
    PREFILLING = "prefilling"  # prompt tokens are being processed in one pass
    PREFILLING_SPLIT = "prefilling_split"  # prompt is being processed in chunks (split prefill)
    SPLIT_PENDING_REMAINDER = "split_pending_remainder"  # part of a split prefill is done, remainder still queued
    DECODING = "decoding"  # autoregressive token generation in progress
    FINISHED = "finished"  # generation completed normally
    FAILED = "failed"  # generation aborted due to an error
# Probe for OpenTelemetry once at import time. When the package is missing, the
# decorators below degrade to no-ops, so the rest of the library works without it.
if is_opentelemetry_available():
    from opentelemetry import metrics
    from opentelemetry.trace import Status, StatusCode, get_tracer

    _has_opentelemetry = True
else:
    _has_opentelemetry = False
def attach_tracer(tracer_name_template=None):
    """
    Class decorator that installs an OpenTelemetry tracer on every instance.

    Apply this to classes whose methods are traced with the `traced` decorator:
    after `__init__` runs, each instance carries a `self.tracer` attribute.

    Args:
        tracer_name_template: Optional format string for the tracer name, with
            `{module}` replaced by the class's module path and `{class_name}` by
            its qualified name. When None, the name is the dotted
            "module.ClassName" path, prefixed with "transformers." unless the
            module already lives under that namespace.

    Returns:
        A class decorator. When OpenTelemetry is not installed, the decorator
        is the identity function and classes are left untouched.
    """
    if not _has_opentelemetry:
        return lambda cls: cls

    def decorator(cls):
        wrapped_init = cls.__init__

        @functools.wraps(wrapped_init)
        def traced_init(self, *args, **kwargs):
            # Run the real constructor first, then attach the tracer.
            wrapped_init(self, *args, **kwargs)
            module_path = cls.__module__
            qualified_name = cls.__qualname__
            if tracer_name_template is not None:
                tracer_name = tracer_name_template.format(module=module_path, class_name=qualified_name)
            elif module_path.startswith("transformers."):
                tracer_name = f"{module_path}.{qualified_name}"
            else:
                tracer_name = f"transformers.{module_path}.{qualified_name}"
            self.tracer = get_tracer(tracer_name)

        cls.__init__ = traced_init
        return cls

    return decorator
def traced(
    func=None,
    *,
    span_name=None,
    standalone=False,
    additional_attributes: list[tuple[str, str, Any | Callable[[Any], Any]]] | None = None,
):
    """
    Decorator to trace function calls with OpenTelemetry.

    Can be used as @traced or @traced(span_name="custom_name")

    Args:
        func: The function to trace
        span_name: Optional custom name for the span (defaults to function name)
        standalone: If True, creates a parentless span
        additional_attributes: Optional list of additional attributes to set on the span.
            Each item is a tuple of (instance_attribute_name, span_attribute_key, value_or_transform_function)
            where:
            - instance_attribute_name: Name of the attribute to get from the class instance
            - span_attribute_key: Key to use when setting the attribute on the span
            - value_or_transform_function: Either a raw value to use directly, or a function to transform
              the attribute value before setting it on the span

    Returns:
        Decorated function with tracing. When OpenTelemetry is not installed,
        the function is returned unchanged.
    """

    def decorator(func):
        if not _has_opentelemetry:
            return func

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # BUGFIX: `func` is the plain function captured at class-definition
            # time, so it never has `__self__` — the previous check meant
            # `instance` was always None, the class's attached tracer was never
            # used, and `additional_attributes` were never emitted. Instead,
            # treat the first positional argument as `self` when its class
            # defines an attribute with this function's name (i.e. this looks
            # like a bound-method call).
            instance = args[0] if args and hasattr(type(args[0]), func.__name__) else None
            is_method = instance is not None
            # Prefer the per-instance tracer installed by @attach_tracer; fall
            # back to a module/function-scoped tracer for plain functions.
            if is_method and hasattr(instance, "tracer"):
                tracer = instance.tracer
            else:
                tracer = get_tracer(f"transformers.{func.__module__}.{func.__name__}")
            name = span_name or func.__name__
            # `start_span` creates a parentless span; `start_as_current_span`
            # nests under the currently-active span. Both are context managers.
            span_fn = tracer.start_span if standalone else tracer.start_as_current_span
            with span_fn(name) as span:
                span.set_attribute("function.name", func.__name__)
                span.set_attribute("function.module", func.__module__)
                span.set_attribute("function.is_method", is_method)
                # Record call arguments: scalars verbatim, everything else by type only.
                if args:
                    for i, arg in enumerate(args):
                        if isinstance(arg, (str, int, float, bool)) or arg is None:
                            span.set_attribute(f"args.{i}", str(arg))
                        else:
                            span.set_attribute(f"args.{i}", str(type(arg)))
                if kwargs:
                    for key, value in kwargs.items():
                        if isinstance(value, (str, int, float, bool)) or value is None:
                            span.set_attribute(f"kwargs.{key}", str(value))
                        else:
                            span.set_attribute(f"kwargs.{key}", str(type(value)))
                # Instance-derived attributes: either a transform of the instance
                # attribute's value, or a fixed value used as-is.
                if additional_attributes and is_method:
                    for attr_config in additional_attributes:
                        instance_attribute_name, span_attribute_key, value_or_transform_function = attr_config
                        if hasattr(instance, instance_attribute_name):
                            attribute_value = getattr(instance, instance_attribute_name)
                            if callable(value_or_transform_function):
                                transformed_value = value_or_transform_function(attribute_value)
                            else:
                                transformed_value = value_or_transform_function
                            span.set_attribute(span_attribute_key, transformed_value)
                try:
                    result = func(*args, **kwargs)
                    return result
                except Exception as e:
                    # Mark the span failed and attach the exception before re-raising.
                    span.set_status(Status(StatusCode.ERROR))
                    span.record_exception(e)
                    raise

        return wrapper

    # Support both @traced and @traced(...) usage.
    if func is None:
        return decorator
    return decorator(func)
# Module-level logger used by the metrics helpers below.
logger = logging.getLogger(__name__)
@attach_tracer()
class ContinuousBatchProcessorMetrics:
    """Metrics collection for ContinuousBatchProcessor.

    Every ``record_*`` method is a no-op when OpenTelemetry is not installed, so callers
    never need to guard their calls. Any failure while recording is logged as a warning
    and never propagated, so telemetry cannot interrupt the generation loop.
    """
    def __init__(self, max_batch_tokens: int):
        """Initialize metrics for continuous batch processor.
        Args:
            max_batch_tokens: Maximum number of tokens in a batch
        """
        # Denominator for the batch-fill-percentage histogram (see record_batch_metrics).
        self.max_batch_tokens = max_batch_tokens
        self._setup_metrics()
    def _setup_metrics(self):
        """Initialize OpenTelemetry metrics and tracing if the library is available."""
        if not _has_opentelemetry:
            logger.info(
                "OpenTelemetry is not installed. Metrics and tracing will not be recorded."
                "You can install it with `pip install opentelemetry-api>=1.30.0`"
            )
            return
        # All instruments share a single meter scoped to the continuous batch processor.
        self.meter = metrics.get_meter("transformers.generation.continuous_batch_processor")
        # Define appropriate buckets for TTFT (typically ranges from ~50ms to several seconds)
        ttft_buckets = [10, 25, 50, 75, 100, 150, 200, 300, 500, 750, 1000, 2000, 5000, 10000]
        self.ttft_histogram = self.meter.create_histogram(
            name="ttft_milliseconds",
            description="Time to first token in milliseconds",
            unit="ms",
            explicit_bucket_boundaries_advisory=ttft_buckets,
        )
        # Gauges tracking the scheduler's queues (updated via record_queue_metrics).
        self.active_requests_gauge = self.meter.create_gauge(
            name="active_requests_count",
            description="Number of active requests currently being processed",
            unit="requests",
        )
        self.waiting_requests_gauge = self.meter.create_gauge(
            name="waiting_requests_count",
            description="Number of requests waiting to be processed",
            unit="requests",
        )
        # Define appropriate buckets for request latency (similar to TTFT but with higher upper bounds)
        latency_buckets = [50, 100, 250, 500, 1000, 2000, 5000, 10000, 20000, 30000, 60000]
        self.request_latency_histogram = self.meter.create_histogram(
            name="request_latency_milliseconds",
            description="End-to-end latency for completed requests in milliseconds",
            unit="ms",
            explicit_bucket_boundaries_advisory=latency_buckets,
        )
        self.decode_prefill_ratio_gauge = self.meter.create_gauge(
            name="decode_prefill_ratio",
            description="Ratio of decode tokens to prefill tokens in a batch",
            unit="ratio",
        )
        # Monotonic counters: cumulative token totals per phase across the process lifetime.
        self.prefill_tokens_counter = self.meter.create_counter(
            name="prefill_tokens_processed",
            description="Number of prefill tokens processed",
            unit="tokens",
        )
        self.decode_tokens_counter = self.meter.create_counter(
            name="decode_tokens_processed",
            description="Number of decode tokens processed",
            unit="tokens",
        )
        # Define appropriate buckets for batch fill percentage (0-100%)
        batch_fill_buckets = [5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 98, 100]
        self.batch_fill_percentage_histogram = self.meter.create_histogram(
            name="batch_fill_percentage",
            description="Percentage of max_batch_tokens utilized in each batch",
            unit="percent",
            explicit_bucket_boundaries_advisory=batch_fill_buckets,
        )
        # KV-cache memory gauges (computed analytically, no GPU sync — see
        # record_kv_cache_memory_metrics).
        self.kv_cache_free_memory_gauge = self.meter.create_gauge(
            name="kv_cache_free_memory_bytes",
            description="Free memory of the PagedAttentionCache in bytes",
            unit="bytes",
        )
        self.kv_cache_memory_gauge = self.meter.create_gauge(
            name="kv_cache_memory_bytes",
            description="Memory usage of the PagedAttentionCache in bytes",
            unit="bytes",
        )
    @traced
    def record_ttft_metric(self, created_time: float, request_id: str) -> None:
        """Record Time to First Token (TTFT).
        Args:
            created_time: The time the request was created
            request_id: The ID of the request
        """
        if not _has_opentelemetry:
            return
        # Wall-clock milliseconds between request creation and now (the first token).
        ttft_ms = (time.time() - created_time) * 1000.0
        try:
            self.ttft_histogram.record(ttft_ms)
            logger.debug(f"Recorded TTFT for request {request_id}: {ttft_ms:.2f}ms")
        except Exception as e:
            # Telemetry must never break generation; degrade to a warning.
            logger.warning(f"Failed to record TTFT metric: {e}")
    @traced
    def record_batch_metrics(self, requests_in_batch: list) -> None:
        """Record metrics about the batch composition including decode/prefill ratio and batch fill percentage.
        Args:
            requests_in_batch: List of request states in the current batch
        """
        if not _has_opentelemetry or not requests_in_batch:
            return
        decode_tokens = 0
        prefill_tokens = 0
        # A decoding request contributes exactly one token this step; a prefilling
        # request contributes its full prompt length. NOTE(review): PREFILLING_SPLIT
        # also counts the whole prompt — confirm this is intended for split prefills.
        for state in requests_in_batch:
            if state.status == RequestStatus.DECODING:
                decode_tokens += 1
            elif state.status in [RequestStatus.PREFILLING, RequestStatus.PREFILLING_SPLIT]:
                prefill_tokens += len(state.prompt_ids)
        total_batch_tokens = decode_tokens + prefill_tokens
        try:
            if prefill_tokens > 0:
                self.prefill_tokens_counter.add(prefill_tokens)
            if decode_tokens > 0:
                self.decode_tokens_counter.add(decode_tokens)
            # The decode/prefill ratio is only defined when there was prefill work.
            if prefill_tokens > 0:
                ratio = decode_tokens / prefill_tokens
                self.decode_prefill_ratio_gauge.set(ratio)
            fill_percentage = (total_batch_tokens / self.max_batch_tokens) * 100.0
            self.batch_fill_percentage_histogram.record(fill_percentage)
            logger.debug(
                f"Batch metrics: {decode_tokens} decode tokens, {prefill_tokens} prefill tokens, "
                f"batch fill: {fill_percentage:.2f}% ({total_batch_tokens}/{self.max_batch_tokens})"
            )
        except Exception as e:
            logger.warning(f"Failed to record batch metrics: {e}")
    @traced
    def record_kv_cache_memory_metrics(self, cache) -> None:
        """Record memory usage of the PagedAttentionCache without GPU synchronization.
        This calculates the theoretical memory usage based on cache configuration
        and the number of blocks currently in use.
        Args:
            cache: The PagedAttentionCache object to measure
        """
        if not _has_opentelemetry:
            return
        try:
            # Retrieve the memory footprint of the cache
            page_size = cache.head_dim * cache.num_key_value_heads
            page_mem_in_bytes = page_size * cache.dtype.itemsize
            # When a block is allocated, it is for both K and V, so we multiply by 2
            # It's also allocated across all cache tensors, so we multiply by the nb of tensors: len(cache.key_cache)
            block_mem_in_bytes = 2 * len(cache.key_cache) * cache.block_size * page_mem_in_bytes
            # Retrieve the number of used and free blocks
            free_blocks = cache.get_num_free_blocks()
            used_blocks = cache.num_blocks - free_blocks
            # Convert that into used and free memory in bytes
            used_memory_bytes = used_blocks * block_mem_in_bytes
            free_memory_bytes = free_blocks * block_mem_in_bytes
            # Update the telemetry gauges and add a message in the logs
            # (a ZeroDivisionError from num_blocks == 0 would be caught below).
            self.kv_cache_memory_gauge.set(used_memory_bytes)
            self.kv_cache_free_memory_gauge.set(free_memory_bytes)
            logger.debug(
                f"KV Cache memory: {used_memory_bytes / (1024 * 1024):.2f}MB, "
                f"Used blocks: {used_blocks}/{cache.num_blocks} "
                f"({used_blocks / cache.num_blocks * 100:.1f}%)"
            )
        except Exception as e:
            logger.warning(f"Failed to record KV cache memory metrics: {e}")
    @traced
    def record_queue_metrics(self, active_requests: int, waiting_requests: int) -> None:
        """Record metrics about active and waiting requests.
        Args:
            active_requests: Number of active requests
            waiting_requests: Number of waiting requests
        """
        if not _has_opentelemetry:
            return
        try:
            self.active_requests_gauge.set(active_requests)
            self.waiting_requests_gauge.set(waiting_requests)
            logger.debug(f"Queue metrics: {active_requests} active requests, {waiting_requests} waiting requests")
        except Exception as e:
            logger.warning(f"Failed to record queue metrics: {e}")
    @traced
    def record_request_completion(self, created_time: float, request_id: str) -> None:
        """Record metrics about a completed request.
        Args:
            created_time: The time the request was created
            request_id: The ID of the request
        """
        if not _has_opentelemetry:
            return
        # End-to-end latency in milliseconds, from request creation to completion.
        latency_ms = (time.time() - created_time) * 1000.0
        try:
            self.request_latency_histogram.record(latency_ms)
            logger.debug(f"Recorded request completion for {request_id}: {latency_ms:.2f}ms")
        except Exception as e:
            logger.warning(f"Failed to record request completion metric: {e}")
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/utils/metrics.py",
"license": "Apache License 2.0",
"lines": 320,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/transformers:tests/generation/test_paged_attention.py | import time
import unittest
from parameterized import parameterized
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
from transformers.testing_utils import Expectations, slow
# Prompts used to exercise batched generation across attention backends.
# NOTE(review): the grammatical oddities (e.g. "a walking his dog", "a the turn")
# are part of the pinned inputs — editing them would invalidate _EXPECTED_OUTPUTS.
_TEST_PROMPTS = [
    "A man is a walking his dog down the street, and a the turn he sees",
    "Describe a fruit that is of orange color and round. It is a sweet fruit and a great source of Vitamine C. The fruit I'm thinking of is an",
    "A plane is flying high in the sky, out of the window are clouds and mountains. Where could the plane be located?",
    "Please fill in the form to",
    "For safety reasons, the train is stopped in the middle of the",
]
# Greedy continuations pinned per (device type, compute capability); `Expectations`
# resolves the right entry at runtime. Order matches _TEST_PROMPTS one-to-one.
_EXPECTED_OUTPUTS = Expectations(
    {
        ("cpu", None): [  # FIXME: CPU tests only pass for eager and flex. Maybe the test should be re-thought.
            "a woman standing on the sidewalk, looking at him. He is immediately drawn to her and feels a strong attraction. He walks up to her and strikes",
            "orange.\n\n## Step 1: Identify the key characteristics of the fruit\nThe fruit is described as being orange in color and round in shape.\n\n##",
            "This riddle is a classic example of a lateral thinking puzzle, which requires the test-taker to think creatively and consider multiple possibilities. The answer",
            "get in touch with us. We will respond to your message as soon as possible.\n\n[Your Name]\n[Your Email]\n[Your Phone Number]",
            "track. The train is stopped because of a mechanical failure. The train is stopped because of a mechanical failure. The train is stopped because of a mechanical",
            # TODO: investigate why that last expectation seems incorrect
        ],
        ("cuda", (9, 0)): [  # A10 and H100
            "a woman standing on the sidewalk, looking at him. He is immediately drawn to her and feels a strong attraction. He walks up to her and strikes",
            "orange.\n\n## Step 1: Identify the key characteristics of the fruit\nThe fruit is described as being orange in color and round in shape.\n\n##",
            "This riddle is a classic example of a lateral thinking puzzle, which requires the test-taker to think creatively and consider multiple possibilities. The answer",
            "get in touch with us. We will respond to your message as soon as possible.\n\n[Your Name]\n[Your Email]\n[Your Phone Number]",
            "track. The train is stopped for 30 minutes. The train is moving at a speed of 60 km/h. How many kilometers does the train",
        ],
    }
)
@slow
class TestBatchGeneration(unittest.TestCase):
    """Slow integration tests for `generate_batch` with paged-attention backends."""
    @classmethod
    def setUpClass(cls):
        # Load the model and tokenizer once for all parameterized cases (slow download/init).
        cls.model = AutoModelForCausalLM.from_pretrained(
            "meta-llama/Llama-3.2-3b-Instruct", dtype="bfloat16", device_map="auto"
        ).eval()
        cls.tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-3b-Instruct", padding_side="left")
        if cls.tokenizer.pad_token is None:
            # Fall back to EOS as the padding token so batching works without a dedicated pad token.
            cls.tokenizer.pad_token = cls.tokenizer.eos_token
            cls.model.config.pad_token_id = cls.model.config.eos_token_id
        cls.model.use_cache = False
    @parameterized.expand(
        [
            # (attention implementation, num_blocks, block_size, max_batch_tokens)
            ("paged|eager", 64, 128, 64),
            ("paged|sdpa", 32, 256, 128),
            ("paged|flash_attention_2", 16, 512, 256),
            ("paged|flex_attention", 64, 128, 64),
        ]
    )
    def test_generate_batch_consistency(self, attn_impl, num_blocks, block_size, max_batch_tokens):
        """Greedy batched generation must exactly match the pinned per-device expectations."""
        self.model.config.attn_implementation = attn_impl
        generation_config = GenerationConfig(
            max_new_tokens=30,
            top_k=0,
            eos_token_id=self.tokenizer.eos_token_id,
            pad_token_id=self.tokenizer.pad_token_id,
            use_cache=False,
            num_blocks=num_blocks,
            block_size=block_size,
            max_batch_tokens=max_batch_tokens,
        )
        tokenized = self.tokenizer(_TEST_PROMPTS, truncation=True, max_length=512)
        batch_inputs = list(tokenized["input_ids"])
        batch_outputs = self.model.generate_batch(
            inputs=batch_inputs,
            generation_config=generation_config,
        )
        # Expectations are resolved per device/compute capability by `Expectations`.
        expected_outputs = _EXPECTED_OUTPUTS.get_expectation()
        for i, (output, expected_output) in enumerate(zip(batch_outputs.values(), expected_outputs)):
            generated = self.tokenizer.decode(output.generated_tokens, skip_special_tokens=False).strip()
            expected = expected_output.strip()
            self.assertEqual(
                generated,
                expected,
                msg=f"[{attn_impl}] Mismatch in request {i}:\nExpected: {expected}\nGot: {generated}",
            )
    @parameterized.expand(
        [
            # Same backend/capacity grid as the consistency test above.
            ("paged|eager", 64, 128, 64),
            ("paged|sdpa", 32, 256, 128),
            ("paged|flash_attention_2", 16, 512, 256),
            ("paged|flex_attention", 64, 128, 64),
        ]
    )
    def test_generate_batch_with_sampling(self, attn_impl, num_blocks, block_size, max_batch_tokens):
        """Test batch generation with do_sampling=True to verify sampling works correctly."""
        self.model.config.attn_implementation = attn_impl
        generation_config = GenerationConfig(
            max_new_tokens=30,
            do_sample=True,
            top_k=50,
            top_p=0.9,
            temperature=0.8,
            eos_token_id=self.tokenizer.eos_token_id,
            pad_token_id=self.tokenizer.pad_token_id,
            use_cache=False,
            num_blocks=num_blocks,
            block_size=block_size,
            max_batch_tokens=max_batch_tokens,
        )
        tokenized = self.tokenizer(_TEST_PROMPTS, truncation=True, max_length=512)  # Tokenizes all of _TEST_PROMPTS, truncated to 512 tokens
        batch_inputs = list(tokenized["input_ids"])
        start = time.time()
        batch_outputs = self.model.generate_batch(
            inputs=batch_inputs,
            generation_config=generation_config,
        )
        end = time.time()
        print(
            f"\n[{attn_impl}] Sampling batch took {end - start:.2f}s with config: blocks={num_blocks}, block_size={block_size}, max_batch_tokens={max_batch_tokens}"
        )
        # With sampling enabled, we can't check exact outputs, but we should verify:
        # 1. All requests completed successfully
        # 2. Generated text is non-empty
        # 3. Generated text is different from greedy (demonstrating sampling is working)
        self.assertEqual(len(batch_outputs), len(batch_inputs), f"[{attn_impl}] Not all requests completed")
        for i, req_id in enumerate(batch_outputs):
            generated = self.tokenizer.decode(
                batch_outputs[req_id].generated_tokens, skip_special_tokens=False
            ).strip()
            self.assertTrue(
                len(generated) > 0,
                msg=f"[{attn_impl}] Empty output for request {i}",
            )
            # Check that we got at least some tokens generated
            generated_tokens = batch_outputs[req_id].generated_tokens
            self.assertGreater(
                len(generated_tokens),
                0,
                msg=f"[{attn_impl}] No tokens generated for request {i}",
            )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/generation/test_paged_attention.py",
"license": "Apache License 2.0",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/biogpt/modular_biogpt.py | # Copyright 2022 The HuggingFace Team and Microsoft Research AI4Science All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BioGPT model."""
import math
import torch
import torch.nn as nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...masking_utils import create_causal_mask
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
SequenceClassifierOutputWithPast,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
TransformersKwargs,
auto_docstring,
logger,
)
from ..bart.modeling_bart import (
BartAttention,
BartDecoderLayer,
BartScaledWordEmbedding,
)
from ..opt.modeling_opt import OPTLearnedPositionalEmbedding
from .configuration_biogpt import BioGptConfig
class BioGptLearnedPositionalEmbedding(OPTLearnedPositionalEmbedding):
    def forward(
        self,
        attention_mask: torch.LongTensor,
        past_key_values_length: int = 0,
        position_ids: torch.LongTensor | None = None,
    ):
        """`attention_mask` is expected to be [bsz x seqlen].

        Returns the learned position embeddings for the current tokens.
        """
        # Bug fix: the parent's result was computed but dropped, making this forward
        # return None. BioGptModel adds the returned embeddings to the token
        # embeddings, so the value must be propagated to the caller.
        return super().forward(attention_mask, past_key_values_length, position_ids)
class BioGptScaledWordEmbedding(BartScaledWordEmbedding):
    # Behaviorally identical to BartScaledWordEmbedding; subclassed only to expose
    # the implementation under the BioGpt name.
    pass
class BioGptAttention(BartAttention):
    # Behaviorally identical to BartAttention; subclassed only to expose the
    # implementation under the BioGpt name.
    pass
class BioGptDecoderLayer(BartDecoderLayer):
    def __init__(self, config: BioGptConfig, layer_idx: int | None = None):
        super().__init__(config)
        self.embed_dim = config.hidden_size
        # Re-create self-attention with BioGpt config names
        # (attention_probs_dropout_prob / num_attention_heads instead of Bart's).
        self.self_attn = BioGptAttention(
            embed_dim=self.embed_dim,
            num_heads=config.num_attention_heads,
            dropout=config.attention_probs_dropout_prob,
            is_decoder=True,
            is_causal=True,
            config=config,
            layer_idx=layer_idx,
        )
        self.dropout = config.hidden_dropout_prob
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(self.embed_dim, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, self.embed_dim)
        # BioGpt is decoder-only: drop the cross-attention modules created by Bart's layer.
        del self.encoder_attn
        del self.encoder_attn_layer_norm
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        output_attentions: bool | None = False,
        use_cache: bool | None = True,
        position_ids: torch.LongTensor | None = None,
        cache_position: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.FloatTensor, tuple[torch.FloatTensor, torch.FloatTensor] | None]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            past_key_values (`Cache`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
                cache in the correct position and to infer the complete sequence length.
        """
        residual = hidden_states
        # Pre-LN block: normalize before self-attention, add residual afterwards.
        hidden_states = self.self_attn_layer_norm(hidden_states)
        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            position_ids=position_ids,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        # Fully Connected
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        # self.activation_dropout is presumably set by BartDecoderLayer.__init__ — TODO confirm.
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)
        return outputs
@auto_docstring
class BioGptPreTrainedModel(PreTrainedModel):
    # Base class wiring shared by all BioGpt heads: config type and checkpoint prefix.
    config: BioGptConfig
    base_model_prefix = "biogpt"
    supports_gradient_checkpointing = True
    # Supported attention backends and compile capability flags.
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
@auto_docstring
class BioGptModel(BioGptPreTrainedModel):
    def __init__(self, config: BioGptConfig):
        super().__init__(config)
        self.config = config
        # Probability of skipping a whole decoder layer during training (LayerDrop).
        self.layerdrop = config.layerdrop
        self.dropout = config.hidden_dropout_prob
        self.embed_dim = config.hidden_size
        self.padding_idx = config.pad_token_id
        # Token embeddings optionally scaled by sqrt(hidden_size) (Transformer convention).
        embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0
        self.embed_tokens = BioGptScaledWordEmbedding(
            config.vocab_size, self.embed_dim, self.padding_idx, embed_scale=embed_scale
        )
        self.embed_positions = BioGptLearnedPositionalEmbedding(config.max_position_embeddings, self.embed_dim)
        self.layers = nn.ModuleList([BioGptDecoderLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])
        self.layer_norm = nn.LayerNorm(self.embed_dim)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.FloatTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        past_key_values: Cache | None = None,
        use_cache: bool | None = None,
        position_ids: torch.LongTensor | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPastAndCrossAttentions:
        # Resolve per-call flags against the config defaults.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # retrieve input_ids and inputs_embeds
        # NOTE(review): the error messages mention "decoder_input_ids" although the
        # public arguments are input_ids/inputs_embeds — wording kept as-is.
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            input = input_ids
            input_shape = input.shape
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            # (batch, seq)-shaped placeholder; only consumed by embed_tokens below,
            # which is skipped in this branch because inputs_embeds is provided.
            input = inputs_embeds[:, :, -1]
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input)
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`..."
                )
                use_cache = False
        # initialize past_key_values
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)
        batch_size, seq_length = inputs_embeds.size()[:-1]
        past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
        if cache_position is None:
            # Positions of the current tokens within the full (cached + new) sequence.
            cache_position = torch.arange(
                past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device
            )
        if attention_mask is None:
            # required mask seq length can be calculated via length of past cache
            mask_seq_length = past_key_values_length + seq_length
            attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
        self_attn_cache = past_key_values
        causal_mask = create_causal_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=self_attn_cache,
        )
        # embed positions
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)
        positions = self.embed_positions(attention_mask, past_key_values_length, position_ids=position_ids)
        hidden_states = inputs_embeds + positions
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        # NOTE(review): this guard duplicates the identical check above; harmless but redundant.
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        # Decoder-only model: there are never cross-attentions to report.
        all_cross_attentions = None
        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            if self.training:
                # Skip the whole layer with probability self.layerdrop (training only).
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:
                    continue
            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                position_ids=position_ids,
                cache_position=cache_position,
                **kwargs,
            )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attns += (layer_outputs[1],)
        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        hidden_states = self.layer_norm(hidden_states)
        if not return_dict:
            # Legacy tuple output: None entries are dropped.
            return tuple(
                v
                for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )
@auto_docstring(
    custom_intro="""
    BioGPT Model with a `language modeling` head on top for CLM fine-tuning.
    """
)
class BioGptForCausalLM(BioGptPreTrainedModel, GenerationMixin):
    _tied_weights_keys = {"output_projection.weight": "biogpt.embed_tokens.weight"}

    def __init__(self, config):
        """Build the decoder backbone plus the (tied, bias-free) LM projection head."""
        super().__init__(config)
        self.biogpt = BioGptModel(config)
        # Projects hidden states to vocabulary logits; weight is tied to the input
        # embeddings via `_tied_weights_keys`, hence no bias.
        self.output_projection = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        """Return the LM head so the weight-tying machinery can locate it."""
        return self.output_projection

    def set_output_embeddings(self, new_embeddings):
        """Replace the LM head (e.g. after resizing the vocabulary)."""
        self.output_projection = new_embeddings

    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.FloatTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        past_key_values: Cache | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        position_ids: torch.LongTensor | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.Tensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | CausalLMOutputWithCrossAttentions:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict
        backbone_outputs = self.biogpt(
            input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
            **kwargs,
        )
        sequence_output = backbone_outputs[0]
        # Restrict the (potentially large) vocabulary projection to the requested
        # suffix of the sequence; an int selects the last `logits_to_keep` positions.
        if isinstance(logits_to_keep, int):
            keep = slice(-logits_to_keep, None)
        else:
            keep = logits_to_keep
        lm_logits = self.output_projection(sequence_output[:, keep, :])
        lm_loss = None
        if labels is not None:
            # Shifting of labels happens inside the shared loss function.
            lm_loss = self.loss_function(logits=lm_logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
        if not return_dict:
            tuple_output = (lm_logits,) + backbone_outputs[1:]
            return tuple_output if lm_loss is None else (lm_loss,) + tuple_output
        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=lm_logits,
            past_key_values=backbone_outputs.past_key_values,
            hidden_states=backbone_outputs.hidden_states,
            attentions=backbone_outputs.attentions,
            cross_attentions=backbone_outputs.cross_attentions,
        )
@auto_docstring
class BioGptForTokenClassification(BioGptPreTrainedModel):
    def __init__(self, config):
        """Decoder backbone plus dropout and a per-token classification head."""
        super().__init__(config)
        self.num_labels = config.num_labels
        self.biogpt = BioGptModel(config)
        # Prefer an explicitly configured classifier dropout; otherwise reuse the
        # model's hidden dropout probability.
        dropout_rate = getattr(config, "classifier_dropout", None)
        if dropout_rate is None:
            dropout_rate = config.hidden_dropout_prob
        self.dropout = nn.Dropout(dropout_rate)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        token_type_ids: torch.LongTensor | None = None,
        attention_mask: torch.FloatTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        position_ids: torch.LongTensor | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.Tensor | None = None,
        **kwargs,
    ) -> tuple | TokenClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict
        outputs = self.biogpt(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )
        token_states = self.dropout(outputs[0])
        logits = self.classifier(token_states)
        loss = None
        if labels is not None:
            ce_loss = CrossEntropyLoss()
            flat_logits = logits.view(-1, self.num_labels)
            flat_labels = labels.view(-1)
            if attention_mask is None:
                loss = ce_loss(flat_logits, flat_labels)
            else:
                # Only positions with attention_mask == 1 contribute to the loss;
                # all other labels are remapped to the loss's ignore index.
                keep = attention_mask.view(-1) == 1
                masked_labels = torch.where(keep, flat_labels, torch.tensor(ce_loss.ignore_index).type_as(labels))
                loss = ce_loss(flat_logits, masked_labels)
        if not return_dict:
            tuple_output = (logits,) + outputs[2:]
            return tuple_output if loss is None else (loss,) + tuple_output
        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@auto_docstring(
    custom_intro="""
    The BioGpt Model transformer with a sequence classification head on top (linear layer).
    [`BioGptForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.
    Since it does classification on the last token, it is required to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """
)
class BioGptForSequenceClassification(BioGptPreTrainedModel):
    def __init__(self, config: BioGptConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        # Backbone transformer; `score` maps the pooled last-token hidden state to class logits.
        self.biogpt = BioGptModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.FloatTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        position_ids: torch.LongTensor | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.Tensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs,
    ) -> tuple | SequenceClassifierOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        transformer_outputs = self.biogpt(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )
        hidden_states = transformer_outputs[0]
        # Optionally score only the trailing `logits_to_keep` positions (0 keeps everything).
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.score(hidden_states[:, slice_indices, :])

        if input_ids is not None:
            batch_size, sequence_length = input_ids.shape[:2]
        else:
            batch_size, sequence_length = inputs_embeds.shape[:2]

        # Find the position of the last "real" token per row: last non-pad token when a pad id is
        # configured and `input_ids` are available, otherwise the final position (-1).
        if self.config.pad_token_id is None:
            sequence_length = -1
        else:
            if input_ids is not None:
                sequence_length = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device)
            else:
                sequence_length = -1
                logger.warning_once(
                    f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                    "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
                )

        # Gather one logit vector per batch row at the chosen position.
        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_length]

        loss = None
        if labels is not None:
            # Infer the problem type once from `num_labels` and the label dtype, then cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)

        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    def get_input_embeddings(self):
        # Delegate to the backbone's token embedding table.
        return self.biogpt.embed_tokens

    def set_input_embeddings(self, value):
        self.biogpt.embed_tokens = value
# Public symbols exported by this module (consumed by the modular conversion / package `__init__`).
__all__ = [
    "BioGptForCausalLM",
    "BioGptForTokenClassification",
    "BioGptForSequenceClassification",
    "BioGptModel",
    "BioGptPreTrainedModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/biogpt/modular_biogpt.py",
"license": "Apache License 2.0",
"lines": 534,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/informer/modular_informer.py | # Copyright 2023 Amazon and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Informer model."""
import numpy as np
import torch
from torch import nn
from ... import initialization as init
from ...cache_utils import Cache, EncoderDecoderCache
from ...masking_utils import create_bidirectional_mask
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
BaseModelOutput,
)
from ...modeling_utils import PreTrainedModel
from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput
from ...utils import auto_docstring
from ..bart.modeling_bart import BartAttention
from ..time_series_transformer.modeling_time_series_transformer import (
TimeSeriesFeatureEmbedder,
TimeSeriesMeanScaler,
TimeSeriesNOPScaler,
TimeSeriesSinusoidalPositionalEmbedding,
TimeSeriesStdScaler,
TimeSeriesTransformerDecoder,
TimeSeriesTransformerDecoderLayer,
TimeSeriesTransformerEncoder,
TimeSeriesTransformerEncoderLayer,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesValueEmbedding,
)
from .configuration_informer import InformerConfig
def nll(input: torch.distributions.Distribution, target: torch.Tensor) -> torch.Tensor:
    """Negative log-likelihood of `target` under the distribution `input`.

    Args:
        input: A torch distribution (typically emitted by the model head).
        target: Observed values to score against the distribution.

    Returns:
        Element-wise negative log-probabilities, same shape as `input.log_prob(target)`.
    """
    log_likelihood = input.log_prob(target)
    return log_likelihood.neg()
class InformerFeatureEmbedder(TimeSeriesFeatureEmbedder):
    """Informer alias of [`TimeSeriesFeatureEmbedder`]; behavior is inherited unchanged."""

    pass
class InformerStdScaler(TimeSeriesStdScaler):
    """Informer alias of [`TimeSeriesStdScaler`]; behavior is inherited unchanged."""

    pass
class InformerMeanScaler(TimeSeriesMeanScaler):
    """Informer alias of [`TimeSeriesMeanScaler`]; behavior is inherited unchanged."""

    pass
class InformerNOPScaler(TimeSeriesNOPScaler):
    """Informer alias of [`TimeSeriesNOPScaler`]; behavior is inherited unchanged."""

    pass
class InformerSinusoidalPositionalEmbedding(TimeSeriesSinusoidalPositionalEmbedding):
    """Informer alias of [`TimeSeriesSinusoidalPositionalEmbedding`]; behavior is inherited unchanged."""

    pass
class InformerValueEmbedding(TimeSeriesValueEmbedding):
    """Informer alias of [`TimeSeriesValueEmbedding`]; behavior is inherited unchanged."""

    pass
@auto_docstring
class InformerPreTrainedModel(PreTrainedModel):
    # Shared base class providing config binding, weight init and gradient-checkpointing
    # support for all Informer models.
    config: InformerConfig
    base_model_prefix = "model"
    main_input_name = "past_values"
    input_modalities = ("time",)
    supports_gradient_checkpointing = True

    @torch.no_grad()
    def _init_weights(self, module: nn.Module):
        """Run the generic init, then overwrite sinusoidal position tables deterministically."""
        super()._init_weights(module)
        if isinstance(module, InformerSinusoidalPositionalEmbedding):
            # The positional table is recomputed via `create_weight()`, replacing whatever the
            # generic initializer produced for this parameter.
            init.copy_(module.weight, module.create_weight())
class InformerAttention(BartAttention):
    """Standard multi-head attention (inherited from BART), used when `config.attention_type` is not `"prob"`."""

    pass
class InformerProbSparseAttention(nn.Module):
    """Probabilistic Attention mechanism to select the "active"
    queries rather than the "lazy" queries and provides a sparse Transformer thus mitigating the quadratic compute and
    memory requirements of vanilla attention"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        sampling_factor: int = 5,
        bias: bool = True,
        layer_idx: int | None = None,
    ):
        super().__init__()
        # Sampling factor controls both how many keys are sampled per query and how many
        # "active" queries are kept (see `u_part` and `u` in `forward`).
        self.factor = sampling_factor
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.layer_idx = layer_idx
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int) -> torch.Tensor:
        # (bsz, seq, embed) -> (bsz, num_heads, seq, head_dim); contiguous so the later `.view` is valid.
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        attention_mask: torch.Tensor | None = None,
        output_attentions: bool = False,
        cache_position: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """Input shape: Batch x Time x Channel"""
        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, tgt_len, _ = hidden_states.size()
        src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
        kv_input_shape = (bsz, src_len, -1, self.head_dim)

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling

        # Select which sub-cache to read/write (cross-attention vs. self-attention).
        is_updated = False
        if past_key_values is not None:
            if isinstance(past_key_values, EncoderDecoderCache):
                is_updated = past_key_values.is_updated.get(self.layer_idx)
                if is_cross_attention:
                    # after the first generated id, we can subsequently re-use all key/value_states from cache
                    curr_past_key_values = past_key_values.cross_attention_cache
                else:
                    curr_past_key_values = past_key_values.self_attention_cache
            else:
                curr_past_key_values = past_key_values

        current_states = key_value_states if is_cross_attention else hidden_states
        if is_cross_attention and past_key_values is not None and is_updated:
            # reuse k,v, cross_attentions
            key_states = curr_past_key_values.layers[self.layer_idx].keys
            value_states = curr_past_key_values.layers[self.layer_idx].values
        else:
            key_states = self.k_proj(current_states)
            value_states = self.v_proj(current_states)
            key_states = key_states.view(*kv_input_shape).transpose(1, 2)
            value_states = value_states.view(*kv_input_shape).transpose(1, 2)

            if past_key_values is not None:
                # save all key/value_states to cache to be re-used for fast auto-regressive generation
                cache_position = cache_position if not is_cross_attention else None
                key_states, value_states = curr_past_key_values.update(
                    key_states, value_states, self.layer_idx, {"cache_position": cache_position}
                )
                # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
                if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
                    past_key_values.is_updated[self.layer_idx] = True

        # Fold heads into the batch dimension: (bsz * num_heads, seq, head_dim).
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.reshape(*proj_shape)
        value_states = value_states.reshape(*proj_shape)

        key_states_time_length = key_states.size(1)  # L_K
        log_key_states_time_length = np.ceil(np.log1p(key_states_time_length)).astype("int").item()  # log_L_K

        query_states_time_length = query_states.size(1)  # L_Q
        log_query_states_time_length = np.ceil(np.log1p(query_states_time_length)).astype("int").item()  # log_L_Q

        # u_part: number of sampled keys each query is scored against; u: number of "active" queries kept.
        u_part = min(self.factor * query_states_time_length * log_key_states_time_length, key_states_time_length)
        u = min(self.factor * log_query_states_time_length, query_states_time_length)

        if key_states_time_length > 0:
            # NOTE(review): keys are sampled uniformly with replacement; indices are drawn on CPU — confirm intended.
            index_sample = torch.randint(0, key_states_time_length, (u_part,))
            k_sample = key_states[:, index_sample, :]
        else:
            k_sample = key_states

        queries_keys_sample = torch.bmm(query_states, k_sample.transpose(1, 2))  # Q_K_sampled

        # find the Top_k query with sparsity measurement
        if u > 0:
            # Sparsity score per query: max over sampled keys minus mean over (all) keys.
            sparsity_measurement = queries_keys_sample.max(dim=-1)[0] - torch.div(
                queries_keys_sample.sum(dim=-1), key_states_time_length
            )  # M
            top_u_sparsity_measurement = sparsity_measurement.topk(u, sorted=False)[1]  # M_top

            # calculate q_reduce: query_states[:, top_u_sparsity_measurement]
            dim_for_slice = torch.arange(query_states.size(0)).unsqueeze(-1)
            q_reduce = query_states[dim_for_slice, top_u_sparsity_measurement]
        else:
            q_reduce = query_states
            top_u_sparsity_measurement = None

        # Use q_reduce to calculate attention weights
        attn_weights = torch.bmm(q_reduce, key_states.transpose(1, 2))

        src_len = key_states.size(1)
        if attn_weights.size() != (bsz * self.num_heads, u, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, u, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            prob_mask = attention_mask.expand(bsz, self.num_heads, tgt_len, src_len).reshape(
                bsz * self.num_heads, tgt_len, src_len
            )

            if top_u_sparsity_measurement is not None:
                # Keep only the mask rows corresponding to the selected "active" queries.
                dim_for_slice = torch.arange(prob_mask.size(0)).unsqueeze(-1)
                prob_mask = prob_mask[dim_for_slice, top_u_sparsity_measurement, :]

            attn_weights = attn_weights.view(bsz, self.num_heads, u, src_len) + prob_mask.view(
                bsz, self.num_heads, u, src_len
            )
            attn_weights = attn_weights.view(bsz * self.num_heads, u, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, u, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, u, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        # calculate context for updating the attn_output, based on:
        # https://github.com/zhouhaoyi/Informer2020/blob/ac59c7447135473fb2aafeafe94395f884d5c7a5/models/attn.py#L74
        if self.is_decoder:
            # cast to float32 before operation to avoid overflow
            context = value_states.cumsum(dim=-2, dtype=torch.float32).to(value_states.dtype)
        else:
            v_mean_dim_time = value_states.mean(dim=-2)
            context = (
                v_mean_dim_time.unsqueeze(dim=1)
                .expand(bsz * self.num_heads, query_states_time_length, v_mean_dim_time.size(-1))
                .clone()
            )

        if top_u_sparsity_measurement is not None:
            # update context: copy the attention output to the context at top_u_sparsity_measurement index
            dim_for_slice = torch.arange(context.size(0)).unsqueeze(-1)
            context[dim_for_slice, top_u_sparsity_measurement, :] = attn_output
            attn_output = context

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)

        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped
# source: https://github.com/zhouhaoyi/Informer2020/blob/main/models/encoder.py
class InformerConvLayer(GradientCheckpointingLayer):
    """Informer "distilling" block: circular conv + batch norm + ELU + stride-2 max-pool,
    which roughly halves the sequence length between encoder layers."""

    def __init__(self, c_in):
        super().__init__()
        # Attribute names are part of the checkpoint state-dict layout — do not rename.
        self.downConv = nn.Conv1d(
            in_channels=c_in,
            out_channels=c_in,
            kernel_size=3,
            padding=1,
            padding_mode="circular",
        )
        self.norm = nn.BatchNorm1d(c_in)
        self.activation = nn.ELU()
        self.maxPool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        # Conv1d expects channels-first: (batch, seq, channels) -> (batch, channels, seq).
        features = self.downConv(x.permute(0, 2, 1))
        features = self.maxPool(self.activation(self.norm(features)))
        # Back to (batch, seq', channels) with the downsampled sequence length.
        return features.transpose(1, 2)
class InformerEncoderLayer(TimeSeriesTransformerEncoderLayer):
    """Encoder layer that swaps the parent's self-attention for Informer's ProbSparse variant
    when `config.attention_type == "prob"`."""

    def __init__(self, config: InformerConfig):
        super().__init__(config)
        # Drop the module installed by the parent before installing the configured variant.
        del self.self_attn
        common_kwargs = {
            "embed_dim": self.embed_dim,
            "num_heads": config.encoder_attention_heads,
            "dropout": config.attention_dropout,
        }
        if config.attention_type == "prob":
            self.self_attn = InformerProbSparseAttention(
                sampling_factor=config.sampling_factor, **common_kwargs
            )
        else:
            self.self_attn = InformerAttention(config=config, **common_kwargs)
class InformerDecoderLayer(TimeSeriesTransformerDecoderLayer):
    """Decoder layer that swaps the parent's self-attention for Informer's ProbSparse variant
    when `config.attention_type == "prob"`."""

    def __init__(self, config: InformerConfig, layer_idx: int | None = None):
        super().__init__(config)
        # Drop the module installed by the parent before installing the configured variant.
        del self.self_attn
        common_kwargs = {
            "embed_dim": self.embed_dim,
            "num_heads": config.decoder_attention_heads,
            "dropout": config.attention_dropout,
            "is_decoder": True,
            "layer_idx": layer_idx,
        }
        if config.attention_type == "prob":
            self.self_attn = InformerProbSparseAttention(
                sampling_factor=config.sampling_factor, **common_kwargs
            )
        else:
            self.self_attn = InformerAttention(config=config, **common_kwargs)
class InformerEncoder(TimeSeriesTransformerEncoder):
    """
    Informer encoder: value + sinusoidal positional embedding followed by
    `config.encoder_layers` self-attention layers. With `config.distil` enabled, every layer
    except the last is followed by an [`InformerConvLayer`] that downsamples the sequence
    (the "distilling" operation).
    """

    def __init__(self, config: InformerConfig):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.encoder_layerdrop
        self.gradient_checkpointing = False
        if config.prediction_length is None:
            raise ValueError("The `prediction_length` config needs to be specified.")

        self.value_embedding = InformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
        self.embed_positions = InformerSinusoidalPositionalEmbedding(
            config.context_length + config.prediction_length, config.d_model
        )
        self.layers = nn.ModuleList([InformerEncoderLayer(config) for _ in range(config.encoder_layers)])
        self.layernorm_embedding = nn.LayerNorm(config.d_model)

        if config.distil:
            # One conv layer after each encoder layer except the last; the trailing `None`
            # keeps `conv_layers` the same length as `layers` for the zip in `forward`.
            self.conv_layers = nn.ModuleList(
                [InformerConvLayer(config.d_model) for _ in range(config.encoder_layers - 1)]
            )
            self.conv_layers.append(None)
        else:
            self.conv_layers = [None] * config.encoder_layers

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        attention_mask: torch.Tensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        **kwargs,
    ) -> tuple | BaseModelOutput:
        r"""
        Args:
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        hidden_states = self.value_embedding(inputs_embeds)
        embed_pos = self.embed_positions(inputs_embeds.size())

        hidden_states = self.layernorm_embedding(hidden_states + embed_pos)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        attention_mask = create_bidirectional_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
        )

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        # Fix: the previous `enumerate(...)` bound an `idx` that was never used.
        for encoder_layer, conv_layer in zip(self.layers, self.conv_layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
            to_drop = False
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:  # skip the layer
                    to_drop = True

            if to_drop:
                layer_outputs = (None, None)
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    output_attentions=output_attentions,
                )
                if conv_layer is not None:
                    # Distilling: downsample the sequence before the next encoder layer.
                    output = conv_layer(layer_outputs[0])
                    layer_outputs = (output,) + layer_outputs[1:]

                hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
class InformerDecoder(TimeSeriesTransformerDecoder):
    """Informer decoder: value + sinusoidal positional embedding over
    `context_length + prediction_length` positions, followed by `config.decoder_layers`
    [`InformerDecoderLayer`]s."""

    def __init__(self, config: InformerConfig):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.decoder_layerdrop
        if config.prediction_length is None:
            raise ValueError("The `prediction_length` config needs to be specified.")

        embed_dim = config.d_model
        self.value_embedding = InformerValueEmbedding(feature_size=config.feature_size, d_model=embed_dim)
        self.embed_positions = InformerSinusoidalPositionalEmbedding(
            config.context_length + config.prediction_length, embed_dim
        )
        self.layers = nn.ModuleList(
            InformerDecoderLayer(config, layer_idx=idx) for idx in range(config.decoder_layers)
        )
        self.layernorm_embedding = nn.LayerNorm(embed_dim)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()
class InformerModel(TimeSeriesTransformerModel):
    def __init__(self, config: InformerConfig):
        PreTrainedModel.__init__(self, config)

        # Input scaling applied to `past_values` before the transformer.
        if config.scaling == "mean" or config.scaling is True:
            self.scaler = InformerMeanScaler(config)
        elif config.scaling == "std":
            self.scaler = InformerStdScaler(config)
        else:
            self.scaler = InformerNOPScaler(config)

        if config.num_static_categorical_features > 0:
            self.embedder = InformerFeatureEmbedder(
                cardinalities=config.cardinality,
                embedding_dims=config.embedding_dimension,
            )

        # transformer encoder-decoder and mask initializer
        self.encoder = InformerEncoder(config)
        self.decoder = InformerDecoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(self, **super_kwargs):
        r"""
        past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
            Past values of the time series, that serve as context in order to predict the future. The sequence size of
            this tensor must be larger than the `context_length` of the model, since the model will use the larger size
            to construct lag features, i.e. additional values from the past which are added in order to serve as "extra
            context".

            The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if no
            `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
            look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of
            the past.

            The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as
            `static_categorical_features`, `static_real_features`, `past_time_features` and lags).

            Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.

            For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
            variates in the time series per time step.
        past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
            Required time features, which the model internally will add to `past_values`. These could be things like
            "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
            could also be so-called "age" features, which basically help the model know "at which point in life" a
            time-series is. Age features have small values for distant past time steps and increase monotonically the
            more we approach the current time step. Holiday features are also a good example of time features.

            These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
            the position encodings are learned from scratch internally as parameters of the model, the Time Series
            Transformer requires to provide additional time features. The Time Series Transformer only learns
            additional embeddings for `static_categorical_features`.

            Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
            must be known at prediction time.

            The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
        past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
            Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in
            `[0, 1]`:

            - 1 for values that are **observed**,
            - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
        static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
            Optional static categorical features for which the model will learn an embedding, which it will add to the
            values of the time series.

            Static categorical features are features which have the same value for all time steps (static over time).

            A typical example of a static categorical feature is a time series ID.
        static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
            Optional static real features which the model will add to the values of the time series.

            Static real features are features which have the same value for all time steps (static over time).

            A typical example of a static real feature is promotion information.
        future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, input_size)`, *optional*):
            Future values of the time series, that serve as labels for the model. The `future_values` is what the
            Transformer needs during training to learn to output, given the `past_values`.

            The sequence length here is equal to `prediction_length`.

            See the demo notebook and code snippets for details.

            Optionally, during training any missing values need to be replaced with zeros and indicated via the
            `future_observed_mask`.

            For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
            variates in the time series per time step.
        future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
            Required time features for the prediction window, which the model internally will add to `future_values`.
            These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as
            Fourier features). These could also be so-called "age" features, which basically help the model know "at
            which point in life" a time-series is. Age features have small values for distant past time steps and
            increase monotonically the more we approach the current time step. Holiday features are also a good example
            of time features.

            These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
            the position encodings are learned from scratch internally as parameters of the model, the Time Series
            Transformer requires to provide additional time features. The Time Series Transformer only learns
            additional embeddings for `static_categorical_features`.

            Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
            must be known at prediction time.

            The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
        encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
            Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.

        Examples:

        ```python
        >>> from huggingface_hub import hf_hub_download
        >>> import torch
        >>> from transformers import InformerModel

        >>> file = hf_hub_download(
        ...     repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
        ... )
        >>> batch = torch.load(file)

        >>> model = InformerModel.from_pretrained("huggingface/informer-tourism-monthly")

        >>> # during training, one provides both past and future values
        >>> # as well as possible additional features
        >>> outputs = model(
        ...     past_values=batch["past_values"],
        ...     past_time_features=batch["past_time_features"],
        ...     past_observed_mask=batch["past_observed_mask"],
        ...     static_categorical_features=batch["static_categorical_features"],
        ...     static_real_features=batch["static_real_features"],
        ...     future_values=batch["future_values"],
        ...     future_time_features=batch["future_time_features"],
        ... )

        >>> last_hidden_state = outputs.last_hidden_state
        ```"""
        # Fix: return the parent's output — the previous body called `super().forward(...)`
        # without `return`, so a direct call to this method yielded `None`.
        return super().forward(**super_kwargs)
class InformerForPrediction(TimeSeriesTransformerForPrediction):
    """Informer model with a distribution head on top, for probabilistic time-series forecasting."""

    def __init__(self, config: InformerConfig):
        # Deliberately skip TimeSeriesTransformerForPrediction.__init__: this class builds its
        # own InformerModel backbone, so only PreTrainedModel's setup is wanted here.
        PreTrainedModel.__init__(self, config)
        self.model = InformerModel(config)
        # Select the parametric output distribution used to model the emission density.
        if config.distribution_output == "student_t":
            self.distribution_output = StudentTOutput(dim=config.input_size)
        elif config.distribution_output == "normal":
            self.distribution_output = NormalOutput(dim=config.input_size)
        elif config.distribution_output == "negative_binomial":
            self.distribution_output = NegativeBinomialOutput(dim=config.input_size)
        else:
            raise ValueError(f"Unknown distribution output {config.distribution_output}")
        # Projects decoder hidden states onto the chosen distribution's parameter space.
        self.parameter_projection = self.distribution_output.get_parameter_projection(self.model.config.d_model)
        self.target_shape = self.distribution_output.event_shape
        if config.loss == "nll":
            self.loss = nll
        else:
            raise ValueError(f"Unknown loss function {config.loss}")
        # Initialize weights of distribution_output and apply final processing
        self.post_init()

    @auto_docstring
    def forward(self, **super_kwargs):
        r"""
        past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
            Past values of the time series, that serve as context in order to predict the future. The sequence size of
            this tensor must be larger than the `context_length` of the model, since the model will use the larger size
            to construct lag features, i.e. additional values from the past which are added in order to serve as "extra
            context".

            The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if no
            `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
            look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of
            the past.

            The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as
            `static_categorical_features`, `static_real_features`, `past_time_features` and lags).

            Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.

            For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
            variates in the time series per time step.
        past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
            Required time features, which the model internally will add to `past_values`. These could be things like
            "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
            could also be so-called "age" features, which basically help the model know "at which point in life" a
            time-series is. Age features have small values for distant past time steps and increase monotonically the
            more we approach the current time step. Holiday features are also a good example of time features.

            These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
            the position encodings are learned from scratch internally as parameters of the model, the Time Series
            Transformer requires to provide additional time features. The Time Series Transformer only learns
            additional embeddings for `static_categorical_features`.

            Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
            must be known at prediction time.

            The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
        past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
            Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in
            `[0, 1]`:

            - 1 for values that are **observed**,
            - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
        static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
            Optional static categorical features for which the model will learn an embedding, which it will add to the
            values of the time series.

            Static categorical features are features which have the same value for all time steps (static over time).

            A typical example of a static categorical feature is a time series ID.
        static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
            Optional static real features which the model will add to the values of the time series.

            Static real features are features which have the same value for all time steps (static over time).

            A typical example of a static real feature is promotion information.
        future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, input_size)`, *optional*):
            Future values of the time series, that serve as labels for the model. The `future_values` is what the
            Transformer needs during training to learn to output, given the `past_values`.

            The sequence length here is equal to `prediction_length`.

            See the demo notebook and code snippets for details.

            Optionally, during training any missing values need to be replaced with zeros and indicated via the
            `future_observed_mask`.

            For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
            variates in the time series per time step.
        future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
            Required time features for the prediction window, which the model internally will add to `future_values`.
            These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as
            Fourier features). These could also be so-called "age" features, which basically help the model know "at
            which point in life" a time-series is. Age features have small values for distant past time steps and
            increase monotonically the more we approach the current time step. Holiday features are also a good example
            of time features.

            These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
            the position encodings are learned from scratch internally as parameters of the model, the Time Series
            Transformer requires to provide additional time features. The Time Series Transformer only learns
            additional embeddings for `static_categorical_features`.

            Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
            must be known at prediction time.

            The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
        future_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
            Boolean mask to indicate which `future_values` were observed and which were missing. Mask values selected
            in `[0, 1]`:

            - 1 for values that are **observed**,
            - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).

            This mask is used to filter out missing values for the final loss calculation.
        encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
            Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.

        Examples:

        ```python
        >>> from huggingface_hub import hf_hub_download
        >>> import torch
        >>> from transformers import InformerForPrediction

        >>> file = hf_hub_download(
        ...     repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
        ... )
        >>> batch = torch.load(file)

        >>> model = InformerForPrediction.from_pretrained(
        ...     "huggingface/informer-tourism-monthly"
        ... )

        >>> # during training, one provides both past and future values
        >>> # as well as possible additional features
        >>> outputs = model(
        ...     past_values=batch["past_values"],
        ...     past_time_features=batch["past_time_features"],
        ...     past_observed_mask=batch["past_observed_mask"],
        ...     static_categorical_features=batch["static_categorical_features"],
        ...     static_real_features=batch["static_real_features"],
        ...     future_values=batch["future_values"],
        ...     future_time_features=batch["future_time_features"],
        ... )

        >>> loss = outputs.loss
        >>> loss.backward()

        >>> # during inference, one only provides past values
        >>> # as well as possible additional features
        >>> # the model autoregressively generates future values
        >>> outputs = model.generate(
        ...     past_values=batch["past_values"],
        ...     past_time_features=batch["past_time_features"],
        ...     past_observed_mask=batch["past_observed_mask"],
        ...     static_categorical_features=batch["static_categorical_features"],
        ...     static_real_features=batch["static_real_features"],
        ...     future_time_features=batch["future_time_features"],
        ... )

        >>> mean_prediction = outputs.sequences.mean(dim=1)
        ```"""
        # Modular stub: only the docstring differs from the parent implementation.
        super().forward(**super_kwargs)
# Public API of this modular file.
__all__ = [
    "InformerForPrediction",
    "InformerModel",
    "InformerPreTrainedModel",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/informer/modular_informer.py",
"license": "Apache License 2.0",
"lines": 673,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/plbart/modular_plbart.py | # Copyright 2022, UCLA NLP, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch PLBART model."""
import math
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import initialization as init
from ...cache_utils import Cache
from ...generation import GenerationMixin
from ...modeling_outputs import (
BaseModelOutput,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring
from ..bart.modeling_bart import (
BartClassificationHead,
BartDecoder,
BartEncoder,
BartForCausalLM,
BartScaledWordEmbedding,
)
from ..bigbird_pegasus.modeling_bigbird_pegasus import BigBirdPegasusForSequenceClassification
from ..mbart.modeling_mbart import shift_tokens_right
from .configuration_plbart import PLBartConfig
class PLBartScaledWordEmbedding(BartScaledWordEmbedding):
    # Inherited unchanged from BART (modular pattern: rename only).
    pass
@auto_docstring
class PLBartPreTrainedModel(PreTrainedModel):
    # Shared config class, weight-init and attention-backend capabilities for all PLBART models.
    config: PLBartConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["PLBartDecoderLayer", "PLBartEncoderLayer"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True

    def _init_weights(self, module):
        """Initialize module weights, additionally zeroing the generation logits bias buffer."""
        super()._init_weights(module)
        if isinstance(module, PLBartForConditionalGeneration):
            # `final_logits_bias` is a buffer, not a parameter, so the generic init
            # above does not cover it and it must be zeroed explicitly.
            init.zeros_(module.final_logits_bias)
class PLBartEncoder(BartEncoder):
    # Identical to the BART encoder (modular pattern: rename only).
    pass
class PLBartDecoder(BartDecoder):
    # Identical to the BART decoder (modular pattern: rename only).
    pass
@auto_docstring
class PLBartModel(PLBartPreTrainedModel):
    # Encoder and decoder token embeddings are tied to the single shared embedding.
    _tied_weights_keys = {
        "encoder.embed_tokens.weight": "shared.weight",
        "decoder.embed_tokens.weight": "shared.weight",
    }

    def __init__(self, config: PLBartConfig):
        super().__init__(config)

        padding_idx, vocab_size = config.pad_token_id, config.vocab_size
        # Scale embeddings by sqrt(d_model) only when the config requests it.
        embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
        self.shared = PLBartScaledWordEmbedding(vocab_size, config.d_model, padding_idx, embed_scale=embed_scale)
        self.encoder = PLBartEncoder(config)
        self.decoder = PLBartDecoder(config)

        self.post_init()

    def get_input_embeddings(self):
        """Return the embedding matrix shared by encoder and decoder."""
        return self.shared

    def set_input_embeddings(self, value):
        """Replace the shared embedding and propagate it to both encoder and decoder."""
        self.shared = value
        self.encoder.embed_tokens = self.shared
        self.decoder.embed_tokens = self.shared

    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.LongTensor | None = None,
        decoder_input_ids: torch.LongTensor | None = None,
        decoder_attention_mask: torch.Tensor | None = None,
        encoder_outputs: list[torch.FloatTensor] | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        decoder_inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs,
    ) -> tuple[torch.Tensor] | Seq2SeqModelOutput:
        r"""
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`] or [`PLBartMultiTokenizer`] depending on the checkpoint.
            See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            PLBart uses a specific language id token as the starting token for `decoder_input_ids` generation that
            varies according to source and target language, *e.g.* 50003 for *en_XX*, and 50001 for *java*. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            For translation and summarization training, `decoder_input_ids` should be provided. If no
            `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
            for denoising pre-training following the paper.
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will
            also be used by default.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # different to other models, PLBart automatically creates decoder_input_ids from
        # input_ids if no decoder_input_ids are provided
        if decoder_input_ids is None and decoder_inputs_embeds is None:
            decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id)

        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                inputs_embeds=inputs_embeds,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        # decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        if not return_dict:
            return decoder_outputs + encoder_outputs

        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
@auto_docstring(
    custom_intro="""
    The PLBART Model with a language modeling head. Can be used for code-to-text, text-to-code and code-to-code.
    """
)
class PLBartForConditionalGeneration(PLBartPreTrainedModel, GenerationMixin):
    base_model_prefix = "model"
    _keys_to_ignore_on_load_missing = ["final_logits_bias"]
    # LM head shares its weight with the backbone's shared embedding.
    _tied_weights_keys = {
        "lm_head.weight": "model.shared.weight",
    }

    def __init__(self, config: PLBartConfig):
        super().__init__(config)
        self.model = PLBartModel(config)
        # Per-token additive bias on the LM logits; kept as a buffer so it is resized with the vocab.
        self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
        self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)

        self.post_init()

    def resize_token_embeddings(
        self, new_num_tokens: int, pad_to_multiple_of: int | None = None, mean_resizing: bool = True
    ) -> nn.Embedding:
        """Resize the token embeddings and keep `final_logits_bias` in sync with the new vocab size."""
        new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
        self._resize_final_logits_bias(new_embeddings.weight.shape[0])
        return new_embeddings

    def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
        """Truncate or zero-pad the logits-bias buffer to `new_num_tokens` entries."""
        old_num_tokens = self.final_logits_bias.shape[-1]
        if new_num_tokens <= old_num_tokens:
            new_bias = self.final_logits_bias[:, :new_num_tokens]
        else:
            # New vocabulary entries start with a zero bias.
            extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
            new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
        self.register_buffer("final_logits_bias", new_bias)

    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.LongTensor | None = None,
        decoder_input_ids: torch.LongTensor | None = None,
        decoder_attention_mask: torch.Tensor | None = None,
        encoder_outputs: list[torch.FloatTensor] | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        decoder_inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.Tensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs,
    ) -> tuple[torch.Tensor] | Seq2SeqLMOutput:
        r"""
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`] or [`PLBartMultiTokenizer`] depending on the checkpoint.
            See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            PLBart uses a specific language id token as the starting token for `decoder_input_ids` generation that
            varies according to source and target language, *e.g.* 50003 for *en_XX*, and 50001 for *java*. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            For translation and summarization training, `decoder_input_ids` should be provided. If no
            `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
            for denoising pre-training following the paper.
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will
            also be used by default.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example Mask-filling:

        ```python
        >>> from transformers import AutoTokenizer, PLBartForConditionalGeneration

        >>> model = PLBartForConditionalGeneration.from_pretrained("uclanlp/plbart-base")
        >>> tokenizer = AutoTokenizer.from_pretrained("uclanlp/plbart-base")

        >>> # en_XX is the language symbol id <LID> for English
        >>> TXT = "<s> Is 0 the <mask> Fibonacci number ? </s> en_XX"
        >>> input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors="pt").input_ids

        >>> logits = model(input_ids).logits
        >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
        >>> probs = logits[0, masked_index].softmax(dim=0)
        >>> values, predictions = probs.topk(5)

        >>> tokenizer.decode(predictions).split()
        ['first', 'same', 'highest', 'result', 'number']
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            # Teacher forcing: derive decoder inputs from the labels when none were given.
            if decoder_input_ids is None and decoder_inputs_embeds is None:
                decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id)

        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )
        lm_logits = self.lm_head(outputs[0])
        lm_logits = lm_logits + self.final_logits_bias.to(lm_logits.device)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return Seq2SeqLMOutput(
            loss=masked_lm_loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )

    def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
        """Shift labels right to build decoder inputs (used by `Trainer` for seq2seq training)."""
        return shift_tokens_right(labels, self.config.pad_token_id)
class PLBartClassificationHead(BartClassificationHead):
    # Identical to the BART classification head (modular pattern: rename only).
    pass
class PLBartForSequenceClassification(BigBirdPegasusForSequenceClassification):
    # Modular stub: inherits the full implementation; only the docstring is overridden.
    # Added @auto_docstring for consistency with the other `forward` overrides in this
    # file (PLBartModel, PLBartForConditionalGeneration, PLBartForCausalLM), so the
    # generated modeling file documents this head the same way.
    @auto_docstring
    def forward(**super_kwargs):
        r"""
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`] or [`PLBartMultiTokenizer`] depending on the checkpoint.
            See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            PLBart uses a specific language id token as the starting token for `decoder_input_ids` generation that
            varies according to source and target language, *e.g.* 50003 for *en_XX*, and 50001 for *java*. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            For translation and summarization training, `decoder_input_ids` should be provided. If no
            `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
            for denoising pre-training following the paper.
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will
            also be used by default.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        super().forward(**super_kwargs)
class PLBartForCausalLM(BartForCausalLM):
    # Modular stub: inherits the full implementation; only the docstring is overridden.
    @auto_docstring
    def forward(**super_kwargs):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, PLBartForCausalLM

        >>> tokenizer = AutoTokenizer.from_pretrained("uclanlp/plbart-base")
        >>> model = PLBartForCausalLM.from_pretrained("uclanlp/plbart-base")
        >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> logits = outputs.logits
        >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
        >>> list(logits.shape) == expected_shape
        True
        ```"""
        super().forward(**super_kwargs)
# Public API of this modular file.
__all__ = ["PLBartForCausalLM", "PLBartForConditionalGeneration", "PLBartForSequenceClassification", "PLBartModel", "PLBartPreTrainedModel"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/plbart/modular_plbart.py",
"license": "Apache License 2.0",
"lines": 348,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/sew/modular_sew.py | # Copyright 2021 ASAPP Inc. and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch SEW model."""
import math
import torch
from torch import nn
from ... import initialization as init
from ...activations import ACT2FN
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
from ...integrations.fsdp import is_fsdp_managed_module
from ...modeling_outputs import BaseModelOutput
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring
from ...utils.generic import is_flash_attention_requested
from ..wav2vec2.modeling_wav2vec2 import (
Wav2Vec2Attention,
Wav2Vec2EncoderLayer,
Wav2Vec2FeatureEncoder,
Wav2Vec2FeedForward,
Wav2Vec2ForCTC,
Wav2Vec2ForSequenceClassification,
Wav2Vec2GroupNormConvLayer,
Wav2Vec2LayerNormConvLayer,
Wav2Vec2NoLayerNormConvLayer,
Wav2Vec2SamePadLayer,
_compute_mask_indices,
)
from .configuration_sew import SEWConfig
# Index in the model-output tuple at which hidden states start (position 0 holds the logits).
_HIDDEN_STATES_START_POSITION = 1
class SEWNoLayerNormConvLayer(Wav2Vec2NoLayerNormConvLayer):
    # Inherited unchanged from Wav2Vec2 (modular pattern: rename only).
    pass
class SEWLayerNormConvLayer(Wav2Vec2LayerNormConvLayer):
    # Inherited unchanged from Wav2Vec2 (modular pattern: rename only).
    pass
class SEWGroupNormConvLayer(Wav2Vec2GroupNormConvLayer):
    # Inherited unchanged from Wav2Vec2 (modular pattern: rename only).
    pass
class SEWPositionalConvEmbedding(nn.Module):
    """Convolutional relative positional embedding, strided by `squeeze_factor` to match
    the pooled sequence length of the SEW encoder."""

    def __init__(self, config):
        super().__init__()
        self.conv = nn.Conv1d(
            config.hidden_size,
            config.hidden_size,
            kernel_size=config.num_conv_pos_embeddings,
            padding=config.num_conv_pos_embeddings // 2,
            groups=config.num_conv_pos_embedding_groups,
            stride=config.squeeze_factor,
        )

        # Prefer the parametrizations API when available (the plain nn.utils.weight_norm
        # is deprecated in newer torch releases).
        weight_norm = nn.utils.weight_norm
        if hasattr(nn.utils.parametrizations, "weight_norm"):
            weight_norm = nn.utils.parametrizations.weight_norm

        if is_deepspeed_zero3_enabled():
            import deepspeed

            # Under ZeRO-3 the weight is partitioned across ranks; gather it before
            # applying weight norm, then register the resulting g/v tensors so
            # deepspeed tracks them as external parameters of this module.
            with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
                self.conv = weight_norm(self.conv, name="weight", dim=2)
            if hasattr(self.conv, "parametrizations"):
                weight_g = self.conv.parametrizations.weight.original0
                weight_v = self.conv.parametrizations.weight.original1
            else:
                weight_g = self.conv.weight_g
                weight_v = self.conv.weight_v
            deepspeed.zero.register_external_parameter(self, weight_v)
            deepspeed.zero.register_external_parameter(self, weight_g)
        else:
            self.conv = weight_norm(self.conv, name="weight", dim=2)

        self.padding = SEWSamePadLayer(config.num_conv_pos_embeddings)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        # Input/output layout is (batch, channels, time) — conv over the time axis.
        hidden_states = self.conv(hidden_states)
        hidden_states = self.padding(hidden_states)
        hidden_states = self.activation(hidden_states)

        return hidden_states
class SEWSamePadLayer(Wav2Vec2SamePadLayer):
    # Inherited unchanged from Wav2Vec2 (modular pattern: rename only).
    pass
class SEWUpsampling(nn.Module):
    """Restores temporal resolution after the SEW encoder's pooling: widens the feature
    dimension by `squeeze_factor` with a linear projection, then folds the extra
    channels back into the time axis."""

    def __init__(self, config):
        super().__init__()
        factor = config.squeeze_factor
        self.projection = nn.Linear(config.hidden_size, config.hidden_size * factor)
        self.activation = ACT2FN[config.feat_extract_activation]
        self.squeeze_factor = factor

    def forward(self, hidden_states):
        out = self.activation(self.projection(hidden_states))
        factor = self.squeeze_factor
        if factor > 1:
            # Trade embedding channels for sequence length:
            # (batch, seq, factor * dim) -> (batch, seq * factor, dim).
            batch_size, seq_len, wide_dim = out.size()
            out = out.reshape(batch_size, seq_len * factor, wide_dim // factor)
        return out
class SEWFeatureEncoder(Wav2Vec2FeatureEncoder):
    # Inherited unchanged from Wav2Vec2 (modular pattern: rename only).
    pass
class SEWAttention(Wav2Vec2Attention):
    # Inherited unchanged from Wav2Vec2 (modular pattern: rename only).
    pass
class SEWFeedForward(Wav2Vec2FeedForward):
    # Inherited unchanged from Wav2Vec2 (modular pattern: rename only).
    pass
class SEWEncoderLayer(Wav2Vec2EncoderLayer):
    # Inherited unchanged from Wav2Vec2 (modular pattern: rename only).
    pass
class SEWEncoder(nn.Module):
    """SEW transformer encoder: time axis is average-pooled by `squeeze_factor` before
    the transformer layers and upsampled back afterwards."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pos_conv_embed = SEWPositionalConvEmbedding(config)
        # Downsamples the time axis by `squeeze_factor` before the transformer layers.
        self.pool = nn.AvgPool1d(config.squeeze_factor, config.squeeze_factor)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList([SEWEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.upsample = SEWUpsampling(config)
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if attention_mask is not None:
            expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
            if is_flash_attention_requested(self.config):
                # make sure padded tokens output 0
                hidden_states[~expand_attention_mask] = 0.0
                # 2d mask is passed through the layers
                attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
            else:
                # make sure padded tokens output 0
                hidden_states[~expand_attention_mask] = 0.0
                input_lengths = (attention_mask.long()).sum(-1)
                # apply pooling formula to get real output_lengths
                output_lengths = input_lengths // self.config.squeeze_factor
                max_encoder_length = hidden_states.shape[1] // self.config.squeeze_factor
                # Rebuild a 2-D mask at the pooled resolution: position i is valid iff
                # i < pooled length of that sample.
                attention_ids = (
                    torch.arange(0, max_encoder_length, device=output_lengths.device)
                    .view(1, -1)
                    .expand(output_lengths.shape[0], -1)
                )
                attention_mask = (attention_ids < output_lengths.view(-1, 1)).long()

                # Extend to a 4-D additive mask: 0 where attended, dtype-min where padded.
                attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
                attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
                attention_mask = attention_mask.expand(
                    attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
                )

        n_input_timesteps = hidden_states.shape[1]

        # Conv / pooling operate on (batch, channels, time).
        hidden_states = hidden_states.transpose(1, 2)
        position_embeddings = self.pos_conv_embed(hidden_states)
        pooled_hidden_states = self.pool(hidden_states)
        # Strided conv and avg-pool may disagree by a frame; align on the shorter length.
        min_length = min(position_embeddings.size(-1), pooled_hidden_states.size(-1))
        hidden_states = pooled_hidden_states[..., :min_length] + position_embeddings[..., :min_length]
        hidden_states = hidden_states.transpose(1, 2)

        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)

        synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)

        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
            dropout_probability = torch.rand([])

            skip_the_layer = self.training and dropout_probability < self.config.layerdrop
            if not skip_the_layer or synced_gpus:
                # under fsdp or deepspeed zero3 all gpus must run in sync
                layer_outputs = layer(
                    hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
                )
                hidden_states = layer_outputs[0]

            if skip_the_layer:
                layer_outputs = (None, None)

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        hidden_states = self.upsample(hidden_states)
        # Pad back to the pre-pooling sequence length if upsampling fell short.
        if hidden_states.shape[1] < n_input_timesteps:
            hidden_states = nn.functional.pad(hidden_states, (0, 0, 0, n_input_timesteps - hidden_states.shape[1]))

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
@auto_docstring
class SEWPreTrainedModel(PreTrainedModel):
    # Class attributes consumed by the `PreTrainedModel` machinery
    config: SEWConfig
    base_model_prefix = "sew"
    main_input_name = "input_values"
    input_modalities = "audio"
    supports_gradient_checkpointing = True
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = False  # needs a proper look into the mask creation

    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize the weights of a single submodule (called once per module by `post_init`)."""
        if isinstance(module, SEWPositionalConvEmbedding):
            # The positional conv embedding gets its own scaled normal init for the conv weight
            init.normal_(
                module.conv.weight,
                mean=0,
                std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
            )
            init.constant_(module.conv.bias, 0)
        elif isinstance(module, nn.Linear):
            init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
            init.zeros_(module.bias)
            init.ones_(module.weight)
        elif isinstance(module, nn.Conv1d):
            if is_deepspeed_zero3_enabled():
                import deepspeed

                # Under ZeRO-3 parameters are sharded across ranks: gather them (modifier_rank=0)
                # before initializing. Weight-normed convs expose `weight_v`/`weight_g` in addition
                # to the computed `weight`, so both shards must be gathered in that case.
                if hasattr(module, "weight_v") and hasattr(module, "weight_g"):
                    with deepspeed.zero.GatheredParameters([module.weight_v, module.weight_g], modifier_rank=0):
                        init.kaiming_normal_(module.weight)
                else:
                    with deepspeed.zero.GatheredParameters(module.weight, modifier_rank=0):
                        init.kaiming_normal_(module.weight)
            else:
                init.kaiming_normal_(module.weight)
        # Biases of Linear and Conv1d layers are always zeroed (the branches above do not touch them)
        if isinstance(module, (nn.Linear, nn.Conv1d)) and module.bias is not None:
            init.zeros_(module.bias)

    def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor | int):
        """
        Computes the output length of the convolutional layers
        """

        def _conv_out_length(input_length, kernel_size, stride):
            # 1D convolutional layer output length formula taken
            # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
            return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1

        # Apply the formula once per conv layer of the feature encoder
        for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
            input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
        return input_lengths

    def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
        """Project the raw-audio attention mask onto the (shorter) extracted-feature time axis."""
        output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
        batch_size = attention_mask.shape[0]
        attention_mask = torch.zeros(
            (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
        )
        # these two operations makes sure that all values before the output lengths idxs are attended to
        attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
        attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        return attention_mask
@auto_docstring
class SEWModel(SEWPreTrainedModel):
    def __init__(self, config: SEWConfig):
        super().__init__(config)
        self.config = config
        self.feature_extractor = SEWFeatureEncoder(config)
        self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
        # A projection is only needed when the conv feature dim differs from the hidden size
        self.project_features = config.conv_dim[-1] != config.hidden_size
        if self.project_features:
            self.feature_projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
        self.feature_dropout = nn.Dropout(config.feat_proj_dropout)
        # Learned embedding substituted at masked positions by SpecAugment (see _mask_hidden_states)
        if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
            self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_())
        self.encoder = SEWEncoder(config)
        # Initialize weights and apply final processing
        self.post_init()

    # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states
    def _mask_hidden_states(
        self,
        hidden_states: torch.FloatTensor,
        mask_time_indices: torch.FloatTensor | None = None,
        attention_mask: torch.LongTensor | None = None,
    ):
        """
        Masks extracted features along time axis and/or along feature axis according to
        [SpecAugment](https://huggingface.co/papers/1904.08779).
        """
        # `config.apply_spec_augment` can set masking to False
        if not getattr(self.config, "apply_spec_augment", True):
            return hidden_states
        # generate indices & apply SpecAugment along time axis
        batch_size, sequence_length, hidden_size = hidden_states.size()
        if mask_time_indices is not None:
            # apply SpecAugment along time axis with given mask_time_indices
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
        elif self.config.mask_time_prob > 0 and self.training:
            # sample time-mask indices only during training
            mask_time_indices = _compute_mask_indices(
                (batch_size, sequence_length),
                mask_prob=self.config.mask_time_prob,
                mask_length=self.config.mask_time_length,
                attention_mask=attention_mask,
                min_masks=self.config.mask_time_min_masks,
            )
            mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
        if self.config.mask_feature_prob > 0 and self.training:
            # generate indices & apply SpecAugment along feature axis
            mask_feature_indices = _compute_mask_indices(
                (batch_size, hidden_size),
                mask_prob=self.config.mask_feature_prob,
                mask_length=self.config.mask_feature_length,
                min_masks=self.config.mask_feature_min_masks,
            )
            mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
            # broadcast the per-feature mask over all timesteps; masked features are zeroed, not replaced
            mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
            hidden_states[mask_feature_indices] = 0
        return hidden_states

    @auto_docstring
    def forward(
        self,
        input_values: torch.Tensor | None,
        attention_mask: torch.Tensor | None = None,
        mask_time_indices: torch.FloatTensor | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        **kwargs,
    ) -> tuple | BaseModelOutput:
        r"""
        mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
            masked extracted features in *config.proj_codevector_dim* space.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Conv feature encoder outputs (batch, feat_dim, frames) -> transpose to (batch, frames, feat_dim)
        extract_features = self.feature_extractor(input_values)
        extract_features = extract_features.transpose(1, 2)
        extract_features = self.layer_norm(extract_features)
        if self.project_features:
            extract_features = self.feature_projection(extract_features)
        hidden_states = self.feature_dropout(extract_features)
        if attention_mask is not None:
            # compute reduced attention_mask corresponding to feature vectors
            attention_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
        hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices)
        encoder_outputs = self.encoder(
            hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = encoder_outputs[0]
        if not return_dict:
            return (hidden_states,) + encoder_outputs[1:]
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
class SEWForCTC(Wav2Vec2ForCTC):
    # Inherits the CTC head implementation from Wav2Vec2ForCTC unchanged.
    pass
class SEWForSequenceClassification(Wav2Vec2ForSequenceClassification):
    # Inherits the sequence-classification head from Wav2Vec2ForSequenceClassification unchanged.
    pass
# Explicit public API of this module
__all__ = ["SEWForCTC", "SEWForSequenceClassification", "SEWModel", "SEWPreTrainedModel"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/sew/modular_sew.py",
"license": "Apache License 2.0",
"lines": 360,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/masking_utils.py | # Copyright 2025 HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from collections.abc import Callable
import torch
import torch.nn.functional as F
from .cache_utils import Cache
from .configuration_utils import PreTrainedConfig
from .utils import is_torch_xpu_available, logging
from .utils.deprecation import deprecate_kwarg
from .utils.generic import GeneralInterface, is_flash_attention_requested
from .utils.import_utils import is_torch_flex_attn_available, is_torch_greater_or_equal, is_tracing
# Flex attention is only importable on supported torch builds; otherwise alias `BlockMask` to a
# real type so annotations and `isinstance` checks elsewhere in this file do not crash.
if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import _DEFAULT_SPARSE_BLOCK_SIZE as flex_default_block_size
    from torch.nn.attention.flex_attention import BlockMask, create_block_mask
else:
    # Register a fake type to avoid crashing for annotations and `isinstance` checks
    BlockMask = torch.Tensor

# Version/hardware feature flags, resolved once at import time
_is_torch_greater_or_equal_than_2_5 = is_torch_greater_or_equal("2.5", accept_dev=True)
_is_torch_greater_or_equal_than_2_6 = is_torch_greater_or_equal("2.6", accept_dev=True)
_is_torch_xpu_available = is_torch_xpu_available()

# Context manager required by the vmap-based mask expansion (only exists on torch>=2.6)
if _is_torch_greater_or_equal_than_2_6:
    from torch._dynamo._trace_wrapped_higher_order_op import TransformGetItemToIndex

logger = logging.get_logger(__name__)
def and_masks(*mask_functions: Callable) -> Callable:
    """Combine several mask functions into one that is True only where every input mask is True."""
    for fn in mask_functions:
        if not callable(fn):
            raise RuntimeError(f"All inputs should be callable mask_functions: {mask_functions}")

    def intersection(batch_idx, head_idx, q_idx, kv_idx):
        # Start from a scalar True on q_idx's device and AND every mask result into it
        accumulated = q_idx.new_ones((), dtype=torch.bool)
        for fn in mask_functions:
            accumulated = accumulated & fn(batch_idx, head_idx, q_idx, kv_idx).to(accumulated.device)
        return accumulated

    return intersection
def or_masks(*mask_functions: Callable) -> Callable:
    """Combine several mask functions into one that is True wherever any input mask is True."""
    for fn in mask_functions:
        if not callable(fn):
            raise RuntimeError(f"All inputs should be callable mask_functions: {mask_functions}")

    def union(batch_idx, head_idx, q_idx, kv_idx):
        # Start from a scalar False on q_idx's device and OR every mask result into it
        accumulated = q_idx.new_zeros((), dtype=torch.bool)
        for fn in mask_functions:
            accumulated = accumulated | fn(batch_idx, head_idx, q_idx, kv_idx).to(accumulated.device)
        return accumulated

    return union
def causal_mask_function(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
    """Lower-triangular (causal) pattern: a query may attend to keys at or before its own position."""
    return q_idx >= kv_idx
def bidirectional_mask_function(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
    """Full bidirectional attention: every (q, kv) pair is allowed.

    NOTE: deliberately written as an index comparison (not a constant) so it stays compatible with
    the index-based, non-vmap expansion.
    """
    return 0 <= q_idx
def sliding_window_overlay(sliding_window: int) -> Callable:
    """Overlay keeping only the last `sliding_window` key positions relative to each query; AND it
    with a causal mask to obtain a proper sliding-window causal mask."""

    def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
        return q_idx - sliding_window < kv_idx

    return inner_mask
def chunked_overlay(chunk_size: int, left_padding: torch.Tensor) -> Callable:
    """Overlay keeping only (q, kv) pairs that land in the same attention chunk, after shifting both
    indices by the per-sequence left padding; AND it with a causal mask for chunked attention."""

    def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
        shift = left_padding[batch_idx]
        return (kv_idx - shift) // chunk_size == (q_idx - shift) // chunk_size

    return inner_mask
def sliding_window_causal_mask_function(sliding_window: int) -> Callable:
    """Build the mask function for causal attention restricted to a sliding window."""
    window_overlay = sliding_window_overlay(sliding_window)
    return and_masks(window_overlay, causal_mask_function)
def sliding_window_bidirectional_overlay(sliding_window: int) -> Callable:
    """Overlay for bidirectional sliding-window attention: a pair of tokens may attend to each other
    when their absolute distance is at most `sliding_window` (inclusive)."""

    def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
        distance = abs(q_idx - kv_idx)
        return distance <= sliding_window

    return inner_mask
def sliding_window_bidirectional_mask_function(sliding_window: int) -> Callable:
    """Build the mask function for bidirectional attention restricted to a sliding window."""
    window_overlay = sliding_window_bidirectional_overlay(sliding_window)
    return and_masks(window_overlay, bidirectional_mask_function)
def chunked_causal_mask_function(chunk_size: int, left_padding: torch.Tensor) -> Callable:
    """Build the mask function for causal chunked attention."""
    chunk_overlay = chunked_overlay(chunk_size, left_padding)
    return and_masks(chunk_overlay, causal_mask_function)
def padding_mask_function(padding_mask: torch.Tensor) -> Callable:
    """Turn a 2D boolean padding mask into an index-based mask function."""

    def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
        # The mask must already span at least the largest kv index in dim 1: it cannot be padded
        # lazily here because the final size is unknown inside this function, and try/except does
        # not vectorize on accelerator devices.
        return padding_mask[batch_idx, kv_idx]

    return inner_mask
def packed_sequence_mask_function(packed_sequence_mask: torch.Tensor) -> Callable:
    """Mask function allowing attention only between positions of the same packed sub-sequence."""

    def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
        same_sequence = packed_sequence_mask[batch_idx, q_idx] == packed_sequence_mask[batch_idx, kv_idx]
        return same_sequence

    return inner_mask
def add_offsets_to_mask_function(mask_function: Callable, q_offset: int, kv_offset: int) -> Callable:
    """Shift the query/key indices seen by `mask_function`, because the torch masking APIs accept
    only lengths starting at 0, not explicit start/end indices."""

    def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
        return mask_function(batch_idx, head_idx, q_offset + q_idx, kv_offset + kv_idx)

    return inner_mask
def prepare_padding_mask(attention_mask: torch.Tensor | None, kv_length: int, kv_offset: int) -> torch.Tensor | None:
"""
From the 2D attention mask, prepare the correct padding mask to use by potentially padding it.
"""
local_padding_mask = attention_mask
if attention_mask is not None:
# Pad it if necessary
if (padding_length := kv_length + kv_offset - attention_mask.shape[-1]) > 0:
local_padding_mask = torch.nn.functional.pad(attention_mask, (0, padding_length))
return local_padding_mask
def _can_skip_causal_mask_xpu(
    padding_mask: torch.Tensor | None,
    query_length: int,
    kv_length: int,
    local_attention_size: int | None,
) -> bool:
    """
    XPU-specific decision for skipping causal mask creation.

    Single-token queries follow the same rules as CUDA. Multi-token queries may additionally skip
    when the padding mask is all-True over the query window and all-False afterwards, which lets
    XPU optimize the first token with a static cache.
    """
    # Mask contents cannot be inspected while tracing, and a local-attention pattern always
    # requires a materialized mask
    if is_tracing(padding_mask) or (local_attention_size is not None and kv_length >= local_attention_size):
        return False
    if padding_mask is None:
        # No padding info: skip for a single query token or a full square causal attention
        return query_length == 1 or kv_length == query_length
    if query_length == 1:
        # Single query token: skip only when no padding token is present
        return padding_mask.all()
    # Multi-token query: all True inside the query window and all False after it
    return padding_mask[:, :query_length].all() and not padding_mask[:, query_length:].any()
def _ignore_causal_mask_sdpa(
    padding_mask: torch.Tensor | None,
    query_length: int,
    kv_length: int,
    kv_offset: int,
    local_attention_size: int | None = None,
) -> bool:
    """
    Decide whether mask creation can be skipped for SDPA, relying on SDPA's `is_causal` argument
    instead.

    Skipping is allowed when no token is masked in the 2D `padding_mask` and either
    `query_length == 1` or `kv_length == query_length`; it lets SDPA dispatch to the flash
    attention kernel, which cannot be used with a custom `attn_mask`.
    """
    # Restrict the padding mask to the kv positions actually used by this attention call
    if padding_mask is not None and padding_mask.shape[-1] > kv_length:
        kept_indices = torch.arange(kv_offset, kv_offset + kv_length, device=padding_mask.device)
        padding_mask = padding_mask[:, kept_indices]
    if _is_torch_xpu_available:
        # XPU devices have special handling for mask skipping:
        # - Single query tokens use the same logic as CUDA
        # - Multi-query tokens can skip if padding_mask is provided and correctly structured
        #   (all True in query window, all False after)
        return _can_skip_causal_mask_xpu(padding_mask, query_length, kv_length, local_attention_size)
    # Under `torch.export` / `torch.onnx.dynamo_export`, `is_causal` would be hard-coded from the
    # example input (see https://github.com/pytorch/pytorch/issues/108108), so never skip while tracing
    if is_tracing(padding_mask):
        return False
    # Only when lower and upper diagonals coincide (see https://github.com/pytorch/pytorch/issues/108108)
    if query_length != 1 and kv_length != query_length:
        return False
    # Sliding/chunked patterns need special entries in the mask, so it cannot be skipped
    if local_attention_size is not None and kv_length >= local_attention_size:
        return False
    # Finally, skipping also requires that no padding token is masked out
    return padding_mask is None or bool(padding_mask.all())
def _can_skip_bidirectional_mask_xpu(
    padding_mask: torch.Tensor | None,
    kv_length: int,
    local_attention_size: int | None,
) -> bool:
    """
    XPU-specific decision for skipping bidirectional mask creation: allowed when not tracing, no
    local-attention constraint applies, and no padding token is present.
    """
    # Mask contents cannot be inspected while tracing, and a local-attention pattern always
    # requires a materialized mask
    if is_tracing(padding_mask) or (local_attention_size is not None and kv_length >= local_attention_size):
        return False
    # Skip unconditionally without padding; otherwise only when no padding token is present
    return True if padding_mask is None else padding_mask.all()
def _ignore_bidirectional_mask_sdpa(
    padding_mask: torch.Tensor | None,
    kv_length: int,
    local_attention_size: int | None = None,
) -> bool:
    """
    Decide whether the bidirectional mask can be skipped entirely for SDPA.

    Skipping is allowed when no token is masked in `padding_mask` and no local-attention constraint
    applies (`local_attention_size` is None or `kv_length < local_attention_size`), letting SDPA
    dispatch to the flash attention kernel, which cannot be used with a custom `attn_mask`.
    """
    if _is_torch_xpu_available:
        # XPU has its own skip rules (no padding + no local attention constraint)
        return _can_skip_bidirectional_mask_xpu(padding_mask, kv_length, local_attention_size)
    # Never inspect mask contents while tracing/exporting: it would create dynamic control flow
    if is_tracing(padding_mask):
        return False
    # Sliding/chunked patterns need special entries in the mask, so it cannot be skipped
    if local_attention_size is not None and kv_length >= local_attention_size:
        return False
    return padding_mask is None or bool(padding_mask.all())
def _vmap_expansion_sdpa(mask_function: Callable) -> Callable:
"""
Used to vmap our mask_functions over the all 4 dimensions (b_idx, h_idx, q_idx, kv_idx) of the inputs.
Using vmap here allows us to keep the performance of vectorized ops, while having a single set of primitive
functions between attention interfaces (i.e. between flex and sdpa/eager, FA2 being a bit different).
"""
# We vmap the function over all 4 dimensions, broadcasting [b_idx, h_idx, q_idx, kv_idx]
dimensions = [(None, None, None, 0), (None, None, 0, None), (None, 0, None, None), (0, None, None, None)]
for dims in dimensions:
mask_function = torch.vmap(mask_function, in_dims=dims, out_dims=0)
return mask_function
def _non_vmap_expansion_sdpa(
batch_indices: torch.Tensor, head_indices: torch.Tensor, q_indices: torch.Tensor, kv_indices: torch.Tensor
):
"""
Used to broadcast our mask_functions over the all 4 dimensions (b_idx, h_idx, q_idx, kv_idx) of the inputs.
Allows the usage of any index-based mask function without relying on vmap.
NOTE: This is limited to index based functions only and is not guaranteed to work otherwise.
Reference:
- https://github.com/huggingface/optimum-onnx/blob/c123e8f4fab61b54a8e0e31ce74462bcacca576e/optimum/exporters/onnx/model_patcher.py#L362-L365
"""
batch_indices = batch_indices[:, None, None, None]
head_indices = head_indices[None, :, None, None]
q_indices = q_indices[None, None, :, None]
kv_indices = kv_indices[None, None, None, :]
return batch_indices, head_indices, q_indices, kv_indices
def sdpa_mask(
    batch_size: int,
    cache_position: torch.Tensor,
    kv_length: int,
    kv_offset: int = 0,
    mask_function: Callable = causal_mask_function,
    attention_mask: torch.Tensor | None = None,
    local_size: int | None = None,
    allow_is_causal_skip: bool = True,
    allow_is_bidirectional_skip: bool = False,
    allow_torch_fix: bool = True,
    use_vmap: bool = False,
    **kwargs,
) -> torch.Tensor | None:
    """
    Create a 4D boolean mask of shape `(batch_size, 1, query_length, kv_length)` where a value of True indicates that
    the element should take part in the attention computation, and False that it should not.
    This function can only be used with torch>=2.5, as the context manager is otherwise not available.
    Args:
        batch_size (`int`):
            The batch size of the input sequence.
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
        kv_length (`int`):
            The size that the key and value states will have during the attention computation.
        kv_offset (`int`, optional):
            An optional offset to indicate at which first position the key and values states will refer to.
        mask_function (`Callable`):
            The mask factory function describing the mask pattern.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length)
        local_size (`int`, optional):
            The size of the local attention, if we do not use full attention. This is used only if `allow_is_causal_skip=True`
            to try to skip mask creation if possible.
        allow_is_causal_skip (`bool`, optional):
            Whether to allow to return `None` for the mask under conditions where we can use the `is_causal` argument in
            `torch.sdpa` instead. Default to `True`.
        allow_is_bidirectional_skip (`bool`, optional):
            Whether to allow to return `None` for the mask under conditions where we do not have to add any bias,
            i.e. full attention without any padding. Default to `False`.
        allow_torch_fix (`bool`, optional):
            Whether to update the mask in case a query is not attending to any tokens, to solve a bug in torch's older
            versions. We need an arg to skip it when using eager. By default `True`.
        use_vmap (`bool`, optional):
            Whether to use `vmap` during the mask construction or not. Allows powerful custom patterns that may not be
            index-based (for the cost of speed performance). By default `False`.
    ## Creating a simple causal mask:
    To create the following causal mask:
        0 ■ ⬚ ⬚ ⬚ ⬚
        1 ■ ■ ⬚ ⬚ ⬚
        2 ■ ■ ■ ⬚ ⬚
        3 ■ ■ ■ ■ ⬚
        4 ■ ■ ■ ■ ■
    You can do
    ```python
    >>> sdpa_mask(batch_size=1, cache_position=torch.arange(5), kv_length=5)
    >>> tensor([[[[ True, False, False, False, False],
            [ True,  True, False, False, False],
            [ True,  True,  True, False, False],
            [ True,  True,  True,  True, False],
            [ True,  True,  True,  True,  True]]]])
    ```
    ## Creating a sliding window mask:
    To create the following sliding window mask (`sliding_window=3`):
        0 ■ ⬚ ⬚ ⬚ ⬚
        1 ■ ■ ⬚ ⬚ ⬚
        2 ■ ■ ■ ⬚ ⬚
        3 ⬚ ■ ■ ■ ⬚
        4 ⬚ ⬚ ■ ■ ■
    You can do
    ```python
    >>> sdpa_mask(batch_size=1, cache_position=torch.arange(5), kv_length=5, mask_function=sliding_window_causal_mask_function(3))
    >>> tensor([[[[ True, False, False, False, False],
            [ True,  True, False, False, False],
            [ True,  True,  True, False, False],
            [False,  True,  True,  True, False],
            [False, False,  True,  True,  True]]]])
    ```
    ## Creating a chunked attention mask
    To create the following chunked attention mask (`chunk_size=3`):
        0 ■ ⬚ ⬚ ⬚ ⬚
        1 ■ ■ ⬚ ⬚ ⬚
        2 ■ ■ ■ ⬚ ⬚
        3 ⬚ ⬚ ⬚ ■ ⬚
        4 ⬚ ⬚ ⬚ ■ ■
    You can do
    ```python
    >>> sdpa_mask(batch_size=1, cache_position=torch.arange(5), kv_length=5, mask_function=chunked_causal_mask_function(3, torch.zeros(1, dtype=int)))
    >>> tensor([[[[ True, False, False, False, False],
            [ True,  True, False, False, False],
            [ True,  True,  True, False, False],
            [False, False, False,  True, False],
            [False, False, False,  True,  True]]]])
    ```
    """
    q_length = cache_position.shape[0]
    # Potentially pad the 2D mask so it covers kv_offset + kv_length positions
    padding_mask = prepare_padding_mask(attention_mask, kv_length, kv_offset)
    # Under specific conditions, we can avoid materializing the mask:
    #   1. Causal masks can rely on the `is_causal` argument
    #   2. Bidirectional masks do not need any further processing (no bias)
    if allow_is_causal_skip and _ignore_causal_mask_sdpa(padding_mask, q_length, kv_length, kv_offset, local_size):
        return None
    if allow_is_bidirectional_skip and _ignore_bidirectional_mask_sdpa(padding_mask, kv_length, local_size):
        return None
    # Potentially add the padding 2D mask on top of the requested pattern
    if padding_mask is not None:
        mask_function = and_masks(mask_function, padding_mask_function(padding_mask))
    # Index ranges for the 4 mask dimensions; the head dimension is kept at size 1 (broadcast)
    batch_arange = torch.arange(batch_size, device=cache_position.device)
    head_arange = torch.arange(1, device=cache_position.device)
    # Similar to `kv_arange = torch.arange(start=kv_offset, end=kv_offset + kv_length, device=cache_position.device)`
    # but without data-dependent slicing (i.e. torch.compile friendly)
    kv_arange = torch.arange(kv_length, device=cache_position.device) + kv_offset
    # Actual mask creation
    # Option 1: Fast non-vmap mask creation (default)
    if not use_vmap:
        # Apply mask function element-wise through broadcasting
        attention_mask = mask_function(*_non_vmap_expansion_sdpa(batch_arange, head_arange, cache_position, kv_arange))
        # Expand the mask to match batch size and query length if they weren't used in the mask function
        attention_mask = attention_mask.expand(batch_size, -1, q_length, kv_length)
    # Option 2: Vmap mask creation (torch>=2.6 and custom patterns)
    elif _is_torch_greater_or_equal_than_2_6:
        # This creates the 4D mask easily. Note that we need this context manager as vmap cannot handle slicing a tensor from
        # scalar tensor (it internally calls `.item()` which vmap does not allow, but this context works around it
        # We don't need to add an offset to the mask_function either, as we vmap directly the correct indices for k and kv indices
        with TransformGetItemToIndex():
            attention_mask = _vmap_expansion_sdpa(mask_function)(batch_arange, head_arange, cache_position, kv_arange)
    # Option 3: Error out since it indicates that the user did something custom, which they shouldn't have (torch<2.6)
    else:
        raise ValueError(
            "The vmap functionality for mask creation is only supported from torch>=2.6. "
            "Please update your torch version or use `use_vmap=False` with index-based masks."
        )
    # Due to a bug in versions of torch<2.5, we need to update the mask in case a query is not attending to any
    # tokens (due to padding). See details in https://github.com/pytorch/pytorch/issues/110213
    if not _is_torch_greater_or_equal_than_2_5 and allow_torch_fix:
        attention_mask = attention_mask | torch.all(~attention_mask, dim=-1, keepdim=True)
    return attention_mask
def eager_mask(
    batch_size: int,
    cache_position: torch.Tensor,
    kv_length: int,
    kv_offset: int = 0,
    mask_function: Callable = causal_mask_function,
    attention_mask: torch.Tensor | None = None,
    dtype: torch.dtype = torch.float32,
    allow_is_bidirectional_skip: bool = False,
    use_vmap: bool = False,
    **kwargs,
) -> torch.Tensor:
    """
    Create a 4D float mask of shape `(batch_size, 1, query_length, kv_length)` for eager attention:
    0 where the element takes part in the attention computation, and the minimum value of `dtype`
    (i.e. -inf) where it does not.

    Args:
        batch_size (`int`):
            The batch size of the input sequence.
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
        kv_length (`int`):
            The size that the key and value states will have during the attention computation.
        kv_offset (`int`, optional):
            An optional offset to indicate at which first position the key and values states will refer to.
        mask_function (`Callable`):
            The mask factory function describing the mask pattern.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length)
        dtype (`torch.dtype`, optional):
            The dtype to use for the mask. By default, `torch.float32`.
        allow_is_bidirectional_skip (`bool`, optional):
            Whether to allow to return `None` for the mask under conditions where we do not have to add any bias,
            i.e. full attention without any padding. Default to `False`.
        use_vmap (`bool`, optional):
            Whether to use `vmap` during the mask construction or not. Allows powerful custom patterns that may not be
            index-based (for the cost of speed performance). By default `False`.
    """
    # Eager masks are simply the boolean sdpa masks cast to an additive float bias, so the sdpa
    # skip shortcuts that could return `None` must be disabled (except the bidirectional one)
    kwargs.pop("allow_is_causal_skip", None)
    kwargs.pop("allow_torch_fix", None)
    boolean_mask = sdpa_mask(
        batch_size=batch_size,
        cache_position=cache_position,
        kv_length=kv_length,
        kv_offset=kv_offset,
        mask_function=mask_function,
        attention_mask=attention_mask,
        allow_is_causal_skip=False,
        allow_is_bidirectional_skip=allow_is_bidirectional_skip,
        allow_torch_fix=False,
        use_vmap=use_vmap,
        **kwargs,
    )
    # Only the bidirectional skip can return None: no bias is needed at all in that case
    if boolean_mask is None:
        return None
    # 0 where attention is allowed, -inf (minimum of `dtype`) where it is masked out
    keep_value = torch.tensor(0.0, device=boolean_mask.device, dtype=dtype)
    return torch.where(boolean_mask, keep_value, torch.finfo(dtype).min)
def flash_attention_mask(
    batch_size: int,
    cache_position: torch.Tensor,
    kv_length: int,
    kv_offset: int = 0,
    mask_function: Callable = causal_mask_function,
    attention_mask: torch.Tensor | None = None,
    **kwargs,
):
    """
    Prepare the attention mask for FA2. Since FA2 is un-padded by definition, this returns `None`
    whenever the mask is fully causal (no padding token), and otherwise the 2D mask from which the
    sequence lengths will be extracted. The mask is sliced from the right for sliding-window use.

    Args:
        batch_size (`int`):
            The batch size of the input sequence.
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
        kv_length (`int`):
            The size that the key and value states will have during the attention computation.
        kv_offset (`int`, optional):
            An optional offset to indicate at which first position the key and values states will refer to.
        mask_function (`Callable`):
            The mask factory function describing the mask pattern.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length)
    """
    if attention_mask is None:
        return None
    # Slice from the right when using sliding or chunked attention (a no-op for full attention)
    attention_mask = attention_mask[:, -kv_length:]
    # With no padding token present, return `None` and let FA2 rely on `is_causal`
    # (note that the attention_mask is a boolean dtype here)
    if attention_mask.all():
        return None
    return attention_mask
def flex_attention_mask(
    batch_size: int,
    cache_position: torch.Tensor,
    kv_length: int,
    kv_offset: int = 0,
    mask_function: Callable = causal_mask_function,
    attention_mask: torch.Tensor | None = None,
    **kwargs,
) -> BlockMask:
    """
    Build the compressed `BlockMask` representation of the 4D attention mask, which is required for
    performant flex attention execution. See: https://pytorch.org/blog/flexattention/
    Args:
        batch_size (`int`):
            The batch size of the input sequence.
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
        kv_length (`int`):
            The size that the key and value states will have during the attention computation.
        kv_offset (`int`, optional):
            An optional offset to indicate at which first position the key and values states will refer to.
        mask_function (`Callable`):
            The mask factory function describing the mask pattern.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length)
    """
    q_length = cache_position.shape[0]
    q_offset = cache_position[0]

    # Overlay the 2D padding mask, if any
    if attention_mask is not None:
        # torch 2.5.x can only handle sequence lengths that are multiples of the default block
        # size (128), so on older versions we right-pad the 2D mask with 0s up to the next multiple
        seen_length = attention_mask.shape[1]
        aligned_length = ((seen_length // flex_default_block_size) + 1) * flex_default_block_size
        padding_needed = aligned_length - seen_length
        if padding_needed > 0 and not _is_torch_greater_or_equal_than_2_6:
            attention_mask = torch.nn.functional.pad(attention_mask, value=0, pad=(0, padding_needed))
        padding_mask = prepare_padding_mask(attention_mask, kv_length, kv_offset)
        mask_function = and_masks(mask_function, padding_mask_function(padding_mask))

    # The flex interface only accepts lengths (not start/end indices), so fold the offsets into the function
    mask_function = add_offsets_to_mask_function(mask_function, q_offset, kv_offset)
    # Compress everything into the block mask
    return create_block_mask(
        mask_mod=mask_function,
        B=batch_size,
        H=None,
        Q_LEN=q_length,
        KV_LEN=kv_length,
        device=cache_position.device,
        _compile=_is_torch_greater_or_equal_than_2_6,
    )
class AttentionMaskInterface(GeneralInterface):
    """
    Registry mapping attention implementation names to their mask-creation functions.
    The mapping lives on the class itself (`_global_mapping`), so a call to `register` is
    reflected into all other files correctly, even when a new instance is created (in order
    to locally override a given function).
    """

    # FA2 and FA3 are both un-padded and share the exact same mask preparation
    _global_mapping = dict(
        sdpa=sdpa_mask,
        eager=eager_mask,
        flash_attention_2=flash_attention_mask,
        flash_attention_3=flash_attention_mask,
        flex_attention=flex_attention_mask,
    )


# Global AttentionMaskInterface shared by all models which do not need to overwrite any of the existing ones
ALL_MASK_ATTENTION_FUNCTIONS: AttentionMaskInterface = AttentionMaskInterface()
def find_packed_sequence_indices(position_ids: torch.Tensor) -> torch.Tensor | None:
    """
    Assign each query token to the packed sequence it belongs to, when several sequences are packed
    along the same batch dimension.
    Args:
        position_ids (`torch.Tensor`)
            A 2D tensor of shape (batch_size, query_length) indicating the positions of each token in the sequences.
    Returns:
        A 2D tensor where identical integers mark tokens of the same sequence. For example, packing
        3 sequences of 2, 3 and 1 tokens along a single batch dim yields [[0, 0, 1, 1, 1, 2]].
        If every batch item holds a single sequence (and we are not compiling/tracing), `None` is
        returned instead — equivalent to [[0, 0, 0, 0, 0, 0]] in the example above.
    """
    # A sequence boundary is any spot where two consecutive position ids differ by more than 1.
    # Prepending (first value - 1) makes the first diff exactly 1, so the first token never counts
    # as a boundary; cumsum over the boundary indicator then yields the sequence indices directly.
    # Note: we assume a single sequence never spans multiple batch dimensions, i.e. one sequence
    # cannot end in batch item 0 and continue at the start of batch item 1.
    prepended_value = position_ids[:, :1] - 1
    deltas = torch.diff(position_ids, prepend=prepended_value, dim=-1)
    sequence_indices = (deltas != 1).cumsum(-1)

    # Checking for "single sequence everywhere" is dynamic control flow, so it must be skipped
    # under tracing/compilation
    if is_tracing(sequence_indices):
        return sequence_indices
    if (sequence_indices[:, -1] == 0).all():
        return None
    return sequence_indices
@deprecate_kwarg("input_embeds", version="5.6.0", new_name="inputs_embeds")
def _preprocess_mask_arguments(
    config: PreTrainedConfig,
    inputs_embeds: torch.Tensor,
    attention_mask: torch.Tensor | BlockMask | None,
    cache_position: torch.Tensor,
    past_key_values: Cache | None,
    position_ids: torch.Tensor | None,
    layer_idx: int | None,
) -> tuple[bool, torch.Tensor | BlockMask | None, torch.Tensor | None, int | None, int | None]:
    # NOTE: the return annotation previously declared a 4-tuple, but every return statement yields
    # 5 values (it was missing the `packed_sequence_mask` element), and the early-exit paths return
    # `None` for `kv_length`/`kv_offset` — fixed to match the actual returns documented below.
    """
    Perform some common pre-processing of the mask arguments we get from the modeling code. Mostly determine the
    key-value length and offsets, and if we should early exit or not.
    Args:
        config (`PreTrainedConfig`):
            The model config.
        inputs_embeds (`torch.Tensor`):
            The input embeddings of shape (batch_size, query_length, hidden_dim). This is used only to infer the
            batch size, query length and dtype.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length).
            It can also be an already prepared 4D mask, in which case it is returned as-is.
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
        past_key_values (`Cache`, optional):
            The past key values, if we use a cache.
        position_ids (`torch.Tensor`, optional)
            A 2D tensor of shape (batch_size, query_length) indicating the positions of each token in the sequences.
        layer_idx (`int`, optional):
            If `past_key_values` is not None, this is the layer index of the cache from which to get the key-value
            length and offset. Indeed, for hybrid caches, different layers may return different lengths.
    Returns:
        early_exit (`bool`):
            Whether we should early exit mask creation, and return the mask as-is.
        attention_mask (`torch.Tensor` or `BlockMask` or `None`):
            The attention mask to either return immediately, or to use in downstream mask creation.
        packed_sequence_mask (`torch.Tensor`, optional):
            In case we detected packed sequence format, this is a tensor where each similar integer indicates that
            the tokens belong to the same sequence.
        kv_length (`int`, optional):
            The size that the key and value states will have during the attention computation. `None` on early exit.
        kv_offset (`int`, optional):
            An offset to indicate at which first position the key and values states will refer to. `None` on early exit.
    """
    # If the mask is already 4D, simply return as-is (it was already prepared, or it is custom)
    if isinstance(attention_mask, (torch.Tensor, BlockMask)) and len(attention_mask.shape) == 4:
        return True, attention_mask, None, None, None
    # For TGI/vLLM backends, or other custom attention without equivalent mask creation: we don't need a mask!
    # Note: it's not ideal to check the `_global_mapping` attribute instead of the object itself, however otherwise
    # full graph dynamo tracing (i.e. torch.export or compile with `fullgraph=True`) will fail on Python<3.11
    # with `torch._dynamo.exc.Unsupported: 'inline in skipfiles:Mapping.__contains__ | __contains__, skipped
    # according trace_rules.lookup SKIP_DIRS'` -- can be removed when we require Python>=3.11
    if config._attn_implementation not in ALL_MASK_ATTENTION_FUNCTIONS._global_mapping:
        return True, None, None, None, None

    # Move the mask to correct device, and potentially switch dtype for efficiency
    if attention_mask is not None and attention_mask.ndim == 2:
        attention_mask = attention_mask.to(device=cache_position.device, dtype=torch.bool)

    # If using a cache, it can give all information about mask sizes based on seen tokens
    if past_key_values is not None:
        kv_length, kv_offset = past_key_values.get_mask_sizes(cache_position, layer_idx)
    # Otherwise, we infer based on our input
    else:
        # 1. Rely on input directly
        if attention_mask is None:
            kv_length, kv_offset = inputs_embeds.shape[1], 0
        # 2. Rely on the mask instead - needed for special cases like prefix tuning in PEFT
        #
        # This is a very unique and special case where an encoder utilizes a cache and expects its length
        # to be accounted for (usually, they should never use a cache). In general, the mask should always
        # match with the input sizes nonetheless (i.e. it does not affect others).
        # Conclusion: "prefix tuning is evil"
        else:
            kv_length, kv_offset = attention_mask.shape[-1], 0

    # We check the position_ids for potential packed sequence format (only if the 2D attention mask is explicitly None,
    # and we don't have past_key_values, i.e. generally a training setup)
    packed_sequence_mask = None
    if position_ids is not None and attention_mask is None and past_key_values is None:
        batch_size = inputs_embeds.shape[0]
        # The position ids are sometimes just unsqueezed, without being expanded
        if batch_size != position_ids.shape[0]:
            position_ids = position_ids.expand(batch_size, -1)
        packed_sequence_mask = find_packed_sequence_indices(position_ids)

    return False, attention_mask, packed_sequence_mask, kv_length, kv_offset
@deprecate_kwarg("input_embeds", version="5.6.0", new_name="inputs_embeds")
def create_causal_mask(
    config: PreTrainedConfig,
    inputs_embeds: torch.Tensor,
    attention_mask: torch.Tensor | None,
    cache_position: torch.Tensor,
    past_key_values: Cache | None,
    position_ids: torch.Tensor | None = None,
    or_mask_function: Callable | None = None,
    and_mask_function: Callable | None = None,
) -> torch.Tensor | BlockMask | None:
    """
    Create a standard causal mask, in the format expected by the attention implementation stored in the
    config. When `past_key_values` has a hybrid cache structure, the mask returned is the one matching
    the "full_attention" layers (to align with what the `modeling_xxx.py` files need).
    Args:
        config (`PreTrainedConfig`):
            The model config.
        inputs_embeds (`torch.Tensor`):
            The input embeddings of shape (batch_size, query_length, hidden_dim). This is used only to infer the
            batch size, query length and dtype.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length).
            It can also be an already prepared 4D mask, in which case it is returned as-is.
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
        past_key_values (`Cache`, optional):
            The past key values, if we use a cache.
        position_ids (`torch.Tensor`, optional)
            A 2D tensor of shape (batch_size, query_length) indicating the positions of each token in the sequences.
        or_mask_function (`Callable`, optional):
            An optional mask function combined with the causal one by union, e.g. to easily overlay
            another pattern such as image tokens handling.
        and_mask_function (`Callable`, optional):
            An optional mask function combined with the causal one by intersection, e.g. to easily overlay
            another pattern such as image tokens handling.
    """
    # Power feature: `is_causal=False` turns a decoder-only model into one using bi-directional
    # attention, so delegate to the bi-directional mask builder in that case
    if not getattr(config, "is_causal", True):
        return create_bidirectional_mask(
            config,
            inputs_embeds,
            attention_mask,
            or_mask_function=or_mask_function,
            and_mask_function=and_mask_function,
        )

    # With a hybrid cache structure, the mask created here must match one of the full layers
    layer_idx = 0
    if hasattr(past_key_values, "is_sliding") and False in past_key_values.is_sliding:
        layer_idx = past_key_values.is_sliding.index(False)

    early_exit, attention_mask, packed_sequence_mask, kv_length, kv_offset = _preprocess_mask_arguments(
        config, inputs_embeds, attention_mask, cache_position, past_key_values, position_ids, layer_idx
    )
    if early_exit:
        return attention_mask

    batch_size = inputs_embeds.shape[0]
    dtype = inputs_embeds.dtype
    mask_interface = ALL_MASK_ATTENTION_FUNCTIONS[config._attn_implementation]
    mask_factory_function = causal_mask_function

    # Default to the non-vmap mask creation; vmap is only needed for user-provided mask functions,
    # as we cannot guarantee those are properly index-based as our implementation requires
    use_vmap = False
    # Do not allow skip if we are compiling (this is to match BC)
    # TODO: cyril -> probably revisit and remove this, but a lot of tests rely on it
    compileable_cache = getattr(past_key_values, "is_compileable", False)
    if _is_torch_xpu_available:
        # On XPU, still allow the skip during prefill to optimize first-token latency; only
        # disallow it for compiled decoding steps
        allow_is_causal_skip = not (compileable_cache and cache_position.shape[0] == 1)
    else:
        allow_is_causal_skip = not compileable_cache

    # Apply user-provided deviations from the causal mask first: doing this before any other mask
    # modification (packed sequence mask, padding mask, ...) is required for correctness!
    for user_function, combine in ((or_mask_function, or_masks), (and_mask_function, and_masks)):
        if user_function is not None:
            if not _is_torch_greater_or_equal_than_2_6:
                raise ValueError("Using `or_mask_function` or `and_mask_function` arguments require torch>=2.6")
            mask_factory_function = combine(mask_factory_function, user_function)
            allow_is_causal_skip = False
            use_vmap = True

    # Account for packed sequence format, if detected
    if packed_sequence_mask is not None:
        mask_factory_function = and_masks(mask_factory_function, packed_sequence_mask_function(packed_sequence_mask))
        allow_is_causal_skip = False

    # Finally build the mask through the implementation-specific interface
    return mask_interface(
        batch_size=batch_size,
        cache_position=cache_position,
        kv_length=kv_length,
        kv_offset=kv_offset,
        mask_function=mask_factory_function,
        attention_mask=attention_mask,
        allow_is_causal_skip=allow_is_causal_skip,  # additional kwarg for sdpa
        dtype=dtype,  # Additional kwarg for eager
        config=config,  # Pass the config as well, in case someone wants to easily have their own mask_interface
        use_vmap=use_vmap,  # Short-circuit to non-vmap expansions for the mask
    )
@deprecate_kwarg("input_embeds", version="5.6.0", new_name="inputs_embeds")
def create_bidirectional_mask(
    config: PreTrainedConfig,
    inputs_embeds: torch.Tensor,
    attention_mask: torch.Tensor | None,
    encoder_hidden_states: torch.Tensor | None = None,
    or_mask_function: Callable | None = None,
    and_mask_function: Callable | None = None,
) -> torch.Tensor | BlockMask | None:
    """
    Create a standard bidirectional mask, in the format expected by the attention implementation
    stored in the config.
    Args:
        config (`PreTrainedConfig`):
            The model config.
        inputs_embeds (`torch.Tensor`):
            The input embeddings of shape (batch_size, query_length, hidden_dim). This is only used to infer metadata
            such as the batch size, query length, dtype, and device.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, kv_length).
            It can also be an already prepared 4D mask of shape (batch_size, 1, query_length, kv_length),
            in which case it is returned as-is.
        encoder_hidden_states (`torch.Tensor`, optional):
            The input embeddings of shape (batch_size, kv_length, hidden_dim). If provided, it is used instead of
            `inputs_embeds` to infer the batch size, kv length and dtype.
        or_mask_function (`Callable`, optional):
            An optional mask function combined with the base one by union, e.g. to easily overlay
            another pattern such as image tokens handling.
        and_mask_function (`Callable`, optional):
            An optional mask function combined with the base one by intersection, e.g. to easily overlay
            another pattern such as image tokens handling.
    """
    # `cache_position` drives all query-related inference downstream, so build a dummy tensor
    # imitating initial positions (there is no growing cache here)
    query_length = inputs_embeds.shape[1]
    cache_position = torch.arange(query_length, device=inputs_embeds.device, dtype=torch.long)
    source_embeds = inputs_embeds if encoder_hidden_states is None else encoder_hidden_states

    # The last few arguments are irrelevant without a (growing) cache, hence the Nones/0
    early_exit, attention_mask, _, kv_length, kv_offset = _preprocess_mask_arguments(
        config, source_embeds, attention_mask, cache_position, None, None, 0
    )
    if early_exit:
        return attention_mask

    batch_size = source_embeds.shape[0]
    dtype = source_embeds.dtype
    mask_interface = ALL_MASK_ATTENTION_FUNCTIONS[config._attn_implementation]
    mask_factory_function = bidirectional_mask_function

    # Mask creation can be skipped entirely unless additional masking operators are overlaid below
    allow_is_bidirectional_skip = True
    # Default to the non-vmap mask creation; vmap is only needed for user-provided mask functions,
    # as we cannot guarantee those are properly index-based as our implementation requires
    use_vmap = False

    # Apply user-provided deviations from the base mask first: doing this before any other mask
    # modification (packed sequence mask, padding mask, ...) is required for correctness!
    for user_function, combine in ((or_mask_function, or_masks), (and_mask_function, and_masks)):
        if user_function is not None:
            if not _is_torch_greater_or_equal_than_2_6:
                raise ValueError("Using `or_mask_function` or `and_mask_function` arguments require torch>=2.6")
            mask_factory_function = combine(mask_factory_function, user_function)
            allow_is_bidirectional_skip = False
            use_vmap = True

    # Finally build the mask through the implementation-specific interface
    return mask_interface(
        batch_size=batch_size,
        cache_position=cache_position,
        kv_length=kv_length,
        kv_offset=kv_offset,
        mask_function=mask_factory_function,
        attention_mask=attention_mask,
        # Additional kwargs for sdpa
        allow_is_causal_skip=False,
        allow_is_bidirectional_skip=allow_is_bidirectional_skip,
        dtype=dtype,  # Additional kwarg for eager
        config=config,  # Pass the config as well, in case someone wants to easily have their own mask_interface
        use_vmap=use_vmap,  # Short-circuit to non-vmap expansions for the mask
    )
@deprecate_kwarg("input_embeds", version="5.6.0", new_name="inputs_embeds")
def create_sliding_window_causal_mask(
    config: PreTrainedConfig,
    inputs_embeds: torch.Tensor,
    attention_mask: torch.Tensor | None,
    cache_position: torch.Tensor,
    past_key_values: Cache | None,
    position_ids: torch.Tensor | None = None,
    or_mask_function: Callable | None = None,
    and_mask_function: Callable | None = None,
) -> torch.Tensor | BlockMask | None:
    """
    Create a sliding window causal mask (the attention pattern mostly democratized by Mistral), in the
    format expected by the attention implementation stored in the config. When `past_key_values` has a
    hybrid cache structure, the mask returned is the one matching the "sliding_attention" layers (to
    align with what the `modeling_xxx.py` files need).
    Args:
        config (`PreTrainedConfig`):
            The model config.
        inputs_embeds (`torch.Tensor`):
            The input embeddings of shape (batch_size, query_length, hidden_dim). This is used only to infer the
            batch size, query length and dtype.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length).
            It can also be an already prepared 4D mask, in which case it is returned as-is.
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
        past_key_values (`Cache`, optional):
            The past key values, if we use a cache.
        position_ids (`torch.Tensor`, optional)
            A 2D tensor of shape (batch_size, query_length) indicating the positions of each token in the sequences.
        or_mask_function (`Callable`, optional):
            An optional mask function combined with the sliding causal one by union, e.g. to easily overlay
            another pattern such as image tokens handling.
        and_mask_function (`Callable`, optional):
            An optional mask function combined with the sliding causal one by intersection, e.g. to easily
            overlay another pattern such as image tokens handling.
    """
    # Power feature: `is_causal=False` turns a decoder-only model into one using bi-directional
    # attention, so delegate to the bi-directional sliding mask builder in that case
    if not getattr(config, "is_causal", True):
        return create_bidirectional_sliding_window_mask(
            config,
            inputs_embeds,
            attention_mask,
            or_mask_function=or_mask_function,
            and_mask_function=and_mask_function,
        )

    # With a hybrid cache structure, the mask created here must match one of the sliding layers
    layer_idx = 0
    if hasattr(past_key_values, "is_sliding") and True in past_key_values.is_sliding:
        layer_idx = past_key_values.is_sliding.index(True)

    early_exit, attention_mask, packed_sequence_mask, kv_length, kv_offset = _preprocess_mask_arguments(
        config, inputs_embeds, attention_mask, cache_position, past_key_values, position_ids, layer_idx
    )
    if early_exit:
        return attention_mask

    sliding_window = getattr(config, "sliding_window", None)
    if sliding_window is None:
        raise ValueError("Could not find a `sliding_window` argument in the config, or it is not set")

    batch_size = inputs_embeds.shape[0]
    dtype = inputs_embeds.dtype
    mask_interface = ALL_MASK_ATTENTION_FUNCTIONS[config._attn_implementation]
    mask_factory_function = sliding_window_causal_mask_function(sliding_window)

    # Default to the non-vmap mask creation; vmap is only needed for user-provided mask functions,
    # as we cannot guarantee those are properly index-based as our implementation requires
    use_vmap = False
    # Do not allow skip if we are compiling (this is to match BC)
    # TODO: cyril -> probably revisit and remove this, but a lot of tests rely on it
    allow_is_causal_skip = not getattr(past_key_values, "is_compileable", False)

    # Apply user-provided deviations from the causal mask first: doing this before any other mask
    # modification (packed sequence mask, padding mask, ...) is required for correctness!
    for user_function, combine in ((or_mask_function, or_masks), (and_mask_function, and_masks)):
        if user_function is not None:
            if not _is_torch_greater_or_equal_than_2_6:
                raise ValueError("Using `or_mask_function` or `and_mask_function` arguments require torch>=2.6")
            mask_factory_function = combine(mask_factory_function, user_function)
            allow_is_causal_skip = False
            use_vmap = True

    # Account for packed sequence format, if detected
    if packed_sequence_mask is not None:
        mask_factory_function = and_masks(mask_factory_function, packed_sequence_mask_function(packed_sequence_mask))
        allow_is_causal_skip = False

    # Finally build the mask through the implementation-specific interface
    return mask_interface(
        batch_size=batch_size,
        cache_position=cache_position,
        kv_length=kv_length,
        kv_offset=kv_offset,
        mask_function=mask_factory_function,
        attention_mask=attention_mask,
        allow_is_causal_skip=allow_is_causal_skip,  # additional kwarg for sdpa
        local_size=sliding_window,  # Additional kwarg for sdpa
        dtype=dtype,  # Additional kwarg for eager
        config=config,  # Pass the config as well, in case someone wants to easily have their own mask_interface
        use_vmap=use_vmap,  # Short-circuit to non-vmap expansions for the mask
    )
@deprecate_kwarg("input_embeds", version="5.6.0", new_name="inputs_embeds")
def create_bidirectional_sliding_window_mask(
    config: PreTrainedConfig,
    inputs_embeds: torch.Tensor,
    attention_mask: torch.Tensor | None,
    or_mask_function: Callable | None = None,
    and_mask_function: Callable | None = None,
) -> torch.Tensor | BlockMask | None:
    """
    Create a standard bidirectional sliding window mask, in the format expected by the attention
    implementation stored in the config.
    Args:
        config (`PreTrainedConfig`):
            The model config.
        inputs_embeds (`torch.Tensor`):
            The input embeddings of shape (batch_size, query_length, hidden_dim). This is only used to infer metadata
            such as the batch size, query length, dtype, and device.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, kv_length).
            It can also be an already prepared 4D mask of shape (batch_size, 1, query_length, kv_length),
            in which case it is returned as-is.
        or_mask_function (`Callable`, optional):
            An optional mask function combined with the base one by union, e.g. to easily overlay
            another pattern such as image tokens handling.
        and_mask_function (`Callable`, optional):
            An optional mask function combined with the base one by intersection, e.g. to easily overlay
            another pattern such as image tokens handling.
    """
    # `cache_position` drives all query-related inference downstream, so build a dummy tensor
    # imitating initial positions (there is no growing cache here)
    query_length = inputs_embeds.shape[1]
    cache_position = torch.arange(query_length, device=inputs_embeds.device, dtype=torch.long)

    # The last few arguments are irrelevant without a (growing) cache, hence the Nones/0
    early_exit, attention_mask, _, kv_length, kv_offset = _preprocess_mask_arguments(
        config, inputs_embeds, attention_mask, cache_position, None, None, 0
    )
    if early_exit:
        return attention_mask

    sliding_window = getattr(config, "sliding_window", None)
    if sliding_window is None:
        raise ValueError("Could not find a `sliding_window` argument in the config, or it is not set")

    batch_size = inputs_embeds.shape[0]
    dtype = inputs_embeds.dtype
    mask_interface = ALL_MASK_ATTENTION_FUNCTIONS[config._attn_implementation]
    mask_factory_function = sliding_window_bidirectional_mask_function(sliding_window)

    # Non-vmap creation by default; skip allowed unless additional masking operators are overlaid below
    use_vmap = False
    allow_is_bidirectional_skip = True

    # Apply user-provided deviations from the base mask (union and/or intersection)
    for user_function, combine in ((or_mask_function, or_masks), (and_mask_function, and_masks)):
        if user_function is not None:
            if not _is_torch_greater_or_equal_than_2_6:
                raise ValueError("Using `or_mask_function` or `and_mask_function` arguments require torch>=2.6")
            mask_factory_function = combine(mask_factory_function, user_function)
            allow_is_bidirectional_skip = False
            use_vmap = True

    # Finally build the mask through the implementation-specific interface
    return mask_interface(
        batch_size=batch_size,
        cache_position=cache_position,
        kv_length=kv_length,
        kv_offset=kv_offset,
        mask_function=mask_factory_function,
        attention_mask=attention_mask,
        allow_is_causal_skip=False,
        allow_is_bidirectional_skip=allow_is_bidirectional_skip,
        local_size=sliding_window,  # Additional kwarg for sdpa
        dtype=dtype,  # Additional kwarg for eager
        config=config,  # Pass the config as well, in case someone wants to easily have their own mask_interface
        use_vmap=use_vmap,  # Short-circuit to non-vmap expansions for the mask
    )
@deprecate_kwarg("input_embeds", version="5.6.0", new_name="inputs_embeds")
def create_chunked_causal_mask(
    config: PreTrainedConfig,
    inputs_embeds: torch.Tensor,
    attention_mask: torch.Tensor | None,
    cache_position: torch.Tensor,
    past_key_values: Cache | None,
    position_ids: torch.Tensor | None = None,
    or_mask_function: Callable | None = None,
    and_mask_function: Callable | None = None,
) -> torch.Tensor | BlockMask | None:
    """
    Create a chunked attention causal mask (the attention pattern mostly democratized by Llama4), in the
    format expected by the attention implementation stored in the config. When `past_key_values` has a
    hybrid cache structure, the mask returned is the one matching the "chunked_attention" layers (to
    align with what the `modeling_xxx.py` files need).
    Args:
        config (`PreTrainedConfig`):
            The model config.
        inputs_embeds (`torch.Tensor`):
            The input embeddings of shape (batch_size, query_length, hidden_dim). This is used only to infer the
            batch size, query length and dtype.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length).
            It can also be an already prepared 4D mask, in which case it is returned as-is.
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) indicating the current indices of the input sequence elements.
        past_key_values (`Cache`, optional):
            The past key values, if we use a cache.
        position_ids (`torch.Tensor`, optional)
            A 2D tensor of shape (batch_size, query_length) indicating the positions of each token in the sequences.
        or_mask_function (`Callable`, optional):
            An optional mask function combined with the chunked causal one by union, e.g. to easily overlay
            another pattern such as image tokens handling.
        and_mask_function (`Callable`, optional):
            An optional mask function combined with the chunked causal one by intersection, e.g. to easily
            overlay another pattern such as image tokens handling.
    """
    # With a hybrid cache structure, the mask created here must match one of the sliding layers
    layer_idx = 0
    if hasattr(past_key_values, "is_sliding") and True in past_key_values.is_sliding:
        layer_idx = past_key_values.is_sliding.index(True)

    early_exit, attention_mask, packed_sequence_mask, kv_length, kv_offset = _preprocess_mask_arguments(
        config, inputs_embeds, attention_mask, cache_position, past_key_values, position_ids, layer_idx
    )
    if early_exit:
        return attention_mask

    chunk_size = getattr(config, "attention_chunk_size", None)
    if chunk_size is None:
        raise ValueError("Could not find an `attention_chunk_size` argument in the config, or it is not set")

    # Flash attention cannot respect the chunked pattern once the context exceeds the chunk size
    if is_flash_attention_requested(config) and kv_length + kv_offset > chunk_size:
        raise ValueError(
            "Flash attention cannot handle chunked attention, and the key-value length is larger than the chunk size so the "
            "chunked pattern cannot be respected. You should use another `attn_implementation` when instantiating the model"
        )

    batch_size = inputs_embeds.shape[0]
    dtype = inputs_embeds.dtype

    # With batched inputs, the chunk boundaries of a left-padded sequence must be shifted by the
    # number of left padding tokens so chunks start at the actual start of that sequence
    if attention_mask is not None:
        # Count only the left padding tokens (positions whose cumulative sum is still 0), not all of them
        left_padding_tokens = (attention_mask.cumsum(dim=-1) == 0).sum(dim=-1)
    else:
        left_padding_tokens = torch.zeros(batch_size, device=cache_position.device, dtype=torch.long)

    mask_interface = ALL_MASK_ATTENTION_FUNCTIONS[config._attn_implementation]
    mask_factory_function = chunked_causal_mask_function(chunk_size, left_padding_tokens)

    # Default to the non-vmap mask creation; vmap is only needed for user-provided mask functions,
    # as we cannot guarantee those are properly index-based as our implementation requires
    use_vmap = False
    # Do not allow skip if we are compiling (this is to match BC)
    # TODO: cyril -> probably revisit and remove this, but a lot of tests rely on it
    allow_is_causal_skip = not getattr(past_key_values, "is_compileable", False)

    # Apply user-provided deviations from the causal mask first: doing this before any other mask
    # modification (packed sequence mask, padding mask, ...) is required for correctness!
    for user_function, combine in ((or_mask_function, or_masks), (and_mask_function, and_masks)):
        if user_function is not None:
            if not _is_torch_greater_or_equal_than_2_6:
                raise ValueError("Using `or_mask_function` or `and_mask_function` arguments require torch>=2.6")
            mask_factory_function = combine(mask_factory_function, user_function)
            allow_is_causal_skip = False
            use_vmap = True

    # Account for packed sequence format, if detected
    if packed_sequence_mask is not None:
        mask_factory_function = and_masks(mask_factory_function, packed_sequence_mask_function(packed_sequence_mask))
        allow_is_causal_skip = False

    # Finally build the mask through the implementation-specific interface
    return mask_interface(
        batch_size=batch_size,
        cache_position=cache_position,
        kv_length=kv_length,
        kv_offset=kv_offset,
        mask_function=mask_factory_function,
        attention_mask=attention_mask,
        allow_is_causal_skip=allow_is_causal_skip,  # additional kwarg for sdpa
        local_size=chunk_size,  # Additional kwarg for sdpa
        dtype=dtype,  # Additional kwarg for eager
        config=config,  # Pass the config as well, in case someone wants to easily have their own mask_interface
        use_vmap=use_vmap,  # Short-circuit to non-vmap expansions for the mask
    )
# Maps a layer attention pattern name to the function creating the corresponding mask
LAYER_PATTERN_TO_MASK_FUNCTION_MAPPING = dict(
    full_attention=create_causal_mask,
    sliding_attention=create_sliding_window_causal_mask,
    chunked_attention=create_chunked_causal_mask,
)
@deprecate_kwarg("input_embeds", version="5.6.0", new_name="inputs_embeds")
def create_masks_for_generate(
    config: PreTrainedConfig,
    inputs_embeds: torch.Tensor,
    attention_mask: torch.Tensor | None,
    cache_position: torch.Tensor,
    past_key_values: Cache | None,
    position_ids: torch.Tensor | None = None,
    or_mask_function: Callable | None = None,
    and_mask_function: Callable | None = None,
    **kwargs,
):
    """
    Build in advance the attention mask(s) a model would create in its `modeling_xxx.py` forward. Used in
    places like `generate` to prepare masks ahead of time when the forwards are compiled with static caches.

    Args:
        config (`PreTrainedConfig`):
            The model config.
        inputs_embeds (`torch.Tensor`):
            The input embeddings of shape (batch_size, query_length, hidden_dim). Only used to infer the
            batch size, query length and dtype.
        attention_mask (`torch.Tensor`, optional):
            The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length),
            or an already prepared 4D mask (returned as-is in that case).
        cache_position (`torch.Tensor`):
            A tensor of shape (query_length,) with the current indices of the input sequence elements.
        past_key_values (`Cache`, optional):
            The past key values, if a cache is used.
        position_ids (`torch.Tensor`, optional)
            A 2D tensor of shape (batch_size, query_length) with the position of each token in the sequences.
        or_mask_function (`Callable`, optional):
            Extra mask function OR-ed (union) with the causal one, e.g. to overlay an image-token mask.
        and_mask_function (`Callable`, optional):
            Extra mask function AND-ed (intersection) with the causal one, e.g. to overlay an image-token mask.
    """
    # For composite models, the relevant attributes live on the text sub-config.
    text_config = config.get_text_config()
    # Shared arguments forwarded to every mask-creation function.
    mask_kwargs = {
        "config": text_config,
        "inputs_embeds": inputs_embeds,
        "attention_mask": attention_mask,
        "cache_position": cache_position,
        "past_key_values": past_key_values,
        "position_ids": position_ids,
        "or_mask_function": or_mask_function,
        "and_mask_function": and_mask_function,
    }
    # Heterogeneous layer types -> one mask per distinct pattern.
    if hasattr(text_config, "layer_types"):
        return {
            pattern: LAYER_PATTERN_TO_MASK_FUNCTION_MAPPING[pattern](**mask_kwargs)
            for pattern in set(text_config.layer_types)
        }
    # Homogeneous models: every layer sliding, every layer chunked, or plain causal.
    if getattr(text_config, "sliding_window", None) is not None:
        return create_sliding_window_causal_mask(**mask_kwargs)
    if getattr(text_config, "attention_chunk_size", None) is not None:
        return create_chunked_causal_mask(**mask_kwargs)
    return create_causal_mask(**mask_kwargs)
# --- Utilities to pretty-print the different masks ---
# ANSI terminal color escapes.
GREEN = "\x1b[92m"
YELLOW = "\x1b[93m"
RESET = "\x1b[0m"
# Default glyph set for rendering a mask as text.
BLACK_SQUARE = "■"  # active ("on") cell
WHITE_SQUARE = "⬚"  # inactive ("off") cell
GREY_SQUARE = "∙"
LOW_TRIANGLE = "⬕"  # partially-active cell variant
UPPER_TRIANGLE = "⬔"  # partially-active cell variant
def get_style(style):
    """Return the glyphs used to render a mask, for the requested style.

    Args:
        style: Name of the glyph set. `"majong"` selects mahjong-tile glyphs;
            any other value falls back to block-drawing characters.

    Returns:
        Tuple `(BLACK_SQUARE, WHITE_SQUARE, LOW_TRIANGLE, UPPER_TRIANGLE)`.
    """
    if style == "majong":
        # Fix: the original assigned BLACK_SQUARE twice ("🀞" immediately
        # overwritten by "🀙"); the dead first assignment is removed.
        BLACK_SQUARE = "🀙"  # Full block (represents "on" or active)
        WHITE_SQUARE = "🀆"  # Light shade (represents "off" or inactive)
        LOW_TRIANGLE = "🀛"  # Lower left triangle (stylized indication)
        # NOTE(review): identical glyph to LOW_TRIANGLE — presumably intentional
        # for this tile set, but confirm a distinct "upper" tile wasn't intended.
        UPPER_TRIANGLE = "🀛"
    else:
        BLACK_SQUARE = "█"  # Full block (represents "on" or active)
        WHITE_SQUARE = "░"  # Light shade (represents "off" or inactive)
        LOW_TRIANGLE = "▙"  # Lower left triangle (stylized indication)
        UPPER_TRIANGLE = "▜"  # Upper left triangle (stylized indication)
    return BLACK_SQUARE, WHITE_SQUARE, LOW_TRIANGLE, UPPER_TRIANGLE
# (A single "⟍" diagonal glyph could alternatively replace both triangles.)
# Colored variants of the default full-cell glyph.
YELLOW_SQUARE = YELLOW + BLACK_SQUARE + RESET
GREEN_SQUARE = GREEN + BLACK_SQUARE + RESET
def tensor_to_mask_visual(original_tensor: torch.Tensor, grid_size=(20, 40), style="majong") -> str:
    """Render a 2D mask tensor as a grid of glyphs, downscaled to fit `grid_size`.

    Cells equal to 1 become the "full" glyph and cells equal to 0 the "empty" one;
    fractional values (produced by average pooling when downscaling) are drawn as
    triangles oriented according to the neighboring cell.
    """
    BLACK_SQUARE, WHITE_SQUARE, LOW_TRIANGLE, UPPER_TRIANGLE = get_style(style)
    h, w = original_tensor.shape
    max_h, max_w = grid_size
    if not (h < max_h and w < max_w):
        # Preserve aspect ratio within max grid size.
        # NOTE(review): the factor 2 presumably compensates for glyphs being
        # roughly twice as tall as wide in a terminal — confirm.
        aspect_ratio = 2 * w / h
        if aspect_ratio > 1:
            w = max_w
            h = min(max_h, max(1, round(max_w / aspect_ratio)))
        else:
            h = max_h
            w = max(1, round(max_h * aspect_ratio))
        # Step 1: Rescale tensor by average pooling
        tensor = original_tensor.unsqueeze(0).unsqueeze(0)  # Add batch and channel dimensions
        tensor = F.adaptive_avg_pool2d(tensor, output_size=(h, w))[0, 0]  # Remove extra dims
    else:
        tensor = original_tensor
    # Step 2: Build the string representation, one glyph per (possibly pooled) cell.
    result = []
    for i in range(h):
        row = ""
        for j in range(w):
            if tensor[i, j] == 1:
                row += BLACK_SQUARE
            elif tensor[i, j] == 0:
                row += WHITE_SQUARE
            else:
                # Fractional cell: orient the triangle using the left neighbor.
                if j > 0:
                    if tensor[i, j - 1] == 1:
                        row += LOW_TRIANGLE
                    elif tensor[i, j - 1] == 0:
                        row += UPPER_TRIANGLE
                    else:
                        row += BLACK_SQUARE if tensor[i, j] == 1 else WHITE_SQUARE
                else:
                    # First column: orient using the right neighbor instead.
                    row += (
                        BLACK_SQUARE
                        if tensor[i, j] == 1
                        else (
                            WHITE_SQUARE
                            if tensor[i, j] == 0
                            # NOTE(review): j == 0 here, so `tensor[i, j + 1]` raises
                            # IndexError when w == 1 and the cell is fractional —
                            # confirm w > 1 is guaranteed upstream.
                            else (UPPER_TRIANGLE if tensor[i, j + 1] == 1 else LOW_TRIANGLE)
                        )
                    )
        result.append(row)
    return "\n".join(result)
class AttentionMask(torch.Tensor):
def __new__(cls, data, style=None):
# Create a new instance of AttentionMask as a Tensor
cls.style = style
return torch.Tensor._make_subclass(cls, data, require_grad=False)
def __init__(self, data):
# You can initialize any additional metadata here if needed
pass
def to_string(self, grid_size=(20, 40), limit=4):
"""Returns a string representation of the block mask."""
dense_mask = self
*batch_dims, num_rows, num_cols = dense_mask.shape
total_vis = []
for idx, batch_idx in enumerate(itertools.product(*[range(i) for i in batch_dims])):
if idx == limit:
total_vis.append("...")
total_vis.append("To print out more, set AttentionMask.to_string(limit=N)")
total_vis.append("You can also index (AttentionMask[batch, head]) to choose a specific batch or head")
break
block_vis = tensor_to_mask_visual(dense_mask[batch_idx], grid_size=grid_size, style=self.style)
total_vis.append(block_vis)
total_vis.append(f"torch.Tensor(shape={tuple(self.shape)}, dtype={self.dtype})")
return "\n".join(total_vis)
def __repr__(self):
return self.to_string()
def __str__(self):
return self.to_string()
@classmethod
def from_tensor(cls, tensor: torch.Tensor, style: str | None = None) -> "AttentionMask":
res = cls(tensor)
res.style = style
return res
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/masking_utils.py",
"license": "Apache License 2.0",
"lines": 1326,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/falcon_h1/configuration_falcon_h1.py | # Copyright 2025 TII and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FalconH1 model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters
from ...utils import logging
# Module-level logger, namespaced to this file.
logger = logging.get_logger(__name__)
# NOTE(review): the docstring below references ibm-fms checkpoints and IBM/Princeton/UIUC
# training — this looks copied from another hybrid-mamba model (the copyright header says
# TII); confirm and point at the TII Falcon-H1 checkpoints instead.
class FalconH1Config(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`FalconH1Model`]. It is used to instantiate a
    FalconH1Model model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with defaults taken from [ibm-fms/FalconH1-9.8b-2.2T-hf](https://huggingface.co/ibm-fms/FalconH1-9.8b-2.2T-hf).
    The FalconH1Model is a hybrid [mamba2](https://github.com/state-spaces/mamba) architecture with SwiGLU.
    The checkpoints are jointly trained by IBM, Princeton, and UIUC.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 128000):
            Vocabulary size of the FalconH1 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`FalconH1Model`]
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
            model has a output word embedding layer.
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 14336):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        num_logits_to_keep (`int` or `None`, *optional*, defaults to 1):
            Number of prompt logits to calculate during generation. If `None`, all logits will be calculated. If an
            integer value, only last `num_logits_to_keep` logits will be calculated. Default is 1 because only the
            logits of the last prompt token are needed for generation. For long sequences, the logits for the entire
            sequence may use a lot of memory so, setting `num_logits_to_keep=1` will reduce memory footprint
            significantly.
        pad_token_id (`int`, *optional*, defaults to 0):
            The id of the padding token.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the "beginning-of-sequence" token.
        eos_token_id (`int`, *optional*, defaults to 2):
            The id of the "end-of-sequence" token.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            Max cached sequence length for the model
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        mamba_d_ssm (`int`, *optional*, defaults to 1024):
            The dimension of the SSM state space latents.
        mamba_n_heads (`int`, *optional*, defaults to 128):
            The number of mamba heads used in the v2 implementation.
        mamba_d_head (`int`, *optional*, defaults to `"auto"`):
            Head embedding dimension size
        mamba_n_groups (`int`, *optional*, defaults to 1):
            The number of the mamba groups used in the v2 implementation.
        mamba_d_state (`int`, *optional*, defaults to 256):
            The dimension the mamba state space latents
        mamba_d_conv (`int`, *optional*, defaults to 4):
            The size of the mamba convolution kernel
        mamba_expand (`int`, *optional*, defaults to 2):
            Expanding factor (relative to hidden_size) used to determine the mamba intermediate size
        mamba_chunk_size (`int`, *optional*, defaults to 256):
            The chunks in which to break the sequence when doing prefill/training
        mamba_conv_bias (`bool`, *optional*, defaults to `True`):
            Flag indicating whether or not to use bias in the convolution layer of the mamba mixer block.
        mamba_proj_bias (`bool`, *optional*, defaults to `False`):
            Flag indicating whether or not to use bias in the input and output projections (["in_proj", "out_proj"]) of the mamba mixer block
        mamba_norm_before_gate (`bool`, *optional*, defaults to `True`):
            Whether to use RMSNorm before the gate in the Mamba block
        mamba_rms_norm (`bool`, *optional*, defaults to `False`):
            Whether to use RMSNorm instead of LayerNorm in the Mamba block
        time_step_min (`float`, *optional*, defaults to 0.001):
            Minimum `time_step` used to bound `dt_proj.bias`.
        time_step_max (`float`, *optional*, defaults to 0.1):
            Maximum `time_step` used to bound `dt_proj.bias`.
        time_step_limit (`tuple`, *optional*, defaults to `(0.0, inf)`):
            Accepted range of time step values for clamping.
        projectors_bias (`bool`, *optional*, defaults to `False`):
            Flag indicating whether or not to use bias in the input and output projections (["in_proj", "out_proj"]) of the attention block
        rope_parameters (`float`, *optional*):
            The scaling value used for the RoPE embeddings. If `None`, no scaling is applied.
        lm_head_multiplier (`float`, *optional*, defaults to 1.0):
            The multiplier for the LM head. This is used to scale the output of the LM head.
        embedding_multiplier (`float`, *optional*, defaults to 1.0):
            The multiplier for the embedding layer. This is used to scale the output of the embedding layer.
        mlp_multipliers (`list[float]`, *optional*):
            The multipliers for the MLP layers. This is used to scale the output of the MLP layers. The first value is
            the multiplier of gate layer, the second value is the multiplier of the down_proj layer.
        key_multiplier (`float`, *optional*):
            The multiplier for the key layer. This is used to scale the output of the key layer.
        attention_out_multiplier (`float`, *optional*):
            The multiplier for the attention output layer. This is used to scale the output of the attention output
            layer.
        attention_in_multiplier (`float`, *optional*):
            The multiplier for the attention input layer. This is used to scale the output of the attention input layer.
        ssm_multipliers (`list[float]`, *optional*):
            The multipliers for the SSM layers. This is used to scale the output of the SSM layers.
        ssm_in_multiplier (`float`, *optional*):
            The multiplier for the SSM input layer. This is used to scale the output of the SSM input layer.
        ssm_out_multiplier (`float`, *optional*):
            The multiplier for the SSM output layer. This is used to scale the output of the SSM output layer.
    """

    model_type = "falcon_h1"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size: int | None = 128000,
        tie_word_embeddings: bool | None = False,
        hidden_size: int | None = 4096,
        intermediate_size: int | None = 14336,
        num_hidden_layers: int | None = 32,
        num_attention_heads: int | None = 32,
        num_key_value_heads: int | None = 8,
        hidden_act: str | None = "silu",
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-5,
        use_cache: bool | None = True,
        num_logits_to_keep: int | None = 1,
        pad_token_id: int | None = 0,
        bos_token_id: int | None = 1,
        eos_token_id: int | None = 2,
        max_position_embeddings: int | None = 8192,
        attention_dropout: float | None = 0.0,
        mamba_d_ssm: int | None = 1024,
        mamba_n_heads: int | None = 128,
        mamba_d_head: int | str | None = "auto",
        mamba_n_groups: int | None = 1,
        mamba_d_state: int | None = 256,
        mamba_d_conv: int | None = 4,
        mamba_expand: int | None = 2,
        mamba_chunk_size: int | None = 256,
        mamba_conv_bias: bool | None = True,
        mamba_proj_bias: bool | None = False,
        mamba_norm_before_gate: bool | None = True,
        mamba_rms_norm: bool | None = False,
        time_step_min: float | None = 0.001,
        time_step_max: float | None = 0.1,
        time_step_limit: tuple[float, float] | None = (0.0, float("inf")),
        projectors_bias: bool | None = False,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        lm_head_multiplier: float | None = 1.0,
        embedding_multiplier: float | None = 1.0,
        mlp_multipliers: list[float] | None = None,
        key_multiplier: float | None = None,
        attention_out_multiplier: float | None = None,
        attention_in_multiplier: float | None = None,
        ssm_multipliers: list[float] | None = None,
        ssm_in_multiplier: float | None = None,
        ssm_out_multiplier: float | None = None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.attention_dropout = attention_dropout
        # FalconH1 never uses biases in attention/MLP linear layers.
        self.attention_bias = False
        self.mlp_bias = False
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.num_logits_to_keep = num_logits_to_keep
        self.projectors_bias = projectors_bias
        # SSM inner width: explicit `mamba_d_ssm` wins, otherwise expand the hidden size.
        mamba_intermediate = mamba_expand * hidden_size if mamba_d_ssm is None else mamba_d_ssm
        if mamba_intermediate % mamba_n_heads != 0:
            raise ValueError("mamba_n_heads must divide mamba_expand * hidden_size")
        # for the mamba_v2, must satisfy the following
        if mamba_d_head == "auto":
            mamba_d_head = mamba_intermediate // mamba_n_heads
        if mamba_d_head * mamba_n_heads != mamba_intermediate:
            raise ValueError("The dimensions for the Mamba head state do not match the model intermediate_size")
        self.mamba_d_ssm = mamba_d_ssm
        self.mamba_n_heads = mamba_n_heads
        self.mamba_d_head = mamba_d_head
        self.mamba_n_groups = mamba_n_groups
        self.mamba_d_state = mamba_d_state
        self.mamba_d_conv = mamba_d_conv
        self.mamba_expand = mamba_expand
        self.mamba_chunk_size = mamba_chunk_size
        self.mamba_conv_bias = mamba_conv_bias
        self.mamba_proj_bias = mamba_proj_bias
        self.mamba_norm_before_gate = mamba_norm_before_gate
        self.mamba_rms_norm = mamba_rms_norm
        self.time_step_min = time_step_min
        self.time_step_max = time_step_max
        self.time_step_limit = tuple(time_step_limit) if time_step_limit is not None else None
        self.lm_head_multiplier = lm_head_multiplier
        self.embedding_multiplier = embedding_multiplier
        # All multipliers default to the identity (1.0) when not provided.
        if mlp_multipliers is not None:
            self.mlp_multipliers = mlp_multipliers
        else:
            self.mlp_multipliers = [1.0, 1.0]
        if attention_out_multiplier is not None:
            self.attention_out_multiplier = attention_out_multiplier
        else:
            self.attention_out_multiplier = 1.0
        if attention_in_multiplier is not None:
            self.attention_in_multiplier = attention_in_multiplier
        else:
            self.attention_in_multiplier = 1.0
        if key_multiplier is not None:
            self.key_multiplier = key_multiplier
        else:
            self.key_multiplier = 1.0
        if ssm_multipliers is not None:
            self.ssm_multipliers = ssm_multipliers
        else:
            self.ssm_multipliers = [1.0, 1.0, 1.0, 1.0, 1.0]
        if ssm_in_multiplier is not None:
            self.ssm_in_multiplier = ssm_in_multiplier
        else:
            self.ssm_in_multiplier = 1.0
        if ssm_out_multiplier is not None:
            self.ssm_out_multiplier = ssm_out_multiplier
        else:
            self.ssm_out_multiplier = 1.0
        self.tie_word_embeddings = tie_word_embeddings
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.rope_parameters = rope_parameters
        super().__init__(**kwargs)

    @property
    def layers_block_type(self):
        """Every FalconH1 layer is an "attention" (hybrid) layer."""
        return ["attention" for i in range(self.num_hidden_layers)]
# Explicit public API of this module.
__all__ = ["FalconH1Config"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/falcon_h1/configuration_falcon_h1.py",
"license": "Apache License 2.0",
"lines": 259,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/falcon_h1/convert_mamba_ssm_checkpoint.py | # Copyright 2025 TII and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script can be used to convert checkpoints provided in the `mamba_ssm` library into the format provided in HuggingFace `transformers`. It depends on the `mamba2_ssm` package to be installed."""
import argparse
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, FalconH1Config, FalconH1ForCausalLM
# Maps substrings of `mamba_ssm`-style parameter names to their HF `transformers`
# equivalents. `convert_falcon_h1_to_hf` applies these replacements sequentially in
# insertion order to every checkpoint key, so generic rules (e.g. "mixer.") run
# before the more specific ones that depend on their output.
CONVERSION_MAPPING = {
    "backbone": "model",
    "embeddings": "embed_tokens",
    "mixer.": "",
    "mixer_ssm": "mamba",
    "mixer_attn": "self_attn",
    "mlp.": "feed_forward.",
    "mlp_norm": "pre_ff_layernorm",
    "ssm_proj": "mamba.in_proj",
    "attn_out_proj": "o_proj",
    ".norm.": ".input_layernorm.",
    ".mamba.input_layernorm.": ".mamba.norm.",
    ".ssm_out_proj.": ".mamba.out_proj.",
    "norm_f": "final_layernorm",
}
def convert_falcon_h1_to_hf(input_model_path, output_path):
    """Convert a remote-code FalconH1 checkpoint into the native HF `transformers` format.

    Loads the source model (with `trust_remote_code=True`), rebuilds an equivalent
    `FalconH1Config`, renames every weight according to `CONVERSION_MAPPING`, splits the
    fused attention projection into q/k/v, and saves the model and tokenizer to
    `output_path`.

    Args:
        input_model_path: Local path or hub id of the original checkpoint.
        output_path: Directory the converted model and tokenizer are written to.
    """
    tokenizer = AutoTokenizer.from_pretrained(input_model_path)
    model = AutoModelForCausalLM.from_pretrained(input_model_path, dtype=torch.bfloat16, trust_remote_code=True)
    # NOTE(review): the config attributes read below (expansion_factor, state_size, ...)
    # come from the remote-code config, not FalconH1Config — confirm against the source repo.
    intermediate_size = int(model.config.expansion_factor * model.config.hidden_size)
    # Round an odd intermediate size up to the next even value.
    if intermediate_size % 2 != 0:
        intermediate_size = intermediate_size + (intermediate_size % 2)
    new_config = FalconH1Config(
        vocab_size=model.config.vocab_size,
        tie_word_embeddings=model.config.tie_word_embeddings,
        hidden_size=model.config.hidden_size,
        intermediate_size=intermediate_size,
        mamba_d_state=model.config.state_size,
        num_hidden_layers=model.config.num_hidden_layers,
        mamba_use_mlp=model.config.use_mlp,
        rms_norm_eps=model.config.layer_norm_epsilon,
        pad_token_id=model.config.pad_token_id,
        eos_token_id=model.config.eos_token_id,
        mamba_expand=model.config.expand,
        mamba_d_conv=model.config.conv_kernel,
        mamba_n_groups=model.config.n_groups,
        mamba_n_heads=model.config.num_heads,
        mamba_norm_before_gate=model.config.norm_before_gate,
        mamba_rms_norm=model.config.rms_norm,
        mamba_d_ssm=model.config.d_ssm,
        attention_bias=model.config.use_bias,
        projectors_bias=model.config.use_bias,
        mamba_conv_bias=model.config.use_conv_bias,
        hidden_act=model.config.hidden_act,
        use_cache=model.config.use_cache,
        mamba_chunk_size=model.config.chunk_size,
        num_attention_heads=model.config.num_heads_mha,
        num_key_value_heads=model.config.num_key_value_heads,
        head_dim=model.config.head_dim_mha,
        lm_head_multiplier=model.config.lm_head_multiplier,
        embedding_multiplier=model.config.embedding_multiplier,
        mlp_multipliers=model.config.mlp_multipliers,
        key_multiplier=model.config.key_multiplier,
        attention_out_multiplier=model.config.attention_out_multiplier,
        attention_in_multiplier=model.config.attention_in_multiplier,
        ssm_multipliers=model.config.ssm_multipliers,
        ssm_in_multiplier=model.config.ssm_in_multiplier,
        ssm_out_multiplier=model.config.ssm_out_multiplier,
        rope_theta=model.config.rope_theta,
    )
    old_state_dict = model.state_dict()
    new_state_dict = {}
    for old_key, old_value in old_state_dict.items():
        new_key = old_key
        # Membership is tested against the ORIGINAL key while replacements accumulate
        # in `new_key`; the explicit fixup below handles the chained rename that this
        # scheme would otherwise miss.
        for conversion_key, conversion_value in CONVERSION_MAPPING.items():
            if conversion_key in old_key:
                new_key = new_key.replace(conversion_key, conversion_value)
        if "mamba.input_layernorm" in new_key:
            new_key = new_key.replace("mamba.input_layernorm", "mamba.norm")
        # Special processing for attention layers: the source checkpoint stores q/k/v
        # fused in a single "attn_proj" matrix, so split it row-wise.
        if "self_attn.attn_proj" in new_key:
            num_heads = new_config.num_attention_heads
            num_kv_heads = new_config.num_key_value_heads
            head_dim = new_config.head_dim
            q_proj, k_proj, v_proj = old_value.split(
                [
                    num_heads * head_dim,
                    num_kv_heads * head_dim,
                    num_kv_heads * head_dim,
                ],
                dim=0,
            )
            new_state_dict[new_key.replace("attn_proj", "q_proj")] = q_proj
            new_state_dict[new_key.replace("attn_proj", "k_proj")] = k_proj
            new_state_dict[new_key.replace("attn_proj", "v_proj")] = v_proj
        else:
            new_state_dict[new_key] = old_value
    # Instantiate on the meta device so no memory is allocated before weights are assigned.
    with torch.device("meta"):
        new_model = FalconH1ForCausalLM(new_config)
    del model  # free the source model before materializing the converted weights
    new_model.load_state_dict(new_state_dict, strict=True, assign=True)
    new_model.save_pretrained(output_path)
    tokenizer.save_pretrained(output_path)
if __name__ == "__main__":
    # CLI entry point: convert a mamba_ssm FalconH1 checkpoint to the HF format.
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "-i",
        "--mamba_ssm_checkpoint_directory",
        type=str,
        required=True,
        help="Path to a directory containing the `pytorch_model.bin` mamba_ssm checkpoint file to be converted.",
    )
    cli.add_argument(
        "-o", "--output_dir", type=str, required=True, help="Path to directory to save the converted output model to."
    )
    parsed = cli.parse_args()
    convert_falcon_h1_to_hf(parsed.mamba_ssm_checkpoint_directory, parsed.output_dir)
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/falcon_h1/convert_mamba_ssm_checkpoint.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/falcon_h1/modular_falcon_h1.py | # Copyright 2025 Technology Innovation Institute and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch FalconH1 model."""
from collections.abc import Callable
from typing import Any
import torch
import torch.nn.functional as F
from torch import nn
from transformers.activations import ACT2FN
from transformers.models.jamba.modeling_jamba import HybridMambaAttentionDynamicCache
from transformers.models.llama.modeling_llama import (
LlamaAttention,
LlamaForCausalLM,
LlamaMLP,
LlamaRMSNorm,
LlamaRotaryEmbedding,
apply_rotary_pos_emb,
eager_attention_forward,
)
from transformers.models.mamba2.modeling_mamba2 import (
MambaRMSNormGated,
apply_mask_to_padding_states,
pad_tensor_by_size,
reshape_into_chunks,
segment_sum,
)
from ... import initialization as init
from ...cache_utils import Cache
from ...integrations.hub_kernels import lazy_load_kernel
from ...masking_utils import create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import auto_docstring, can_return_tuple, is_torchdynamo_compiling, logging
from ...utils.import_utils import resolve_internal_import
from .configuration_falcon_h1 import FalconH1Config
# Module-level logger, namespaced to this file.
logger = logging.get_logger(__name__)
class FalconHybridMambaAttentionDynamicCache(HybridMambaAttentionDynamicCache):
    """
    A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache
    (which has a constant shape regardless of seq_len).

    In FalconH1 every decoder layer is hybrid (attention and mamba both run), so this cache keeps, per layer:
    - an attention entry in `key_cache` / `value_cache` of shape `(batch_size, num_heads, seq_len, head_dim)`,
      filled lazily on the first `update` call, and
    - a convolution state in `conv_states` of shape `(batch_size, conv_dim, d_conv)` together with an ssm state
      in `ssm_states` of shape `(batch_size, n_heads, d_head, d_state)`, both allocated eagerly in `__init__`.
    """

    def __init__(
        self,
        config: FalconH1Config,
        batch_size: int,
        dtype: torch.dtype = torch.float16,
        devices: list[str] | None = None,
    ):
        # NOTE(review): `devices` must provide one entry per layer; the `None` default
        # would fail at the `devices[i]` lookups below — confirm callers always pass it.
        self.seqlen_offset = 0
        self.dtype = dtype
        self.has_previous_state = False
        self.conv_kernel_size = config.mamba_d_conv
        # SSM inner width: explicit `mamba_d_ssm` wins, otherwise expand the hidden size.
        self.intermediate_size = (
            config.mamba_d_ssm if config.mamba_d_ssm is not None else int(config.mamba_expand * config.hidden_size)
        )
        # Rolling buffers for the depthwise conv inputs; the extra
        # `2 * n_groups * d_state` channels carry the B and C projections.
        self.conv_states = {
            i: torch.zeros(
                batch_size,
                self.intermediate_size + 2 * config.mamba_n_groups * config.mamba_d_state,
                self.conv_kernel_size,
                device=devices[i],
                dtype=dtype,
            )
            for i in range(config.num_hidden_layers)
        }
        self.ssm_states = {
            i: torch.zeros(
                batch_size,
                config.mamba_n_heads,
                config.mamba_d_head,
                config.mamba_d_state,
                device=devices[i],
                dtype=dtype,
            )
            for i in range(config.num_hidden_layers)
        }
        # Every FalconH1 layer contains an attention block, so all layers are "transformer" layers.
        self.transformer_layers = list(range(config.num_hidden_layers))
        self.key_cache: list[torch.Tensor] = []
        self.value_cache: list[torch.Tensor] = []

    def update(
        self,
        key_states: torch.Tensor,
        value_states: torch.Tensor,
        layer_idx: int,
        cache_kwargs: dict[str, Any] | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.

        Parameters:
            key_states (`torch.Tensor`):
                The new key states to cache.
            value_states (`torch.Tensor`):
                The new value states to cache.
            layer_idx (`int`):
                The index of the layer to cache the states for.
            cache_kwargs (`dict[str, Any]`, `optional`):
                Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`.

        Return:
            A tuple containing the updated key and value states.
        """
        # Update the cache
        if len(self.key_cache) <= layer_idx:
            # There may be skipped layers, fill them with empty lists
            for _ in range(len(self.key_cache), layer_idx):
                self.key_cache.append([])
                self.value_cache.append([])
            self.key_cache.append(key_states)
            self.value_cache.append(value_states)
        elif len(self.key_cache[layer_idx]) == 0:  # fills previously skipped layers; checking for tensor causes errors
            self.key_cache[layer_idx] = key_states
            self.value_cache[layer_idx] = value_states
        else:
            # Append along the sequence dimension.
            self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
            self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
        return self.key_cache[layer_idx], self.value_cache[layer_idx]

    def update_conv_state(
        self,
        layer_idx: int,
        new_conv_state: torch.Tensor,
        cache_position: torch.LongTensor,
    ) -> torch.Tensor:
        """Roll the per-layer conv buffer and write the new state in place, returning it."""
        conv_state = self.conv_states[layer_idx]
        cache_position = cache_position.clamp(0, self.conv_kernel_size - 1)
        # Shift the window left by one slot; the newest column goes last.
        conv_state = conv_state.roll(shifts=-1, dims=-1)
        if len(cache_position) > 1:
            # Prefill: overwrite the whole window.
            conv_state[:, :, :] = new_conv_state.to(conv_state.device)
        else:
            # Decoding: only the most recent column changes.
            conv_state[:, :, -1] = new_conv_state[:, :, -1].to(conv_state.device)
        # Write back in place so external references to the stored tensor stay valid.
        self.conv_states[layer_idx].zero_()
        self.conv_states[layer_idx] += conv_state
        return self.conv_states[layer_idx]

    def reset(self):
        """Zero out every mamba state in place (the attention cache lists are untouched)."""
        # Fix: `conv_states` / `ssm_states` are dicts of tensors (see `__init__`), so the
        # previous `self.conv_states.zero_()` raised AttributeError; zero each tensor instead.
        for conv_state in self.conv_states.values():
            conv_state.zero_()
        for ssm_state in self.ssm_states.values():
            ssm_state.zero_()
class FalconH1RotaryEmbedding(LlamaRotaryEmbedding):
    # Identical to the Llama rotary embedding; subclassed only so the modular
    # converter emits a FalconH1-prefixed class in the generated modeling file.
    pass
class FalconH1Attention(LlamaAttention):
    """Llama-style multi-head attention with a FalconH1-specific scalar multiplier on the key projections."""

    def __init__(self, config: FalconH1Config, layer_idx: int):
        super().__init__(config, layer_idx)
        # Scalar applied to the key projections (muP-style scaling from the config).
        self.key_multiplier = config.key_multiplier

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        batch_dims = hidden_states.shape[:-1]
        proj_shape = (*batch_dims, -1, self.head_dim)

        # Project, split into heads, and move the head axis before the sequence axis.
        queries = self.q_proj(hidden_states).view(proj_shape).transpose(1, 2)
        keys = self.k_proj(hidden_states).view(proj_shape).transpose(1, 2) * self.key_multiplier
        values = self.v_proj(hidden_states).view(proj_shape).transpose(1, 2)

        cos, sin = position_embeddings
        queries, keys = apply_rotary_pos_emb(queries, keys, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            keys, values = past_key_values.update(keys, values, self.layer_idx, cache_kwargs)

        # Resolve the configured attention backend (eager fallback by default).
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            queries,
            keys,
            values,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        # Merge heads back into the hidden dimension and apply the output projection.
        attn_output = attn_output.reshape(*batch_dims, -1).contiguous()
        return self.o_proj(attn_output), attn_weights
class FalconH1RMSNormGated(MambaRMSNormGated):
    """Grouped RMS normalization with an optional SiLU gate applied either before or after the norm."""

    def __init__(self, hidden_size, eps=1e-6, n_groups=1, norm_before_gate=True):
        super().__init__(hidden_size=hidden_size, eps=eps)
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps
        self.n_groups = n_groups
        # If True, normalize first and gate afterwards; otherwise gate the input first.
        self.norm_before_gate = norm_before_gate

    def forward(self, hidden_states, gate=None):
        input_dtype = hidden_states.dtype
        if gate is not None and not self.norm_before_gate:
            # Gate in float32 before normalizing.
            hidden_states = hidden_states * F.silu(gate.to(torch.float32))

        # Accept both (batch, seq, dim) and (batch, dim) inputs.
        if len(hidden_states.shape) == 3:
            batch_size, seq_len, dim = hidden_states.shape
        else:
            batch_size, dim = hidden_states.shape
            seq_len = 1

        group_dim = int(dim // self.n_groups)
        # RMS-normalize each group independently, in float32 for numerical stability.
        normed = hidden_states.to(torch.float32).view(batch_size, seq_len, self.n_groups, group_dim)
        variance = normed.pow(2).mean(-1, keepdim=True)
        normed = normed * torch.rsqrt(variance + self.variance_epsilon)
        normed = self.weight.view(self.n_groups, group_dim) * normed
        normed = normed.view(batch_size, seq_len, dim)
        if seq_len == 1:
            normed = normed.squeeze(1)

        if gate is not None and self.norm_before_gate:
            normed = normed * F.silu(gate.to(torch.float32))
        return normed.to(input_dtype)
# Adapted from transformers.models.mamba2.modeling_mamba2.Mamba2Mixer
class FalconH1Mixer(nn.Module):
    """
    FalconH1Mixer is identical to classic Mamba2 mixer classes but differs on two different things
    - Users can pass custom intermediate_size through `config.mamba_d_ssm`
    - The use of gated RMS normalization layer is optional (`config.mamba_rms_norm`)

    Additionally, FalconH1 multiplies the input by `ssm_in_multiplier` and the projected states by
    a per-section `mup_vector` buffer (registered externally, see `FalconH1Model.__init__`).
    """

    def __init__(self, config: FalconH1Config, layer_idx: int):
        super().__init__()
        self.num_heads = config.mamba_n_heads
        self.hidden_size = config.hidden_size
        self.ssm_state_size = config.mamba_d_state
        self.conv_kernel_size = config.mamba_d_conv
        # Custom SSM width: `mamba_d_ssm` overrides the usual `expand * hidden_size`.
        self.intermediate_size = (
            int(config.mamba_expand * self.hidden_size) if config.mamba_d_ssm is None else config.mamba_d_ssm
        )
        self.layer_idx = layer_idx
        self.use_conv_bias = config.mamba_conv_bias
        self.activation = config.hidden_act
        self.act = ACT2FN[config.hidden_act]
        self.use_bias = config.mamba_proj_bias
        self.layer_norm_epsilon = config.rms_norm_eps
        self.groups_time_state_size = config.mamba_n_groups * self.ssm_state_size
        self.n_groups = config.mamba_n_groups
        self.head_dim = config.mamba_d_head
        self.chunk_size = config.mamba_chunk_size
        self.time_step_limit = config.time_step_limit
        self.time_step_min = config.time_step_min
        self.time_step_max = config.time_step_max
        # Depthwise convolution over the concatenated [hidden, B, C] channels.
        self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size
        self.conv1d = nn.Conv1d(
            in_channels=self.conv_dim,
            out_channels=self.conv_dim,
            bias=config.mamba_conv_bias,
            kernel_size=self.conv_kernel_size,
            groups=self.conv_dim,
            padding=self.conv_kernel_size - 1,
        )
        # projection of the input hidden states
        projection_size = self.intermediate_size + self.conv_dim + self.num_heads
        self.in_proj = nn.Linear(
            self.hidden_size,
            projection_size,
            bias=self.use_bias,
        )
        # selective projection used to make dt, B and C input dependant
        # time step projection (discretization)
        # instantiate once and copy inv_dt in init_weights of PretrainedModel
        self.dt_bias = nn.Parameter(torch.ones(self.num_heads))
        # S4D real initialization. These are not discretized!
        # The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
        A = torch.arange(1, self.num_heads + 1)
        self.A_log = nn.Parameter(torch.log(A))
        # Gated RMS norm on the scan output is optional in FalconH1.
        self.mamba_rms_norm = config.mamba_rms_norm
        if self.mamba_rms_norm:
            self.norm = FalconH1RMSNormGated(
                self.intermediate_size,
                eps=self.layer_norm_epsilon,
                n_groups=self.n_groups,
                norm_before_gate=config.mamba_norm_before_gate,
            )
        self.D = nn.Parameter(torch.ones(self.num_heads))
        self.out_proj = nn.Linear(self.intermediate_size, config.hidden_size, bias=config.projectors_bias)
        # The fused kernels are loaded lazily and shared module-wide via globals, so the first
        # mixer instantiation decides whether the fast path is available for all layers.
        global causal_conv1d_update, causal_conv1d_fn
        causal_conv1d = lazy_load_kernel("causal-conv1d")
        causal_conv1d_update = getattr(causal_conv1d, "causal_conv1d_update", None)
        causal_conv1d_fn = getattr(causal_conv1d, "causal_conv1d_fn", None)
        global selective_state_update, mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined
        mamba_ssm = lazy_load_kernel("mamba-ssm")
        selective_state_update = resolve_internal_import(
            mamba_ssm, chained_path="ops.triton.selective_state_update.selective_state_update"
        )
        mamba_chunk_scan_combined = resolve_internal_import(
            mamba_ssm, chained_path="ops.triton.ssd_combined.mamba_chunk_scan_combined"
        )
        mamba_split_conv1d_scan_combined = resolve_internal_import(
            mamba_ssm, chained_path="ops.triton.ssd_combined.mamba_split_conv1d_scan_combined"
        )
        global is_fast_path_available
        is_fast_path_available = all(
            (
                selective_state_update,
                mamba_chunk_scan_combined,
                mamba_split_conv1d_scan_combined,
                causal_conv1d_fn,
                causal_conv1d_update,
            )
        )
        if not is_fast_path_available:
            logger.warning_once(
                "The fast path is not available because one of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)`"
                " is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and"
                " https://github.com/Dao-AILab/causal-conv1d"
            )
        else:
            logger.warning_once("The fast path for FalconH1 will be used when running the model on a GPU")
        self.zxbcdt_multipliers = config.ssm_multipliers
        self.ssm_in_multiplier = config.ssm_in_multiplier

    def cuda_kernels_forward(
        self,
        hidden_states: torch.Tensor,
        cache_params: FalconHybridMambaAttentionDynamicCache | None = None,
        cache_position: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
    ):
        """
        Fused-kernel forward path (requires the `causal-conv1d` and `mamba-ssm` kernels).

        Three sub-paths:
        - single-token decode with a warm cache: `causal_conv1d_update` + `selective_state_update`;
        - training without a cache: fully fused `mamba_split_conv1d_scan_combined`;
        - otherwise: chunked `mamba_chunk_scan_combined`, refreshing conv and SSM caches along the way.
        """
        # 1. Gated MLP's linear projection
        hidden_states = apply_mask_to_padding_states(hidden_states, attention_mask)
        # Add Multipliers
        hidden_states = hidden_states * self.ssm_in_multiplier
        projected_states = self.in_proj(hidden_states)
        projected_states = projected_states * self.mup_vector  # ADD Mup Multipliers
        d_to_remove = 2 * self.intermediate_size + 2 * self.n_groups * self.ssm_state_size + self.num_heads
        # Set up dimensions for reshapes later
        batch_size, seq_len, _ = hidden_states.shape
        groups_time_state_size = self.n_groups * self.ssm_state_size
        use_precomputed_states = (
            cache_params is not None
            and cache_params.has_previous_state
            and seq_len == 1
            and cache_params.conv_states[self.layer_idx].shape[0]
            == cache_params.ssm_states[self.layer_idx].shape[0]
            == batch_size
            and cache_position is not None
            and cache_position[0] > 0
        )
        # getting projected states from cache if it exists
        if use_precomputed_states:
            d_mlp = (projected_states.squeeze(1).shape[-1] - d_to_remove) // 2
            z0, x0, gate, hidden_states_B_C, dt = projected_states.squeeze(1).split(
                [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
            )
            # 2. Convolution sequence transformation
            hidden_states_B_C = causal_conv1d_update(
                hidden_states_B_C,
                cache_params.conv_states[self.layer_idx],
                self.conv1d.weight.squeeze(1),
                self.conv1d.bias,
                self.activation,
            )
            hidden_states, B, C = torch.split(
                hidden_states_B_C,
                [self.intermediate_size, groups_time_state_size, groups_time_state_size],
                dim=-1,
            )
            # 3. SSM transformation
            A = -torch.exp(self.A_log.float())  # (nheads,)
            A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
            dt = dt[:, :, None].expand(-1, -1, self.head_dim)
            dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim)
            D = self.D[:, None, ...].expand(-1, self.head_dim)
            B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups)
            C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups)
            hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim)
            hidden_states = selective_state_update(
                cache_params.ssm_states[self.layer_idx],
                hidden_states_reshaped,
                dt,
                A,
                B,
                C,
                D,
                z=gate.view(batch_size, self.num_heads, self.head_dim) if not self.mamba_rms_norm else None,
                dt_bias=dt_bias,
                dt_softplus=True,
            )
            hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim)
            if self.mamba_rms_norm:
                hidden_states = self.norm(hidden_states, gate)
            if d_mlp > 0:
                hidden_states = torch.cat([F.silu(z0) * x0, hidden_states], dim=-1)
            # 4. Final linear projection
            out = self.out_proj(hidden_states[:, None, ...])
        # Fused calculations or step by step if no initialized cache is found
        else:
            A = -torch.exp(self.A_log.float())  # (num_heads) or (intermediate_size, state_size)
            dt_limit_kwargs = {} if self.time_step_limit == (0.0, float("inf")) else {"dt_limit": self.time_step_limit}
            # 2-4. Fused kernel for conv1d, SSM, and the final projection
            if self.training and cache_params is None:
                out = mamba_split_conv1d_scan_combined(
                    projected_states,
                    self.conv1d.weight.squeeze(1),
                    self.conv1d.bias,
                    self.dt_bias,
                    A,
                    D=self.D,
                    chunk_size=self.chunk_size,
                    seq_idx=None,  # was seq_idx
                    activation=self.activation,
                    rmsnorm_weight=self.norm.weight if self.mamba_rms_norm else None,
                    rmsnorm_eps=self.norm.variance_epsilon if self.mamba_rms_norm else None,
                    outproj_weight=self.out_proj.weight,
                    outproj_bias=self.out_proj.bias,
                    headdim=self.head_dim,
                    ngroups=self.n_groups,
                    norm_before_gate=False,
                    return_final_states=False,
                    **dt_limit_kwargs,
                )
            else:
                d_mlp = (
                    projected_states.shape[-1]
                    - 2 * self.intermediate_size
                    - 2 * self.n_groups * self.ssm_state_size
                    - self.num_heads
                ) // 2
                if attention_mask is not None:
                    projected_states = projected_states * attention_mask[..., None]
                _, gate, hidden_states_B_C, dt = projected_states.split(
                    [
                        2 * d_mlp,
                        self.intermediate_size,
                        self.conv_dim,
                        self.num_heads,
                    ],
                    dim=-1,
                )
                if cache_params is not None:
                    # Left-pad to the kernel window so the conv cache always holds the last
                    # `conv_kernel_size` positions.
                    conv_states = F.pad(
                        hidden_states_B_C.permute(0, 2, 1),
                        (self.conv_kernel_size - hidden_states_B_C.shape[-2], 0),
                    )
                    cache_params.update_conv_state(self.layer_idx, conv_states, cache_position)
                time_step = nn.functional.softplus(dt + self.dt_bias)
                # 1D Convolution
                if causal_conv1d_fn is None or self.activation not in ["silu", "swish"]:
                    hidden_states_B_C = self.act(
                        self.conv1d(hidden_states_B_C.transpose(1, 2)).transpose(1, 2)[:, :seq_len]
                    )  # (B, L, self.d_inner + 2 * ngroups * d_state)
                else:
                    hidden_states_B_C = causal_conv1d_fn(
                        x=hidden_states_B_C.transpose(1, 2),
                        weight=self.conv1d.weight.squeeze(1),
                        bias=self.conv1d.bias,
                        activation=self.activation,
                    ).transpose(1, 2)[:, :seq_len]
                hidden_states, B, C = torch.split(
                    hidden_states_B_C,
                    [
                        self.intermediate_size,
                        groups_time_state_size,
                        groups_time_state_size,
                    ],
                    dim=-1,
                )
                if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1:
                    # tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
                    dtype = hidden_states.dtype
                    hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
                # This is a hack to make sure multi-GPU inference works with HF accelerate
                # see: https://github.com/Dao-AILab/flash-attention/issues/523 for more details
                with torch.cuda.device(hidden_states.device):
                    scan_output, ssm_state = mamba_chunk_scan_combined(
                        hidden_states.view(batch_size, seq_len, -1, self.head_dim),
                        time_step,
                        A,
                        B.view(batch_size, seq_len, self.n_groups, -1),
                        C.view(batch_size, seq_len, self.n_groups, -1),
                        chunk_size=self.chunk_size,
                        D=self.D,
                        z=None,
                        seq_idx=None,
                        return_final_states=True,
                        **dt_limit_kwargs,
                    )
                if ssm_state is not None and cache_params is not None:
                    cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
                scan_output = scan_output.view(batch_size, seq_len, -1)
                # Multiply "gate" branch and apply extra normalization layer
                if self.mamba_rms_norm:
                    out = self.norm(scan_output, gate)
                else:
                    out = scan_output * torch.nn.functional.silu(gate)
                out = self.out_proj(out)
        return out

    # fmt: off
    def torch_forward(
        self,
        input_states,
        cache_params: FalconHybridMambaAttentionDynamicCache | None = None,
        cache_position: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
    ):
        """
        Pure-PyTorch fallback, mirroring `cuda_kernels_forward`: a recurrent single-token update
        when a warm cache is available, otherwise a chunked ("SSD") naive scan implementation.
        """
        batch_size, seq_len, _ = input_states.shape
        dtype = input_states.dtype
        # 1. Gated MLP's linear projection
        input_states = apply_mask_to_padding_states(input_states, attention_mask)
        # Add Multipliers
        input_states = input_states * self.ssm_in_multiplier
        projected_states = self.in_proj(input_states)
        projected_states = projected_states * self.mup_vector # ADD Mup Multipliers
        gate, hidden_states_B_C, dt = projected_states.split([
            self.intermediate_size, self.conv_dim, self.num_heads
        ], dim=-1)
        use_precomputed_states = (
            cache_params is not None
            and cache_params.has_previous_state
            and seq_len == 1
            and cache_params.conv_states[self.layer_idx].shape[0]
            == cache_params.ssm_states[self.layer_idx].shape[0]
            == batch_size
            and cache_position is not None
            and cache_position[0] > 0
        )
        # 2. Convolution sequence transformation
        if use_precomputed_states:
            # Single-token decode: shift the conv window by one and apply the conv as a dot product.
            cache_params.conv_states[self.layer_idx] = cache_params.conv_states[self.layer_idx].roll(shifts=-1, dims=-1)
            cache_params.conv_states[self.layer_idx][:, :, -1] = hidden_states_B_C[:, 0, :].to(cache_params.conv_states[self.layer_idx].device)
            # We need to guarantee that anything regarding the cache is on the same device
            conv_states = cache_params.conv_states[self.layer_idx].to(device=self.conv1d.weight.device)
            hidden_states_B_C = torch.sum(
                conv_states * self.conv1d.weight.squeeze(1), dim=-1
            )
            if self.use_conv_bias:
                hidden_states_B_C = hidden_states_B_C + self.conv1d.bias
            hidden_states_B_C = self.act(hidden_states_B_C)
        else:
            # Init cache
            if cache_params is not None:
                hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2)
                conv_states = nn.functional.pad(
                    hidden_states_B_C_transposed, (self.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0)
                )
                cache_params.conv_states[self.layer_idx].copy_(conv_states)
            hidden_states_B_C = self.act(self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2))
        hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask)
        hidden_states, B, C = torch.split(
            hidden_states_B_C,
            [self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size],
            dim=-1
        )
        # 3. SSM transformation
        A = -torch.exp(self.A_log.float())  # [num_heads]
        if use_precomputed_states:
            # We need to guarantee that anything regarding the cache is on the same device
            cache_device = cache_params.ssm_states[self.layer_idx].device
            # Note: there is no need to pad parameter matrices here, as there is just one new token
            # for batched generation
            dt = dt[:, 0, :][:, None, ...]
            dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim)
            # [num_heads] -> [num_heads, head_dim]
            dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim)
            dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype))
            dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
            A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
            # [bsz, num_heads, head_dim, state_size]
            dA = (torch.exp(dt[..., None] * A)).to(device=cache_device)
            # Discretize B
            # [bsz, n_groups * state_size] -> [bsz, n_groups, 1, state_size] ->
            # -> [bsz, n_groups, group to head repetition factor, state_size] -> [bsz, num_heads, state_size]
            B = B.reshape(batch_size, self.n_groups, -1)[..., None, :]
            B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous()
            B = B.reshape(batch_size, -1, B.shape[-1])
            # [bsz, num_heads, head_dim, state_size]
            dB = dt[..., None] * B[..., None, :]
            # Discretize x into dB
            # [bsz, intermediate_size] -> [bsz, num_heads, head_dim]
            hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim)
            dBx = (dB * hidden_states[..., None]).to(device=cache_device)
            # State calculation
            cache_params.ssm_states[self.layer_idx].copy_(
                cache_params.ssm_states[self.layer_idx] * dA + dBx
            )
            # Subsequent output
            # [bsz, n_groups * state_size] -> [bsz, num_heads, state_size]
            C = C.reshape(batch_size, self.n_groups, -1)[..., None, :]
            C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous()
            C = C.reshape(batch_size, -1, C.shape[-1])
            # [bsz, num_heads, head_dim]
            ssm_states = cache_params.ssm_states[self.layer_idx].to(device=C.device, dtype=C.dtype)  # Shape: [b, h, d, n]
            # Reshape ssm_states to merge the first two dimensions
            ssm_states_reshaped = ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size)  # Shape: [b*h, d, n]
            C_reshaped = C.view(batch_size * self.num_heads, self.ssm_state_size, 1)  # Shape: [b*h, n, 1]
            y = torch.bmm(ssm_states_reshaped, C_reshaped)
            y = y.view(batch_size, self.num_heads, self.head_dim)
            # D skip connection
            # [num_heads] -> [num_heads, head_dim]
            D = self.D[..., None].expand(self.D.shape[0], self.head_dim)
            y = (y + hidden_states * D).to(y.dtype)
            # [bsz, num_heads, head_dim] -> [bsz, 1, intermediate_size]
            y = y.reshape(batch_size, -1)[:, None, ...]
        else:
            # begin ssd naive implementation without einsums
            dt = nn.functional.softplus(dt + self.dt_bias)
            dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
            hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float()
            B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
            C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
            B = B.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads)
            C = C.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads)
            pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size
            D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size)
            # Discretize x and A
            hidden_states = hidden_states * dt[..., None]
            A = A.to(hidden_states.dtype) * dt
            # Rearrange into blocks/chunks
            hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)]
            # [bsz, -1, chunk_size, num_heads] -> [bsz, num_heads, -1, chunk_size]
            A = A.permute(0, 3, 1, 2)
            A_cumsum = torch.cumsum(A, dim=-1)
            # 1. Compute the output for each intra-chunk (diagonal blocks)
            # This is the analog of a causal mask
            L = torch.exp(segment_sum(A))
            # Contraction of C and B to get G (attention-weights like)
            G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, :, :]  # shape: (b, c, l, s, h, n)
            G = G_intermediate.sum(dim=-1)  # shape: (b, c, l, s, h)
            # Compute M, equivalent to applying attention mask to weights
            M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None]
            M = M_intermediate.sum(dim=-1)
            # Compute Y_diag (apply to values)
            Y_diag = (M[..., None] * hidden_states[:, :, None]).sum(dim=3)
            # 2. Compute the state for each intra-chunk
            # (right term of low-rank factorization of off-diagonal blocks; B terms)
            decay_states = torch.exp(A_cumsum[:, :, :, -1:] - A_cumsum)
            B_decay = B * decay_states.permute(0, -2, -1, 1)[..., None]
            states = (B_decay[..., None, :] * hidden_states[..., None]).sum(dim=2)
            # 3. Compute the inter-chunk SSM recurrence; produces correct SSM states at chunk boundaries
            # (middle term of factorization of off-diag blocks; A terms)
            if use_precomputed_states:
                previous_states = cache_params.ssm_states[self.layer_idx][:, None, ...].to(device=states.device)
            else:
                previous_states = torch.zeros_like(states[:, :1])
            states = torch.cat([previous_states, states], dim=1)
            decay_chunk = torch.exp(segment_sum(nn.functional.pad(A_cumsum[:, :, :, -1], (1, 0))))
            decay_chunk = decay_chunk.transpose(1, 3)
            new_states = (decay_chunk[..., None, None] * states[:, :, None, ...]).sum(dim=1)
            states, ssm_state = new_states[:, :-1], new_states[:, -1]
            # 4. Compute state -> output conversion per chunk
            # (left term of low-rank factorization of off-diagonal blocks; C terms)
            state_decay_out = torch.exp(A_cumsum)
            C_times_states = (C[..., None, :] * states[:, :, None, ...])
            state_decay_out_permuted = state_decay_out.permute(0, 2, 3, 1)
            Y_off = (C_times_states.sum(-1) * state_decay_out_permuted[..., None])
            # Add output of intra-chunk and inter-chunk terms (diagonal and off-diagonal blocks)
            y = Y_diag + Y_off
            # [bsz, -1, self.chunk_size, num_heads, head_dim] -> [bsz, (padded) seq_len, num_heads, head_dim]
            y = y.reshape(batch_size, -1, self.num_heads, self.head_dim)
            y = y + D_residual
            # Cutting off padded chunks
            if pad_size > 0:
                y = y[:, :seq_len, :, :]
            y = y.reshape(batch_size, seq_len, -1)
            # Init cache
            if ssm_state is not None and cache_params is not None:
                cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
        if self.mamba_rms_norm:
            scan_output = self.norm(y, gate)
        else:
            scan_output = y * torch.nn.functional.silu(gate)
        # end ssd naive
        # 4. Final linear projection
        contextualized_states = self.out_proj(scan_output.to(dtype))  # [batch, seq_len, hidden_size]
        return contextualized_states
    # fmt: on

    def forward(
        self,
        hidden_states,
        cache_params: FalconHybridMambaAttentionDynamicCache | None = None,
        cache_position: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
    ):
        """Dispatch to the fused CUDA kernel path when available (and not under torch.compile), else to `torch_forward`."""
        if is_fast_path_available and "cuda" in self.in_proj.weight.device.type and not is_torchdynamo_compiling():
            return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask)
        dtype = hidden_states.dtype
        if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1:
            # tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
            hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
        return self.torch_forward(hidden_states, cache_params, cache_position, attention_mask)
class FalconH1MLP(LlamaMLP):
    """Llama-style gated MLP with scalar multipliers on the gate activation and the down projection."""

    def __init__(self, config: FalconH1Config):
        super().__init__(config)
        # (gate, down) scalar multipliers from the config.
        self.gate_multiplier, self.down_multiplier = config.mlp_multipliers

    def forward(self, x):
        gated = self.act_fn(self.gate_proj(x) * self.gate_multiplier)
        return self.down_proj(self.up_proj(x) * gated) * self.down_multiplier
class FalconH1RMSNorm(LlamaRMSNorm):
    # Identical to the Llama RMSNorm; subclassed only for FalconH1-prefixed naming
    # in the generated modeling file.
    pass
class FalconH1DecoderLayer(GradientCheckpointingLayer):
    """
    Falcon-H1 decoder layer: the Mamba mixer and self-attention run *in parallel* on the same
    pre-normalized input; their outputs are scaled by per-branch multipliers, summed, and added
    to the residual stream, followed by a pre-normalized feed-forward block.
    """

    def __init__(self, config: FalconH1Config, layer_idx: int):
        super().__init__()
        self.feed_forward = FalconH1MLP(config)
        head_dim = config.hidden_size // config.num_attention_heads
        # Total channel width of the attention Q + K + V projections.
        self.channels_attn = config.num_attention_heads * head_dim + 2 * config.num_key_value_heads * head_dim
        self.mamba = FalconH1Mixer(config=config, layer_idx=layer_idx)
        self.self_attn = FalconH1Attention(config, layer_idx)
        # Scalar branch multipliers from the config (muP-style scaling).
        self.attention_in_multiplier = config.attention_in_multiplier
        self.ssm_out_multiplier = config.ssm_out_multiplier
        self.attn_out_multiplier = config.attention_out_multiplier
        self.input_layernorm = FalconH1RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.pre_ff_layernorm = FalconH1RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        mamba_attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: FalconHybridMambaAttentionDynamicCache | None = None,
        output_attentions: bool | None = False,
        use_cache: bool | None = False,
        cache_position: torch.LongTensor | None = None,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs,
    ) -> tuple[torch.FloatTensor, tuple[torch.FloatTensor, torch.FloatTensor] | None]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, sequence_length)` where padding elements are indicated by 0.
            mamba_attention_mask (`torch.Tensor`, *optional*): padding-style mask consumed by the Mamba
                mixer branch (distinct from the causal attention mask).
            past_key_values (`FalconHybridMambaAttentionDynamicCache`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence.
            position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
            kwargs (`dict`, *optional*):
                Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
                into the model
        """
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        # Mamba branch: shares the cache object with attention; uses its own padding mask.
        mamba_hidden_states = self.mamba(
            hidden_states=hidden_states,
            cache_params=past_key_values,
            cache_position=cache_position,
            attention_mask=mamba_attention_mask,
        )
        mamba_hidden_states = mamba_hidden_states * self.ssm_out_multiplier
        # Attention branch: consumes the *same* normalized input (parallel, not sequential).
        attention_hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states * self.attention_in_multiplier,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        attention_hidden_states = attention_hidden_states * self.attn_out_multiplier
        # Sum the two branches before the residual connection.
        hidden_states = mamba_hidden_states + attention_hidden_states
        # residual connection after attention
        hidden_states = residual + hidden_states
        # feed-forward
        residual = hidden_states
        hidden_states = self.pre_ff_layernorm(hidden_states)
        hidden_states = self.feed_forward(hidden_states)
        hidden_states = residual + hidden_states
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)
        return outputs
@auto_docstring
class FalconH1PreTrainedModel(PreTrainedModel):
    config: FalconH1Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["FalconH1DecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn = True
    _supports_sdpa = True
    # Flagged stateful — presumably because of the recurrent conv/SSM cache; consumed upstream by
    # PreTrainedModel machinery (NOTE(review): confirm against base-class docs).
    _is_stateful = True

    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize weights, with FalconH1-specific overrides for the Mamba mixer parameters
        and the per-layer muP vector buffers."""
        super()._init_weights(module)
        if isinstance(module, FalconH1Mixer):
            # dt_bias and the D skip connection start at 1; A_log gets the S4D-real
            # initialization log(1..num_heads), matching the constructor's placeholder values.
            init.ones_(module.dt_bias)
            init.copy_(module.A_log, torch.log(torch.arange(1, module.num_heads + 1)))
            init.ones_(module.D)
        elif isinstance(module, FalconH1Model):
            # Fill each layer's (non-persistent) `mup_vector` buffer from the config-derived values.
            mup_vector = compute_mup_vector(module.config)
            for layer in module.layers:
                init.copy_(layer.mamba.mup_vector, mup_vector)
def compute_mup_vector(config):
    """
    Computes the MuP vector based on model configuration.

    FalconH1 applies a different MuP multiplier to each section of the projected hidden states:
    the vector is partitioned into five chunks whose sizes follow the `in_proj` output layout,
    and chunk `i` is filled with `config.ssm_multipliers[i]`.

    Args:
        config: FalconH1Config object

    Returns:
        torch.Tensor: The computed MuP vector of shape
            `(1, 1, 2 * intermediate_size + 2 * n_groups * d_state + num_heads)`
    """
    # Resolve the SSM width: explicit `mamba_d_ssm` wins over `expand * hidden_size`.
    if config.mamba_d_ssm is not None:
        intermediate_size = config.mamba_d_ssm
    else:
        intermediate_size = int(config.mamba_expand * config.hidden_size)
    groups_time_state_size = config.mamba_n_groups * config.mamba_d_state
    multipliers = config.ssm_multipliers

    # Build each constant-valued section, then concatenate in projection order.
    sections = [
        torch.ones(intermediate_size) * multipliers[0],
        torch.ones(intermediate_size) * multipliers[1],
        torch.ones(groups_time_state_size) * multipliers[2],
        torch.ones(groups_time_state_size) * multipliers[3],
        torch.ones(config.mamba_n_heads) * multipliers[4],
    ]
    return torch.cat(sections).reshape(1, 1, -1)
@auto_docstring
# Adapted from transformers.models.jamba.modeling_jamba.JambaModel
class FalconH1Model(FalconH1PreTrainedModel):
    """Bare FalconH1 decoder stack.

    Embeds tokens, runs them through hybrid Mamba/attention decoder layers and a
    final RMS norm, and returns hidden states (no language-modeling head).
    """

    def __init__(self, config: FalconH1Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        decoder_layers = []
        for i in range(config.num_hidden_layers):
            decoder_layers.append(FalconH1DecoderLayer(config, layer_idx=i))
        self.layers = nn.ModuleList(decoder_layers)
        self._attn_implementation = config._attn_implementation
        self.final_layernorm = FalconH1RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = FalconH1RotaryEmbedding(config=config)
        # MuP-style scalars: the embedding multiplier is applied in forward(), the
        # lm_head multiplier is read by FalconH1ForCausalLM when computing logits.
        self.embedding_multiplier = config.embedding_multiplier
        self.lm_head_multiplier = config.lm_head_multiplier
        self.gradient_checkpointing = False
        # Compute the MuP vector once and register it for all layers. It is stored as a
        # non-persistent buffer so it follows device/dtype moves but is excluded from
        # the state dict; each layer gets its own clone.
        mup_vector = compute_mup_vector(config)
        for layer in self.layers:
            layer.mamba.register_buffer("mup_vector", mup_vector.clone(), persistent=False)
        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: FalconHybridMambaAttentionDynamicCache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs,  # NOOP kwargs, for now
    ) -> tuple | BaseModelOutputWithPast:
        # Fall back to config defaults for any flag not passed explicitly.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        # Exactly one of input_ids / inputs_embeds must be given (XOR of the two error cases).
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False
        if inputs_embeds is None:
            # Embedding output is rescaled by the MuP embedding multiplier.
            inputs_embeds = self.embed_tokens(input_ids) * self.embedding_multiplier
        hidden_states = inputs_embeds
        if use_cache and past_key_values is None:
            # The hybrid cache cannot be created lazily here; callers must pass one in
            # (see FalconH1ForCausalLM.prepare_inputs_for_generation).
            logger.warning_once(
                "FalconH1 requires an initialized `FalconHybridMambaAttentionDynamicCache` to return a cache. None was "
                "provided, so no cache will be returned."
            )
        if cache_position is None:
            cache_position = torch.arange(hidden_states.shape[1], device=hidden_states.device)
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)
        causal_mask = create_causal_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        # Mamba layers take a simpler mask (possibly None) than the attention layers.
        mamba_mask = self._update_mamba_mask(attention_mask, cache_position)
        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                mamba_attention_mask=mamba_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
            )
            hidden_states = layer_outputs[0]
            if output_attentions:
                if layer_outputs[1] is not None:
                    # append attentions only of attention layers. Mamba layers return `None` as the attention weights
                    all_self_attns += (layer_outputs[1],)
        hidden_states = self.final_layernorm(hidden_states)
        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        # Mark the cache as primed after the first forward pass with it.
        if past_key_values and not past_key_values.has_previous_state:
            past_key_values.has_previous_state = True
        next_cache = None if not use_cache else past_key_values
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def _update_mamba_mask(self, attention_mask, cache_position):
        """
        Return the mask for the Mamba layers, or None when no state zeroing is needed.

        No need for zeroing states when
        1. Cached forward (cache_position[0] > 0)
        2. Attending to all inputs (mask is all ones)
        """
        mamba_mask = attention_mask
        if cache_position[0] > 0 or (attention_mask is not None and torch.all(attention_mask == 1)):
            mamba_mask = None
        return mamba_mask
class FalconH1ForCausalLM(LlamaForCausalLM):
    """FalconH1 decoder with a language-modeling head.

    Inherits the LM head and generation plumbing from `LlamaForCausalLM`; overrides
    `forward` to apply the MuP logit multiplier, and `prepare_inputs_for_generation`
    to build the model's hybrid Mamba/attention cache.
    """

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: FalconHybridMambaAttentionDynamicCache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs,
    ) -> tuple | CausalLMOutputWithPast:
        r"""
        Example:
        ```python
        >>> from transformers import AutoTokenizer, FalconH1ForCausalLM
        >>> model = FalconH1ForCausalLM.from_pretrained("...")
        >>> tokenizer = AutoTokenizer.from_pretrained("...")
        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")
        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        # Logits are rescaled by the MuP lm-head multiplier stored on the base model.
        logits = self.lm_head(hidden_states[:, slice_indices, :]) * self.model.lm_head_multiplier
        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        inputs_embeds=None,
        cache_position=None,
        position_ids=None,
        use_cache=True,
        is_first_iteration=False,
        **kwargs,
    ):
        # Overwritten -- has a unique cache type, `FalconHybridMambaAttentionDynamicCache`
        if past_key_values is None:
            # Build the hybrid cache up front; the base model warns (and returns no
            # cache) if it receives None. One device entry per layer, read from each
            # layer's mamba conv weight so multi-device placements work.
            past_key_values = FalconHybridMambaAttentionDynamicCache(
                self.config,
                input_ids.shape[0],
                self.dtype,
                devices=[
                    self.model.layers[i].mamba.conv1d.weight.device for i in range(self.config.num_hidden_layers)
                ],
            )
        # Always forward the configured number of logits to keep.
        kwargs["logits_to_keep"] = self.config.num_logits_to_keep
        model_inputs = super().prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            cache_position=cache_position,
            position_ids=position_ids,
            use_cache=use_cache,
            is_first_iteration=is_first_iteration,
            **kwargs,
        )
        return model_inputs
__all__ = ["FalconH1Model", "FalconH1ForCausalLM", "FalconH1PreTrainedModel"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/falcon_h1/modular_falcon_h1.py",
"license": "Apache License 2.0",
"lines": 1051,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/falcon_h1/test_modeling_falcon_h1.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch FalconH1 model."""
import unittest
import pytest
from transformers import FalconH1Config, is_torch_available
from transformers.testing_utils import (
Expectations,
get_device_properties,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, FalconH1ForCausalLM, FalconH1Model
from transformers.models.falcon_h1.modeling_falcon_h1 import (
FalconHybridMambaAttentionDynamicCache,
)
class FalconH1ModelTester:
    """Builds tiny FalconH1 configs and inputs, and runs shape/equivalence checks
    for the unit-test suite."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        num_key_value_heads=2,
        intermediate_size=64,
        hidden_act="silu",
        attention_dropout=0.0,
        attn_layer_indices=None,
        attn_rotary_emb=8,
        max_position_embeddings=512,
        type_vocab_size=16,
        initializer_range=0.02,
        num_labels=3,
        pad_token_id=0,
        mamba_n_groups=1,
        mamba_n_heads=16,
        mamba_d_state=16,
        mamba_d_conv=4,
        mamba_expand=2,
        mamba_chunk_size=16,
        scope=None,
    ):
        # `parent` is the unittest.TestCase that owns this tester (used for asserts).
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.attention_dropout = attention_dropout
        self.attn_layer_indices = attn_layer_indices
        self.attn_rotary_emb = attn_rotary_emb
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.pad_token_id = pad_token_id
        self.scope = scope
        self.mamba_n_groups = mamba_n_groups
        self.mamba_n_heads = mamba_n_heads
        self.mamba_d_state = mamba_d_state
        self.mamba_d_conv = mamba_d_conv
        self.mamba_expand = mamba_expand
        self.mamba_chunk_size = mamba_chunk_size

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, token_labels) for a single test run."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            # Lower-triangular mask: each sample i attends to its first i+1 tokens.
            input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device))
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common ModelTesterMixin interface: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict

    def get_config(self):
        """Build a small FalconH1Config, auto-selecting attention layer indices if unset."""
        # Fix for SDPA tests, force at least 4 layers
        if self.num_hidden_layers < 4:
            self.num_hidden_layers = 4
        if self.attn_layer_indices is None:
            # Place attention layers at a regular stride: pick the largest proper
            # divisor of num_hidden_layers and use it as the spacing.
            d = [x for x in range(2, self.num_hidden_layers) if self.num_hidden_layers % x == 0]
            if len(d) == 0:
                raise ValueError("num_hidden_layers is prime, cannot automatically set attn_layer_indices.")
            d = d[-1]  # get the largest divisor
            self.attn_layer_indices = [x + 1 for x in range(0, self.num_hidden_layers, d)]
        return FalconH1Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            num_key_value_heads=self.num_key_value_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_dropout=self.attention_dropout,
            attn_layer_indices=self.attn_layer_indices,
            attn_rotary_emb=self.attn_rotary_emb,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
            mamba_n_groups=self.mamba_n_groups,
            mamba_n_heads=self.mamba_n_heads,
            mamba_d_state=self.mamba_d_state,
            mamba_d_conv=self.mamba_d_conv,
            mamba_expand=self.mamba_expand,
            mamba_chunk_size=self.mamba_chunk_size,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
    ):
        """Smoke-test the bare model and check the output hidden-state shape."""
        model = FalconH1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
    ):
        """Smoke-test the causal-LM head over all input combinations; check logits shape."""
        model = FalconH1ForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids, labels=token_labels)
        result = model(input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
    ):
        """Check that a cached (incremental) forward matches a from-scratch forward."""
        # config.is_decoder = True
        # config.add_cross_attention = True
        model = FalconH1ForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        # Attention: FalconH1 (like Jamba) needs the cache to be initialized to return a cache!
        past_key_values = FalconHybridMambaAttentionDynamicCache(
            config,
            input_ids.shape[0],
            model.dtype,
            devices=[model.device for _ in range(model.config.num_hidden_layers)],
        )
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            past_key_values=past_key_values,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
            cache_position=torch.arange(
                input_ids.shape[1], input_ids.shape[1] + next_tokens.shape[1], device=model.device
            ),
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
@require_torch
class FalconH1ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-mixin test suite for FalconH1, with overrides for its hybrid cache
    and Mamba/attention layer mix."""

    all_model_classes = (FalconH1Model, FalconH1ForCausalLM) if is_torch_available() else ()
    # Need to use `0.8` instead of `0.9` for `test_cpu_offload`
    # This is because we are hitting edge cases with the causal_mask buffer
    model_split_percents = [0.5, 0.7, 0.8]
    pipeline_model_mapping = (
        {"feature-extraction": FalconH1Model, "text-generation": FalconH1ForCausalLM} if is_torch_available() else {}
    )

    def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
        """Validate the cache type and every layer's key/value cache shapes after generate."""
        self.assertIsInstance(past_key_values, FalconHybridMambaAttentionDynamicCache)
        # (batch, kv heads, seq_length, head_dim)
        num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads)
        head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        expected_shape = (batch_size, num_heads, seq_length, head_dim)
        self.assertListEqual(
            [key_tensor.shape for key_tensor in past_key_values.key_cache],
            [expected_shape] * len(past_key_values.key_cache),
        )
        self.assertListEqual(
            [value_cache.shape for value_cache in past_key_values.value_cache],
            [expected_shape] * len(past_key_values.value_cache),
        )

    def _check_caches_are_equal(self, cache1, cache2):
        """Assert two hybrid caches match layer-by-layer (keys, values, conv and SSM states)."""
        if not isinstance(cache1, FalconHybridMambaAttentionDynamicCache) or not isinstance(
            cache2, FalconHybridMambaAttentionDynamicCache
        ):
            raise ValueError("The wrong cache is being used!")
        if not len(cache1) == len(cache2):
            raise ValueError("Both caches do not have the same number of layers.")
        num_layers = len(cache1)
        for idx in range(num_layers):
            torch.testing.assert_close(cache1.key_cache[idx], cache2.key_cache[idx])
            torch.testing.assert_close(cache1.value_cache[idx], cache2.value_cache[idx])
            torch.testing.assert_close(cache1.conv_states[idx], cache2.conv_states[idx])
            torch.testing.assert_close(cache1.ssm_states[idx], cache2.ssm_states[idx])

    def setUp(self):
        self.model_tester = FalconH1ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconH1Config, hidden_size=64)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        r"""
        Overriding the test_attention_outputs test as the FalconH1 model outputs attention only for its attention layers
        """
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        expected_num_attentions = self.model_tester.num_hidden_layers
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            # Eager attention so attention weights are actually materialized.
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
            )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
            )

    def test_batching_equivalence(self):
        # need to disable the tril input mask; restore the original flag afterwards
        orig = self.model_tester.use_input_mask
        self.model_tester.use_input_mask = False
        super().test_batching_equivalence()
        self.model_tester.use_input_mask = orig

    @pytest.mark.generate
    def test_left_padding_compatibility(self):
        # TODO: document why a random attention mask causes this test to fail, but a full mask doesn't
        unpadded_custom_inputs = {"attention_mask": None}
        super().test_left_padding_compatibility(unpadded_custom_inputs=unpadded_custom_inputs)
@slow
@require_torch
@require_torch_accelerator
class FalconH1ModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end generation test against real pretrained FalconH1 weights."""

    @slow
    def test_falcon_h1_hard(self):
        """
        An integration test for Falcon-H1.

        Greedy-decodes 512 tokens from a chat prompt and compares against
        device-specific golden outputs (default / CUDA-A10 / XPU).
        """
        EXPECTED_TEXT_DEFAULT = """
user
Tell me about the french revolution.
assistant
The French Revolution (1789–1799) was a period of radical social and political upheaval in France that fundamentally transformed the nation and had profound effects on the rest of Europe and the world. Here are the key aspects of the revolution:
### **Causes**
1. **Economic Crisis**: France was in severe financial trouble due to costly wars (particularly the American Revolution), extravagant spending by the monarchy, and inefficient taxation.
2. **Social Inequality**: The rigid class system (the Ancien Régime) divided society into the privileged nobility and clergy (First Estate) and the commoners (Third Estate), who bore the brunt of taxation and had few rights.
3. **Enlightenment Ideas**: Philosophers like Voltaire, Rousseau, and Montesquieu inspired ideas of liberty, equality, and popular sovereignty.
4. **Settlement of 1789**: The Estates-General convened to address the financial crisis, leading to the Third Estate's assertion of its rights and the eventual abolition of the feudal system.
### **Key Events**
1. **Storming of the Bastille (July 14, 1789)**: A symbol of royal tyranny, the Bastille fortress was stormed by revolutionaries, sparking widespread rebellion.
2. **Declaration of the Rights of Man and of the Citizen (August 1789)**: A foundational document proclaiming liberty, equality, and fraternity.
3. **National Assembly and King’s Trial (1791–1792)**: King Louis XVI and his ministers were tried and executed (King Louis was guillotined, Marie Antoinette was banished), marking the end of the monarchy.
4. **Rise of the Jacobins and Reign of Terror (1793–1794)**: Radical leaders like Maximilien Robespierre sought to purge France of counter-revolutionaries, leading to mass executions and widespread fear.
5. **Thermidorian Reaction
"""
        EXPECTED_TEXT_A10 = """
user
Tell me about the french revolution.
assistant
The French Revolution (1789–1799) was a period of profound social upheaval and radical political change in France that fundamentally transformed the nation and had far-reaching effects on the rest of Europe and the world. Here are the key aspects of the revolution:
### **Causes**
1. **Economic Crisis**: France was in severe financial trouble due to costly wars (particularly the American Revolution), extravagant spending by the monarchy, and an inefficient tax system.
2. **Social Inequality**: The privileged classes (the nobility and clergy) enjoyed immense wealth and power, while the majority of the population (the Third Estate, comprising commoners) faced poverty and lack of representation.
3. **Enlightenment Ideas**: Philosophers like Voltaire, Rousseau, and Montesquieu inspired ideas of liberty, equality, and popular sovereignty, which fueled revolutionary fervor.
4. **Political Instability**: The absolute monarchy under King Louis XVI proved unable to address the nation's problems, leading to growing discontent.
### **Key Events**
1. **Estates-General (1789)**: The Third Estate broke away and formed the National Assembly, forcing King Louis XVI to convene the Estates-General, an old legislative body, to address the financial crisis.
2. **Storming of the Bastille (July 14, 1789)**: A symbol of royal tyranny, the Bastille fortress was stormed by revolutionaries, sparking widespread rebellion.
3. **Declaration of the Rights of Man and of the Citizen (August 1789)**: This foundational document proclaimed liberty, equality, and fraternity as fundamental rights.
4. **Abolition of Feudalism (November 1789)**: The National Assembly abolished feudal privileges, redistributing church lands to the people.
5. **Tennis Court Oath (May 5, 1789)**: The National Assembly members, meeting on a tennis court, pledged to continue their work until a new constitution was established.
6.
"""
        EXPECTED_TEXT_XPU = """
user
Tell me about the french revolution.
assistant
The French Revolution (1789–1799) was a period of radical social and political upheaval in France that fundamentally transformed the nation and had profound effects on the rest of Europe and the world. Here are the key aspects of the revolution:
### **Causes**
1. **Economic Crisis**: France was in severe financial trouble due to costly wars (particularly the American Revolution), extravagant spending by the monarchy, and inefficient taxation.
2. **Social Inequality**: The rigid class system (the Ancien Régime) favored the nobility and clergy while the majority of the population (the Third Estate) bore the brunt of taxation and had limited rights.
3. **Enlightenment Ideas**: Philosophers like Rousseau, Voltaire, and Montesquieu inspired ideas of liberty, equality, and popular sovereignty.
4. **Settlement of 1789**: The Estates-General convened to address the financial crisis, leading to debates that exposed the weaknesses of the monarchy and the grievances of the common people.
### **Key Events**
1. **Opening of the Revolution (1789)**:
- **Storming of the Bastille**: A symbol of royal tyranny, marking the start of the revolution.
- **Declaration of the Rights of Man and of the Citizen**: A foundational document proclaiming liberty, equality, and fraternity.
2. **Stages of the Revolution**:
- **Staffords' Reforms (1789–1791)**: Attempts to address grievances, including the abolition of feudal privileges and the introduction of the Civil Constitution of the Church.
- **Reign of Terror (1793–1794)**: Led by Maximilien Robespierre, characterized by mass executions of perceived enemies of the revolution, including King Louis XVI and Queen Marie Antoinette.
- **Thermidorian Reaction (1794)**: The fall of Robespierre and the end of the Reign of Terror.
3. **
"""
        # Pick the golden text matching the current device; (None, None) is the fallback.
        expected_texts = Expectations(
            {
                (None, None): EXPECTED_TEXT_DEFAULT,
                ("cuda", 8): EXPECTED_TEXT_A10,
                ("xpu", None): EXPECTED_TEXT_XPU,
            }
        )
        EXPECTED_TEXT = expected_texts.get_expectation()
        # Remove the first char (`\n`) and the consecutive whitespaces caused by the formatting.
        EXPECTED_TEXT = EXPECTED_TEXT.strip().replace(" " * 12, "")
        device_properties = get_device_properties()
        # For A10, there is an ending " "
        if device_properties[0] == "cuda" and device_properties[1] == 8:
            EXPECTED_TEXT = EXPECTED_TEXT + " "
        model_id = "tiiuae/Falcon-H1-1.5B-Deep-Instruct"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = FalconH1ForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16, device_map="auto")
        messages = [{"role": "user", "content": "Tell me about the french revolution."}]
        input_text = tokenizer.apply_chat_template(messages, tokenize=False)
        inputs = tokenizer.encode(input_text, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            # Greedy decoding (do_sample=False) keeps the output deterministic.
            outputs = model.generate(inputs, max_new_tokens=512, do_sample=False)
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        self.assertEqual(generated_text, EXPECTED_TEXT)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/falcon_h1/test_modeling_falcon_h1.py",
"license": "Apache License 2.0",
"lines": 430,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:examples/3D_parallel.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""":
This script is used to test training a model using Tensor Parallelism and Data Parallelism.
Usage:
export CUDA_VISIBLE_DEVICES=0,1,2,3
export CUDA_VISIBLE_DEVICES=4,5,6,7
export CUDA_VISIBLE_DEVICES=5,6,7
TP_SIZE=2 DP_SIZE=2 torchrun --nproc_per_node=4 --rdzv_endpoint=localhost:29503 examples/3D_parallel.py
CP_SIZE=2 DP_SIZE=2 torchrun --nproc_per_node=4 examples/3D_parallel.py
CP_SIZE=2 TP_SIZE=2 torchrun --nproc_per_node=4 examples/3D_parallel.py
DP_SIZE=2 CP_SIZE=2 TP_SIZE=2 torchrun --nproc_per_node=8 examples/3D_parallel.py
TP_SIZE=1 CP_SIZE=4 torchrun --nproc_per_node=4 examples/3D_parallel.py
TP_SIZE=1 DP_SIZE=4 torchrun --nproc_per_node=4 examples/3D_parallel.py
TP_SIZE=4 DP_SIZE=1 torchrun --nproc_per_node=4 --rdzv_endpoint=localhost:29503 examples/3D_parallel.py
IGNORE_SANITY=1 CP_SIZE=1 TP_SIZE=1 DP_SIZE=1 torchrun --nproc_per_node=1 --rdzv_endpoint=localhost:29504 examples/3D_parallel.py
"""
import logging
import os
from collections.abc import Iterable
from contextlib import nullcontext
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dcp
import torch.optim as optim
import wandb
from datasets import load_dataset
from torch.distributed.checkpoint.state_dict import get_state_dict, set_state_dict
from torch.distributed.checkpoint.stateful import Stateful
from torch.distributed.device_mesh import DeviceMesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import ShardingStrategy
from torch.distributed.tensor import DTensor
from torch.distributed.tensor.experimental import context_parallel
from torch.nn.attention import SDPBackend, sdpa_kernel
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from transformers import AutoModelForCausalLM, AutoTokenizer
# Make cuDNN pick deterministic kernels so repeated runs are comparable.
# (Full torch.use_deterministic_algorithms(True) is intentionally left disabled.)
torch.backends.cudnn.deterministic = True

# Module-wide logging: timestamped, INFO-level records.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
)
logger = logging.getLogger(__name__)

# Optional CP tuning (disabled): rotate shards with all-to-all instead of the default.
# from torch.distributed.tensor.experimental._attention import set_rotate_method
# set_rotate_method("alltoall")
def main():
    """Train a causal LM on TinyStories with 3D (DP x TP x CP) parallelism.

    Mesh sizes are read from the TP_SIZE / DP_SIZE / CP_SIZE environment
    variables and their product must equal the torchrun world size. The rest
    of the function uses ``local_rank`` and the tp/dp/cp device meshes
    unconditionally, so a torchrun launch (RANK / WORLD_SIZE set) is required.
    """
    tp_size = int(os.environ.get("TP_SIZE", "1"))
    dp_size = int(os.environ.get("DP_SIZE", "1"))
    cp_size = int(os.environ.get("CP_SIZE", "1"))  # Add CP size configuration
    sdpa_backend = SDPBackend.FLASH_ATTENTION  # For CP
    # sdpa_backend = SDPBackend.MATH  # For CP
    global_batch_size = 8  # Desired global batch size
    seq_len = 1024  # Sequence length
    num_train_steps = 10000  # Number of training steps
    LR = 1e-5
    model_name = "HuggingFaceTB/SmolLM2-1.7B"
    # model_name = "unsloth/Llama-3.2-1B"
    CHECKPOINT_DIR = f"checkpoint_tp{tp_size}_dp{dp_size}_cp{cp_size}"
    # Initialize distributed environment
    if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
        dist.init_process_group("nccl")
        rank = dist.get_rank()
        world_size = dist.get_world_size()
        local_rank = int(os.environ["LOCAL_RANK"])
        torch.cuda.set_device(local_rank)
        assert world_size == tp_size * dp_size * cp_size, (
            f"World size ({world_size}) must equal TP size ({tp_size}) * DP size ({dp_size}) * CP size ({cp_size})"
        )
        # Rank layout: dp is the outermost dimension, then tp, then cp.
        mesh = torch.arange(world_size).reshape(dp_size, tp_size, cp_size)
        world_mesh = DeviceMesh(device_type="cuda", mesh=mesh, mesh_dim_names=("dp", "tp", "cp"))
        tp_mesh = world_mesh["tp"]
        dp_mesh = world_mesh["dp"]
        cp_mesh = world_mesh["cp"]
        # Pre-flatten dp+cp so world_mesh["dp_cp"] can be looked up for the loss all-reduce below.
        world_mesh["dp", "cp"]._flatten(mesh_dim_name="dp_cp")
        logger.info(f"Created DeviceMesh: {world_mesh}")
        logger.info(
            f"Distributed setup - Rank: {rank}, World size: {world_size}, Local rank: {local_rank}, DP: {dp_mesh.get_local_rank()}, TP: {tp_mesh.get_local_rank()}, CP: {cp_mesh.get_local_rank()}"
        )
        if dist.get_rank() == 0:
            wandb.init(
                project="tp_dp_test",
                config={
                    "tp_size": tp_size,
                    "dp_size": dp_size,
                    "cp_size": cp_size,
                    "global_batch_size": global_batch_size,
                    "model_name": model_name,
                    "dataset": "roneneldan/TinyStories-1M",
                    "seq_len": seq_len,
                    "lr": LR,
                    "weight_decay": 0.1,
                },
                name=f"llama_tp{tp_size}_dp{dp_size}_cp{cp_size}"
                if model_name == "unsloth/Llama-3.2-1B"
                else f"tp{tp_size}_dp{dp_size}_cp{cp_size}",
            )
            logger.info("Wandb initialized.")
            # Log the current file to wandb
            wandb.save("test_train.py")
    else:
        # Fail fast with a clear message: everything below depends on
        # local_rank and the tp/dp/cp meshes built above, so a non-torchrun
        # launch would otherwise crash later with a confusing NameError.
        raise RuntimeError(
            "RANK/WORLD_SIZE not set. Launch this script with torchrun (see usage examples in the module docstring)."
        )
    # Load model and tokenizer
    logger.info(f"Loading model and tokenizer from {model_name}")
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
        logger.info(f"Set pad_token to eos_token: {tokenizer.pad_token}")
    # tp_plan="auto" shards the model across the TP mesh at load time.
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        device_mesh=tp_mesh if dist.is_initialized() else None,
        tp_plan="auto",
        dtype=torch.bfloat16,
    )
    logger.info(f"Model loaded onto device mesh: {tp_mesh}")
    device = torch.device(f"cuda:{local_rank}")
    logger.info(f"Using device: {device} for non-model tensors")
    use_ddp = False
    if dist.is_initialized() and dp_mesh.size() > 1:
        # NO_SHARD keeps full parameters on every dp rank, so FSDP here only
        # provides DDP-style gradient synchronization across the dp mesh.
        model = FSDP(model, device_mesh=dp_mesh, sharding_strategy=ShardingStrategy.NO_SHARD)
        use_ddp = True
    model.train()
    logger.info("Loading TinyStories dataset...")
    raw_dataset = load_dataset("roneneldan/TinyStories", split="train[:1%]")  # Use 1% for faster testing

    def tokenize_function(examples):
        # Tokenize the text without padding
        tokenized_batch = tokenizer(
            examples["text"], padding=False, truncation=True, max_length=seq_len, return_tensors=None
        )
        # Set labels to be the same as input_ids for Causal LM
        tokenized_batch["labels"] = tokenized_batch["input_ids"].copy()
        return tokenized_batch

    tokenized_dataset = raw_dataset.map(tokenize_function, batched=True, remove_columns=["text"])
    logger.info(f"Dataset loaded and tokenized. Size: {len(tokenized_dataset)}")

    # Create packed sequences
    def create_packed_sequences(examples):
        # Flatten all sequences
        all_tokens = []
        for input_ids in examples["input_ids"]:
            all_tokens.extend(input_ids)
        # Split into sequences of seq_len + 1 (for input + label)
        num_sequences = len(all_tokens) // (seq_len + 1)
        packed_input_ids = []
        packed_labels = []
        for i in range(num_sequences):
            start_idx = i * (seq_len + 1)
            end_idx = start_idx + (seq_len + 1)
            # Get the full sequence
            full_sequence = all_tokens[start_idx:end_idx]
            # For input_ids, remove the last token
            packed_input_ids.append(full_sequence[:-1])
            # For labels, remove the first token
            packed_labels.append(full_sequence[1:])
        return {"input_ids": packed_input_ids, "labels": packed_labels}

    # Apply packing to the dataset
    packed_dataset = tokenized_dataset.map(
        create_packed_sequences,
        batched=True,
        remove_columns=tokenized_dataset.column_names,
        batch_size=1000,  # Process in batches for efficiency
        num_proc=60,
    )
    logger.info(f"Dataset packed. New size: {len(packed_dataset)}")
    # Shuffle the packed dataset
    packed_dataset = packed_dataset.shuffle(seed=42)
    logger.info("Packed dataset shuffled")
    # Calculate local batch size: each dp rank sees global_batch_size / dp_size samples.
    if dist.is_initialized():
        assert global_batch_size % dp_mesh.size() == 0, (
            f"Global batch size ({global_batch_size}) must be divisible by DP size ({dp_mesh.size()})"
        )
        local_batch_size = global_batch_size // dp_mesh.size()
    else:
        local_batch_size = global_batch_size
    logger.info(
        f"Global batch size: {global_batch_size}, DP size: {dp_size if dist.is_initialized() else 1}, Local batch size: {local_batch_size}"
    )

    # Simple collate function since sequences are already packed
    def collate_fn(batch):
        input_ids = torch.tensor([item["input_ids"] for item in batch], dtype=torch.long)
        labels = torch.tensor([item["labels"] for item in batch], dtype=torch.long)
        return {"input_ids": input_ids, "labels": labels}

    if dist.is_initialized():
        # Shard batches across dp ranks only; tp/cp ranks must see identical data.
        sampler = DistributedSampler(
            packed_dataset, num_replicas=dp_mesh.size(), rank=dp_mesh.get_local_rank(), shuffle=False
        )
    else:
        sampler = None
    dataloader = DataLoader(
        packed_dataset,
        batch_size=local_batch_size,
        sampler=sampler,
        shuffle=False,
        collate_fn=collate_fn,
        pin_memory=True,
    )
    logger.info(f"DataLoader created. Distributed: {dist.is_initialized()}")
    optimizer = optim.AdamW(model.parameters(), lr=LR, weight_decay=0.1)
    # Training loop
    logger.info(f"Starting training for {num_train_steps} steps...")
    model.train()
    step = 0
    # Hoisted out of the loop: disabling CP load balancing is a one-time,
    # loop-invariant configuration of the experimental attention backend.
    from torch.distributed.tensor.experimental._attention import _cp_options

    _cp_options.enable_load_balance = False
    while step < num_train_steps:
        for batch in dataloader:
            if step >= num_train_steps:
                break  # Exit loop if max steps reached
            # Move batch to appropriate device
            batch = {k: v.to(device) for k, v in batch.items()}
            optimizer.zero_grad()
            # Add position_ids to batch before CP sharding so every rank keeps
            # the absolute positions of its local sequence chunk.
            batch_size = batch["input_ids"].shape[0]
            position_ids = torch.arange(0, seq_len, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).expand(batch_size, -1)
            batch["position_ids"] = position_ids
            with sdpa_kernel(sdpa_backend):  # TODO: ideally move this to attention implementation
                cp_context = (
                    nullcontext()
                    if cp_mesh.size() == 1
                    else context_parallel(
                        cp_mesh,
                        buffers=[
                            batch["input_ids"],
                            batch["labels"],
                            batch["position_ids"],
                        ],
                        buffer_seq_dims=[1, 1, 1],
                    )
                )
                with cp_context:
                    # Pop labels from batch before model forward pass
                    labels = batch.pop("labels")
                    outputs = model(**batch)  # [mbs, seq_len/cp]
                    logits = outputs.logits
                    # Labels were not passed to forward, so compute the loss
                    # here with pre-shifted labels (packing already offset them by one).
                    loss = model.loss_function(
                        logits=logits, labels=None, shift_labels=labels, vocab_size=model.config.vocab_size
                    )
                    # Backward inside cp_context so CP's attention communication is replayed.
                    loss.backward()
            # all reduce grads across dp_cp if applicable
            all_reduce_grads(model, world_mesh, use_ddp=use_ddp)
            if hasattr(model, "clip_grad_norm_"):
                gradnorm = model.clip_grad_norm_(max_norm=1.0, norm_type=2.0)  # TODO: fix reported gradnorm
            else:
                # only works with FSDP's NO_SHARD otherwise we should use FSDP's clip_grad_norm_
                assert len(list(model.parameters())) > 5, "No parameters found in model. Probably DDP bug.."
                gradnorm = clip_grad_norm_(model.parameters(), max_norm=1.0, norm_type=2.0, foreach=True)
            optimizer.step()
            # allreduce loss across cp_dp before logging
            if dist.is_initialized() and (cp_mesh.size() > 1 or dp_mesh.size() > 1):
                dist.all_reduce(loss, group=world_mesh["dp_cp"].get_group(), op=dist.ReduceOp.AVG)
            current_loss = loss.item()
            # Log loss and gradnorm to wandb (only on rank 0 of dp group)
            if not dist.is_initialized() or dist.get_rank() == 0:
                logger.info(
                    f"Step: {step} | GBS: {global_batch_size} | DP: {dp_mesh.size()} | TP: {tp_mesh.size()} | CP: {cp_mesh.size()} | Loss: {current_loss} | Gradnorm: {gradnorm} | lr: {LR}"
                )
                wandb.log(
                    {
                        "train/loss": current_loss,
                        "train/gradnorm": gradnorm,
                        "step": step,
                        "lr": LR,
                        "GBS": global_batch_size,
                    }
                )
            step += 1  # Increment step count
    logger.info("Training loop finished.")
    # Save model using DCP (only if distributed)
    if dist.is_initialized():
        state_dict = {"app": AppState(model, optimizer)}
        dcp.save(
            state_dict=state_dict,
            checkpoint_id=CHECKPOINT_DIR,
        )
        logger.info(f"Saved checkpoint to {CHECKPOINT_DIR}")
        # Remember rank-0 status now: dist.get_rank() is no longer valid once
        # the default process group has been destroyed.
        is_main_rank = dist.get_rank() == 0
        dist.destroy_process_group()
        logger.info("Cleaned up distributed process group")
        # Finish wandb run on rank 0
        if is_main_rank:
            wandb.finish()
            logger.info("Wandb run finished.")
    else:
        # Fallback to regular save for non-distributed case
        save_dir = "test_model_nondist"
        model.save_pretrained(save_dir)
        tokenizer.save_pretrained(save_dir)  # Save tokenizer too
        logger.info(f"Saved model to {save_dir}")
def all_reduce_grads(model, world_mesh, use_ddp):
"""All reduce gradients across dp_cp if applicable."""
cp_mesh = world_mesh["cp"]
if use_ddp:
# DDP/FSDP takes care of syncing grads
mesh = cp_mesh
else:
mesh = world_mesh["dp", "cp"]._flatten(mesh_dim_name="dp_cp")
if dist.is_initialized() and mesh.size() > 1:
for name, param in model.named_parameters():
if param.grad is not None:
# Workaround for cross-mesh communication limitation with DTensor gradients
if isinstance(param.grad, DTensor):
local_grad = param.grad.to_local()
# Ensure grad requires grad for inplace modification checks (might not be needed)
# local_grad = local_grad.detach().requires_grad_(True)
torch.distributed.all_reduce(local_grad, op=torch.distributed.ReduceOp.SUM, group=mesh.get_group())
local_grad = local_grad / mesh.size()
# Assign averaged grad back - need careful handling if DTensor structure is complex
# This simple assignment might work if the grad structure matches param structure
param.grad = DTensor.from_local(
local_grad, device_mesh=param.grad.device_mesh, placements=param.grad.placements
)
else:
# Handle regular tensors if any exist (e.g. buffers not converted to DTensor)
torch.distributed.all_reduce(param.grad, op=torch.distributed.ReduceOp.AVG, group=mesh.get_group())
class AppState(Stateful):
"""Wrapper for checkpointing the Application State including model and optimizer."""
def __init__(self, model, optimizer=None):
self.model = model
self.optimizer = optimizer
def state_dict(self):
model_state_dict, optimizer_state_dict = get_state_dict(self.model, self.optimizer)
return {"model": model_state_dict, "optim": optimizer_state_dict}
def load_state_dict(self, state_dict):
set_state_dict(
self.model, self.optimizer, model_state_dict=state_dict["model"], optim_state_dict=state_dict["optim"]
)
def clip_grad_norm_(
parameters: Iterable[torch.Tensor],
max_norm: float,
norm_type: float = 2.0,
error_if_nonfinite: bool = False,
foreach: bool | None = None,
) -> torch.Tensor:
"""
Clip the gradient norm of an iterable of parameters.
"""
# Filter out parameters with no gradients
parameters = [p for p in parameters if p.grad is not None]
assert len(parameters) > 0, "No parameters with gradients found"
# Calculate total norm
if norm_type == float("inf"):
total_norm = max(p.grad.detach().abs().max() for p in parameters)
else:
total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type) for p in parameters]), norm_type)
# Convert DTensor to local tensor if needed
if isinstance(total_norm, DTensor):
total_norm = total_norm.full_tensor()
# Clip gradients
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for p in parameters:
p.grad.detach().mul_(clip_coef)
return total_norm
# Entry point: torchrun executes this file as a script; importing it stays side-effect free.
if __name__ == "__main__":
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "examples/3D_parallel.py",
"license": "Apache License 2.0",
"lines": 370,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:examples/pytorch/3d_parallel_checks.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is used to test training a model using Tensor Parallelism and Data Parallelism.
Usage:
export CUDA_VISIBLE_DEVICES=0,1,2,3
export CUDA_VISIBLE_DEVICES=4,5,6,7
export CUDA_VISIBLE_DEVICES=5,6,7
TP_SIZE=2 DP_SIZE=2 torchrun --nproc_per_node=4 --rdzv_endpoint=localhost:29503 test_train.py
CP_SIZE=2 DP_SIZE=2 torchrun --nproc_per_node=4 test_train.py
CP_SIZE=2 TP_SIZE=2 torchrun --nproc_per_node=4 test_train.py
TP_SIZE=1 CP_SIZE=4 torchrun --nproc_per_node=4 test_train.py
TP_SIZE=1 DP_SIZE=4 torchrun --nproc_per_node=4 test_train.py
TP_SIZE=4 DP_SIZE=1 torchrun --nproc_per_node=4 --rdzv_endpoint=localhost:29503 test_train.py
IGNORE_SANITY=1 CP_SIZE=1 TP_SIZE=1 DP_SIZE=1 torchrun --nproc_per_node=1 --rdzv_endpoint=localhost:29504 test_train.py
"""
import logging
import os
from collections.abc import Iterable
from contextlib import nullcontext
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dcp
import torch.nn as nn
import torch.optim as optim
import wandb
from datasets import load_dataset
from torch.distributed.checkpoint.state_dict import get_state_dict, set_state_dict
from torch.distributed.checkpoint.stateful import Stateful
from torch.distributed.device_mesh import DeviceMesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import ShardingStrategy
from torch.distributed.tensor import DTensor
from torch.distributed.tensor.experimental import context_parallel
from torch.nn.attention import SDPBackend, sdpa_kernel
from torch.utils.data import DataLoader, default_collate
from torch.utils.data.distributed import DistributedSampler
from transformers import AutoModelForCausalLM, AutoTokenizer
# IGNORE_SANITY=1 disables the per-step cross-rank synchronization checks below.
ignore_sanity_checks = int(os.environ.get("IGNORE_SANITY", "0")) == 1
# torch.use_deterministic_algorithms(True)
# Deterministic cuDNN kernels keep per-rank results reproducible across runs.
torch.backends.cudnn.deterministic = True
# Set up logging
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)
# Optional: rotate context-parallel shards via all-to-all instead of the default method.
# from torch.distributed.tensor.experimental._attention import set_rotate_method
# set_rotate_method("alltoall")  # rotate shards using all-to-all
def main():
    """Train a causal LM with TP x DP x CP parallelism plus cross-rank sanity checks.

    Mesh sizes come from the TP_SIZE / DP_SIZE / CP_SIZE environment variables;
    their product must equal the torchrun world size. Every step optionally
    verifies that parameters, gradients and logits are synchronized (or
    deliberately different) across each mesh dimension, and the run ends with a
    DCP checkpoint save/load round-trip. Set IGNORE_SANITY=1 to skip the checks.
    """
    tp_size = int(os.environ.get("TP_SIZE", "1"))
    dp_size = int(os.environ.get("DP_SIZE", "4"))
    cp_size = int(os.environ.get("CP_SIZE", "1"))  # Add CP size configuration
    sdpa_backend = SDPBackend.FLASH_ATTENTION  # For CP
    # sdpa_backend = SDPBackend.MATH  # For CP
    global_batch_size = 8  # Desired global batch size
    seq_len = 1024  # Sequence length
    num_train_steps = 10000  # Number of training steps
    LR = 1e-5
    model_name = "HuggingFaceTB/SmolLM2-1.7B"
    # model_name = "unsloth/Llama-3.2-1B"
    CHECKPOINT_DIR = f"checkpoint_tp{tp_size}_dp{dp_size}_cp{cp_size}"
    # Initialize distributed environment
    if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
        dist.init_process_group("nccl")
        rank = dist.get_rank()
        world_size = dist.get_world_size()
        local_rank = int(os.environ["LOCAL_RANK"])
        torch.cuda.set_device(local_rank)
        assert world_size == tp_size * dp_size * cp_size, (
            f"World size ({world_size}) must equal TP size ({tp_size}) * DP size ({dp_size}) * CP size ({cp_size})"
        )
        # Rank layout: dp outermost, then tp, then cp.
        mesh = torch.arange(world_size).reshape(dp_size, tp_size, cp_size)
        world_mesh = DeviceMesh(device_type="cuda", mesh=mesh, mesh_dim_names=("dp", "tp", "cp"))
        tp_mesh = world_mesh["tp"]
        dp_mesh = world_mesh["dp"]
        cp_mesh = world_mesh["cp"]
        # Pre-flatten dp+cp so world_mesh["dp_cp"] can be looked up for the loss all-reduce.
        world_mesh["dp", "cp"]._flatten(mesh_dim_name="dp_cp")
        logger.info(f"Created DeviceMesh: {world_mesh}")
        logger.info(
            f"Distributed setup - Rank: {rank}, World size: {world_size}, Local rank: {local_rank}, DP: {dp_mesh.get_local_rank()}, TP: {tp_mesh.get_local_rank()}, CP: {cp_mesh.get_local_rank()}"
        )
        if dist.get_rank() == 0:
            wandb.init(
                project="tp_dp_test",
                config={
                    "tp_size": tp_size,
                    "dp_size": dp_size,
                    "cp_size": cp_size,
                    "global_batch_size": global_batch_size,
                    "model_name": model_name,
                    "dataset": "roneneldan/TinyStories-1M",
                    "seq_len": seq_len,
                    "lr": LR,
                    "weight_decay": 0.1,
                },
                name=f"llama_tp{tp_size}_dp{dp_size}_cp{cp_size}"
                if model_name == "unsloth/Llama-3.2-1B"
                else f"tp{tp_size}_dp{dp_size}_cp{cp_size}",
            )
            logger.info(f"ignore_sanity_checks is set to: {ignore_sanity_checks}")
            logger.info("Wandb initialized.")
            # Log the current file to wandb
            wandb.save("test_train.py")
    else:
        logger.info("Running in non-distributed mode. DeviceMesh not applicable.")
        rank = 0
        world_size = 1
        local_rank = 0
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        wandb.init(
            project="tp_dp_test",
            config={
                "tp_size": 1,
                "dp_size": 1,
                "global_batch_size": global_batch_size,
                "model_name": model_name,
                "dataset": "roneneldan/TinyStories-1M",
                "seq_len": seq_len,
            },
            name="llama_tp1_dp1_nondist" if model_name == "unsloth/Llama-3.2-1B" else "tp1_dp1_nondist",
        )
        logger.info("Wandb initialized for non-distributed run.")
    # Load model and tokenizer
    logger.info(f"Loading model and tokenizer from {model_name}")
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
        logger.info(f"Set pad_token to eos_token: {tokenizer.pad_token}")
    # tp_plan="auto" shards the model across the TP mesh at load time.
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        device_mesh=tp_mesh if dist.is_initialized() else None,
        tp_plan="auto",
        dtype=torch.bfloat16,
    )
    if dist.is_initialized():
        # The mesh log only makes sense (and tp_mesh only exists) in distributed mode.
        logger.info(f"Model loaded onto device mesh: {tp_mesh}")
        # TP shards attention heads, so KV heads must divide evenly across TP ranks.
        assert model.config.num_key_value_heads % tp_mesh.size() == 0, (
            f"num_key_value_heads={model.config.num_key_value_heads} must be divisible by tp_size={tp_mesh.size()}"
        )
        device = torch.device(f"cuda:{local_rank}")
    else:
        model = model.to(device)
    logger.info(f"Using device: {device} for non-model tensors")
    use_ddp = False
    if dist.is_initialized() and dp_mesh.size() > 1:
        # FSDP1 with NO_SHARD keeps full params on every dp rank - it only
        # provides DDP-style gradient synchronization over the dp mesh.
        model = FSDP(model, device_mesh=dp_mesh, sharding_strategy=ShardingStrategy.NO_SHARD)
        # FSDP2
        # for transformer_block in model.model.layers:
        #     fully_shard(transformer_block, mesh=dp_mesh, reshard_after_forward=False)
        # fully_shard(model.model, mesh=dp_mesh, reshard_after_forward=False)
        # DDP
        # replicate(model, device_mesh=dp_mesh, bucket_cap_mb=100)
        use_ddp = True
    model.train()
    assert len(list(model.parameters())) > 0, "No parameters found in model. Probably DDP bug.."
    assert len([p for p in model.parameters() if p.requires_grad]) > 0, (
        "No gradients found in model. Probably DDP bug.."
    )
    if dist.is_initialized() and not ignore_sanity_checks:
        # assert model is replicated across all dp
        for name, param in model.named_parameters():
            sanity_check_tensor_sync(param, dp_mesh)
        # assert model is different across tp (only for sharded params)
        for name, param in model.named_parameters():
            if isinstance(param, DTensor) and param.placements[0].is_shard():
                # Only check sharded parameters for non-sync across TP
                sanity_check_tensor_sync(param, tp_mesh, not_sync=True)
            elif isinstance(param, DTensor) and param.placements[0].is_replicate():
                # Replicated parameters should be the same across TP
                sanity_check_tensor_sync(param, tp_mesh)
        # assert model is replicated across cp
        for name, param in model.named_parameters():
            sanity_check_tensor_sync(param, cp_mesh)
    # Load and preprocess TinyStories dataset
    logger.info("Loading TinyStories dataset...")
    raw_dataset = load_dataset("roneneldan/TinyStories", split="train[:1%]")  # Use 1% for faster testing

    def tokenize_function(examples):
        # Tokenize the text without padding
        tokenized_batch = tokenizer(
            examples["text"], padding=False, truncation=True, max_length=seq_len, return_tensors=None
        )
        # Set labels to be the same as input_ids for Causal LM
        tokenized_batch["labels"] = tokenized_batch["input_ids"].copy()
        return tokenized_batch

    tokenized_dataset = raw_dataset.map(tokenize_function, batched=True, remove_columns=["text"])
    logger.info(f"Dataset loaded and tokenized. Size: {len(tokenized_dataset)}")

    # Create packed sequences
    def create_packed_sequences(examples):
        # Flatten all sequences
        all_tokens = []
        for input_ids in examples["input_ids"]:
            all_tokens.extend(input_ids)
        # Split into sequences of seq_len + 1 (for input + label)
        num_sequences = len(all_tokens) // (seq_len + 1)
        packed_input_ids = []
        packed_labels = []
        for i in range(num_sequences):
            start_idx = i * (seq_len + 1)
            end_idx = start_idx + (seq_len + 1)
            # Get the full sequence
            full_sequence = all_tokens[start_idx:end_idx]
            # For input_ids, remove the last token
            packed_input_ids.append(full_sequence[:-1])
            # For labels, remove the first token
            packed_labels.append(full_sequence[1:])
        return {"input_ids": packed_input_ids, "labels": packed_labels}

    # Apply packing to the dataset
    packed_dataset = tokenized_dataset.map(
        create_packed_sequences,
        batched=True,
        remove_columns=tokenized_dataset.column_names,
        batch_size=1000,  # Process in batches for efficiency
        num_proc=60,
    )
    logger.info(f"Dataset packed. New size: {len(packed_dataset)}")
    # Shuffle the packed dataset
    packed_dataset = packed_dataset.shuffle(seed=42)
    logger.info("Packed dataset shuffled")
    # Calculate local batch size: each dp rank sees global_batch_size / dp_size samples.
    if dist.is_initialized():
        assert global_batch_size % dp_mesh.size() == 0, (
            f"Global batch size ({global_batch_size}) must be divisible by DP size ({dp_mesh.size()})"
        )
        local_batch_size = global_batch_size // dp_mesh.size()
    else:
        local_batch_size = global_batch_size
    logger.info(
        f"Global batch size: {global_batch_size}, DP size: {dp_size if dist.is_initialized() else 1}, Local batch size: {local_batch_size}"
    )

    # Simple collate function since sequences are already packed
    def collate_fn(batch):
        input_ids = torch.tensor([item["input_ids"] for item in batch], dtype=torch.long)
        labels = torch.tensor([item["labels"] for item in batch], dtype=torch.long)
        return {"input_ids": input_ids, "labels": labels}

    if dist.is_initialized():
        # Shard batches across dp ranks only; tp/cp ranks must see identical data.
        sampler = DistributedSampler(
            packed_dataset, num_replicas=dp_mesh.size(), rank=dp_mesh.get_local_rank(), shuffle=False
        )
    else:
        sampler = None
    dataloader = DataLoader(
        packed_dataset,
        batch_size=local_batch_size,
        sampler=sampler,
        shuffle=False,
        collate_fn=collate_fn,
    )
    logger.info(f"DataLoader created. Distributed: {dist.is_initialized()}")
    optimizer = optim.AdamW(model.parameters(), lr=LR, weight_decay=0.1)
    # Training loop
    logger.info(f"Starting training for {num_train_steps} steps...")
    model.train()
    step = 0
    # Hoisted out of the loop: disabling CP load balancing is a one-time,
    # loop-invariant configuration of the experimental attention backend.
    from torch.distributed.tensor.experimental._attention import _cp_options

    _cp_options.enable_load_balance = False
    while step < num_train_steps:
        for batch in dataloader:
            if step >= num_train_steps:
                break  # Exit loop if max steps reached
            # Move batch to appropriate device
            batch = {k: v.to(device) for k, v in batch.items()}
            # Sanity checks for batch distribution (only if distributed)
            if dist.is_initialized() and not ignore_sanity_checks:
                # check batch is same across all tp
                sanity_check_tensor_sync(batch["input_ids"], tp_mesh)
                # check batch is different across dp
                sanity_check_tensor_sync(batch["input_ids"], dp_mesh, not_sync=True)
            optimizer.zero_grad()
            # Add position_ids to batch before CP sharding so every rank keeps
            # the absolute positions of its local sequence chunk.
            batch_size = batch["input_ids"].shape[0]
            position_ids = torch.arange(0, seq_len, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).expand(batch_size, -1)
            batch["position_ids"] = position_ids
            with sdpa_kernel(sdpa_backend):  # TODO: ideally move this to attention implementation
                cp_context = (
                    nullcontext()
                    if cp_mesh.size() == 1
                    else context_parallel(
                        cp_mesh,
                        buffers=[
                            batch["input_ids"],
                            batch["labels"],
                            batch["position_ids"],
                        ],  # TODO: need to add attention mask
                        buffer_seq_dims=[1, 1, 1],
                    )
                )
                with cp_context:
                    # Pop labels from batch before model forward pass
                    labels = batch.pop("labels")
                    outputs = model(**batch)  # [mbs, seq_len/cp]
                    logits = outputs.logits
                    # Labels were not passed to forward, so compute the loss here
                    # with pre-shifted labels (packing already offset them by one).
                    loss = model.loss_function(
                        logits=logits, labels=None, shift_labels=labels, vocab_size=model.config.vocab_size
                    )
                    # Sanity checks for logits
                    if dist.is_initialized() and not ignore_sanity_checks:
                        # sanity_check_tensor_sync(logits, tp_mesh)  # TODO: only true without sequence parallel
                        sanity_check_tensor_sync(logits, dp_mesh, not_sync=True)
                        sanity_check_tensor_sync(logits, cp_mesh, not_sync=True)
                    # Backward inside cp_context so CP's attention communication is replayed.
                    loss.backward()
            # all reduce grads across dp_cp if applicable
            all_reduce_grads(model, world_mesh, use_ddp=use_ddp)
            # Sanity checks for gradients (only if distributed)
            if dist.is_initialized() and not ignore_sanity_checks:
                # check grads are not same across all tp (for sharded grads)
                for name, param in model.named_parameters():
                    if param.grad is not None and isinstance(param.grad, DTensor):
                        if param.grad.placements[0].is_shard():
                            sanity_check_tensor_sync(param.grad, tp_mesh, not_sync=True)
                        elif param.grad.placements[0].is_replicate():
                            sanity_check_tensor_sync(param.grad, tp_mesh)
                # check grads are same across dp
                for name, param in model.named_parameters():
                    if param.grad is not None and dp_mesh.size() > 1:
                        sanity_check_tensor_sync(param.grad, dp_mesh)
                # check grads are same across cp
                for name, param in model.named_parameters():
                    if param.grad is not None and cp_mesh.size() > 1:
                        sanity_check_tensor_sync(param.grad, cp_mesh)
            # Calculate gradient norm and clip gradients
            if hasattr(model, "clip_grad_norm_"):
                # when using FSDP or DDP, model.parameters() doesn't work
                gradnorm = model.clip_grad_norm_(max_norm=1.0, norm_type=2.0)
            else:
                assert len(list(model.parameters())) > 2, "No parameters found in model. Probably DDP bug.."
                assert len([p for p in model.parameters() if p.requires_grad]) > 2, (
                    "No gradients found in model. Probably DDP bug.."
                )
                assert len([p for p in model.parameters() if p.grad is not None]) > 2, (
                    "No gradients found in model. Probably DDP bug.."
                )
                # only works with FSDP's NO_SHARD otherwise we should use FSDP's clip_grad_norm_
                gradnorm = clip_grad_norm_(model.parameters(), max_norm=1.0, norm_type=2.0, foreach=True)
            optimizer.step()
            # Sanity checks for updated model parameters (only if distributed)
            if dist.is_initialized() and not ignore_sanity_checks:
                # check updated model is different across all tp (for sharded params)
                for name, param in model.named_parameters():
                    if isinstance(param, DTensor):
                        if param.placements[0].is_shard():
                            sanity_check_tensor_sync(param, tp_mesh, not_sync=True)
                        elif param.placements[0].is_replicate():
                            sanity_check_tensor_sync(param, tp_mesh)
                # check updated model is same across dp
                for name, param in model.named_parameters():
                    sanity_check_tensor_sync(param, dp_mesh)
                # check updated model is same across cp
                for name, param in model.named_parameters():
                    sanity_check_tensor_sync(param, cp_mesh)
            # allreduce loss across cp_dp before logging
            if dist.is_initialized() and (cp_mesh.size() > 1 or dp_mesh.size() > 1):
                dist.all_reduce(loss, group=world_mesh["dp_cp"].get_group(), op=dist.ReduceOp.AVG)
            current_loss = loss.item()
            # Log loss and gradnorm to wandb (only on rank 0 of dp group)
            if not dist.is_initialized() or dist.get_rank() == 0:
                logger.info(
                    f"Step: {step} | GBS: {global_batch_size} | DP: {dp_mesh.size()} | TP: {tp_mesh.size()} | CP: {cp_mesh.size()} | Loss: {current_loss} | Gradnorm: {gradnorm} | lr: {LR}"
                )
                wandb.log(
                    {
                        "train/loss": current_loss,
                        "train/gradnorm": gradnorm,
                        "step": step,
                        "lr": LR,
                        "GBS": global_batch_size,
                    }
                )
            step += 1  # Increment step count
    logger.info("Training loop finished.")
    # Save model using DCP (only if distributed)
    if dist.is_initialized():
        state_dict = {"app": AppState(model, optimizer)}
        dcp.save(
            state_dict=state_dict,
            checkpoint_id=CHECKPOINT_DIR,
        )
        logger.info(f"Saved checkpoint to {CHECKPOINT_DIR}")
    else:
        # Fallback to regular save for non-distributed case
        save_dir = "test_model_nondist"
        model.save_pretrained(save_dir)
        tokenizer.save_pretrained(save_dir)  # Save tokenizer too
        logger.info(f"Saved model to {save_dir}")
    # Example of loading the checkpoint (only if distributed)
    if dist.is_initialized():
        # Create a new model instance
        logger.info("Creating new model instance for verification")
        new_model = AutoModelForCausalLM.from_pretrained(
            model_name,
            device_mesh=tp_mesh,
            dtype=torch.bfloat16,  # Use same dtype
        )
        new_optimizer = optim.AdamW(new_model.parameters(), lr=LR)
        # Load checkpoint into new model
        state_dict = {"app": AppState(new_model, new_optimizer)}
        dcp.load(
            state_dict=state_dict,
            checkpoint_id=CHECKPOINT_DIR,
        )
        logger.info("Loaded checkpoint into new model")
        # Verify model weights match
        logger.info("Verifying model weights match...")
        for (name1, param1), (name2, param2) in zip(model.named_parameters(), new_model.named_parameters()):
            torch.testing.assert_close(
                param1.to_local(),
                param2.to_local(),
                rtol=1e-3,
                atol=1e-3,
                msg=f"Weights mismatch in {name1} vs {name2}",
            )
        # Verify optimizer states match
        logger.info("Verifying optimizer states match...")
        for name1, state1 in optimizer.state_dict().items():
            state2 = new_optimizer.state_dict()[name1]
            if name1 == "state":
                # Compare state dictionaries for each parameter
                for param_id, param_state1 in state1.items():
                    param_state2 = state2[param_id]
                    # Compare each state component (step, exp_avg, exp_avg_sq)
                    for key, value1 in param_state1.items():
                        value2 = param_state2[key]
                        if isinstance(value1, DTensor):
                            # Convert DTensors to local tensors for comparison
                            torch.testing.assert_close(
                                value1.to_local(),
                                value2.to_local(),
                                rtol=1e-5,
                                atol=1e-5,
                                msg=f"Optimizer state mismatch in state[{param_id}][{key}]",
                            )
                        else:
                            torch.testing.assert_close(
                                value1,
                                value2,
                                rtol=1e-5,
                                atol=1e-5,
                                msg=f"Optimizer state mismatch in state[{param_id}][{key}]",
                            )
            elif name1 == "param_groups":
                # Compare param_groups (excluding the actual params list)
                for i, (group1, group2) in enumerate(zip(state1, state2)):
                    for key in group1:
                        if key != "params":  # Skip comparing the params list
                            assert group1[key] == group2[key], f"Param group mismatch in param_groups[{i}][{key}]"
        # Run a forward pass with both models to verify outputs match
        logger.info("Running forward pass verification...")
        with torch.no_grad():
            # Use the last batch for verification
            batch = {k: v.to(device) for k, v in batch.items()}  # Ensure batch is on correct device
            original_outputs = model(**batch)
            new_outputs = new_model(**batch)
            torch.testing.assert_close(
                original_outputs.logits.to_local(),
                new_outputs.logits.to_local(),
                rtol=1e-3,
                atol=1e-3,
                msg="Model outputs do not match!",
            )  # Increased tolerance slightly for bf16
    # Clean up distributed environment and finish wandb run
    if dist.is_initialized():
        # Capture rank-0 status before destroying the process group:
        # dist.get_rank() is invalid once the default group is gone.
        is_main_rank = dist.get_rank() == 0
        dist.destroy_process_group()
        logger.info("Cleaned up distributed process group")
        # Finish wandb run on rank 0
        if is_main_rank:
            wandb.finish()
            logger.info("Wandb run finished.")
    else:
        wandb.finish()
        logger.info("Wandb run finished.")
def all_reduce_grads(model, world_mesh, use_ddp):
"""All reduce gradients across dp_cp if applicable."""
cp_mesh = world_mesh["cp"]
if use_ddp:
# DDP takes care of syncing grads
mesh = cp_mesh
else:
mesh = world_mesh["dp", "cp"]._flatten(mesh_dim_name="dp_cp")
if dist.is_initialized() and mesh.size() > 1:
for name, param in model.named_parameters():
if param.grad is not None:
# Workaround for cross-mesh communication limitation with DTensor gradients
if isinstance(param.grad, DTensor):
local_grad = param.grad.to_local()
# Ensure grad requires grad for inplace modification checks (might not be needed)
# local_grad = local_grad.detach().requires_grad_(True)
torch.distributed.all_reduce(local_grad, op=torch.distributed.ReduceOp.SUM, group=mesh.get_group())
local_grad = local_grad / mesh.size()
# Assign averaged grad back - need careful handling if DTensor structure is complex
# This simple assignment might work if the grad structure matches param structure
param.grad = DTensor.from_local(
local_grad, device_mesh=param.grad.device_mesh, placements=param.grad.placements
)
else:
# Handle regular tensors if any exist (e.g. buffers not converted to DTensor)
torch.distributed.all_reduce(param.grad, op=torch.distributed.ReduceOp.AVG, group=mesh.get_group())
class ContextParallelCollator:
"""Collator for context parallel training that splits sequences into chunks."""
def __init__(self, cp_mesh: DeviceMesh | None = None):
self.cp_mesh = cp_mesh
def __call__(self, batch: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
batch = default_collate(batch)
if self.cp_mesh is not None and self.cp_mesh.size() > 1:
# Get sequence length from the input batch
seq_len = batch["input_ids"].shape[1]
assert seq_len % self.cp_mesh.size() == 0, (
f"Sequence length {seq_len} must be divisible by CP size {self.cp_mesh.size()}"
)
chunk_size = seq_len // self.cp_mesh.size()
cp_rank = self.cp_mesh.get_local_rank()
start_idx = cp_rank * chunk_size
end_idx = start_idx + chunk_size
# Keep only the local chunk of the sequence
batch["input_ids"] = batch["input_ids"][:, start_idx:end_idx]
batch["attention_mask"] = batch["attention_mask"][:, start_idx:end_idx]
batch["labels"] = batch["labels"][:, start_idx:end_idx]
return batch
class AppState(Stateful):
    """Wrapper for checkpointing the Application State including model and optimizer.

    Implements the `Stateful` protocol so the object can be handed directly to
    `torch.distributed.checkpoint` save/load APIs as a single state unit.
    """

    def __init__(self, model, optimizer=None):
        self.model = model
        self.optimizer = optimizer

    def state_dict(self):
        # `get_state_dict` collects model and optimizer state in a DCP-compatible layout.
        model_state_dict, optimizer_state_dict = get_state_dict(self.model, self.optimizer)
        return {"model": model_state_dict, "optim": optimizer_state_dict}

    def load_state_dict(self, state_dict):
        # Restores both objects in place from the layout produced by `state_dict` above.
        set_state_dict(
            self.model, self.optimizer, model_state_dict=state_dict["model"], optim_state_dict=state_dict["optim"]
        )
def sanity_check_tensor_sync(
    tensor: torch.Tensor, mesh: DeviceMesh, rtol: float = 1e-4, atol: float = 1e-4, not_sync: bool = False
) -> None:
    """
    Verify that a tensor is synchronized (or not synchronized) across all processes in the mesh's process group.
    Handles both regular tensors and DTensors.

    Args:
        tensor (torch.Tensor): The tensor to check for synchronization (can be DTensor)
        mesh (DeviceMesh): The device mesh containing the process group
        rtol (float): Relative tolerance for comparison
        atol (float): Absolute tolerance for comparison
        not_sync (bool): If True, asserts that tensors are NOT synchronized. If False, asserts they are synchronized.

    Raises:
        AssertionError: If `not_sync` is False and any rank differs from rank 0, or if
            `not_sync` is True and every rank matches rank 0 (i.e. the tensor IS synced).
    """
    if not dist.is_initialized() or mesh.size() == 1:
        return  # No need to check in non-distributed mode
    # Get the process group from the mesh
    pg = mesh.get_group()
    # Convert DTensor to local tensor if needed
    if hasattr(tensor, "to_local"):
        local_tensor = tensor.to_local()
    else:
        local_tensor = tensor
    # Gather tensors from all processes
    world_size = dist.get_world_size(pg)
    gathered_tensors = [torch.empty_like(local_tensor) for _ in range(world_size)]
    dist.all_gather(gathered_tensors, local_tensor, group=pg)
    # Compare every rank's copy against rank 0's copy.
    found_mismatch = False
    for i in range(1, world_size):
        try:
            torch.testing.assert_close(gathered_tensors[0], gathered_tensors[i], rtol=rtol, atol=atol)
        except AssertionError:
            found_mismatch = True
            if not not_sync:
                # Tensors were expected to be identical across ranks: propagate the failure.
                raise
    # Fix: previously a fully-synchronized tensor passed silently even with `not_sync=True`;
    # the "must differ somewhere" expectation is now enforced explicitly.
    if not_sync and not found_mismatch:
        raise AssertionError("Tensor is synchronized across all ranks but was expected NOT to be")
def clip_grad_norm_(
parameters: Iterable[torch.Tensor],
max_norm: float,
norm_type: float = 2.0,
error_if_nonfinite: bool = False,
foreach: bool | None = None,
) -> torch.Tensor:
"""
Clip the gradient norm of an iterable of parameters.
"""
# Filter out parameters with no gradients
parameters = [p for p in parameters if p.grad is not None]
assert len(parameters) > 0, "No parameters with gradients found"
# Calculate total norm
if norm_type == float("inf"):
total_norm = max(p.grad.detach().abs().max() for p in parameters)
else:
total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type) for p in parameters]), norm_type)
# Convert DTensor to local tensor if needed
if isinstance(total_norm, DTensor):
total_norm = total_norm.full_tensor()
# Clip gradients
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for p in parameters:
p.grad.detach().mul_(clip_coef)
return total_norm
def check_params_sync(model_params, original_params):
"""
Check if original_params are being updated in sync with model parameters.
Args:
model_params: Iterator of model parameters after update
original_params: List of original parameters before DDP wrapping
"""
for mp, op in zip(model_params, original_params):
if isinstance(mp, DTensor):
mp = mp.to_local()
if isinstance(op, DTensor):
op = op.to_local()
if not torch.allclose(mp.data, op.data, rtol=0, atol=0):
raise RuntimeError(f"Parameters out of sync: model param {mp.data} != original param {op.data}")
return True
def get_parameters(model: nn.Module) -> Iterable[torch.Tensor]:
    """
    Get all parameters from a model by iterating over its modules.
    This is an alternative to model.parameters() that works with DTensor models.

    Args:
        model (nn.Module): The model to get parameters from

    Returns:
        Iterable[torch.Tensor]: An iterator over all parameters in the model
    """
    for child in model._modules.values():
        # Tensors stored directly as instance attributes (e.g. after DTensor conversion)
        # live in __dict__ rather than in the module's _parameters registry.
        yield from (
            value for value in child.__dict__.values() if isinstance(value, torch.Tensor) and value.requires_grad
        )
        # Recurse into grandchildren.
        yield from get_parameters(child)
def update_model_parameters(model: nn.Module) -> None:
    """
    Update model._parameters using named_modules() to ensure all parameters are properly tracked.

    Args:
        model (nn.Module): The model to update parameters for
    """
    # Start from a clean registry on the root module.
    model._parameters = {}
    for qualified_name, submodule in model.named_modules():
        if not qualified_name:
            # The root module is reported under the empty name; skip it.
            continue
        # Strip any 'module.' prefix introduced by DDP wrapping.
        key = qualified_name.replace("module.", "")
        # Register weight/bias when the submodule carries them.
        weight = getattr(submodule, "weight", None)
        if weight is not None:
            model._parameters[f"{key}.weight"] = weight
        bias = getattr(submodule, "bias", None)
        if bias is not None:
            model._parameters[f"{key}.bias"] = bias
if __name__ == "__main__":
    # Entry point when launched via torchrun/python; `main` is defined earlier in this file.
    main()
| {
"repo_id": "huggingface/transformers",
"file_path": "examples/pytorch/3d_parallel_checks.py",
"license": "Apache License 2.0",
"lines": 683,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:examples/pytorch/context_parallel.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import torch.distributed as dist
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.experimental import context_parallel
from torch.nn.attention import SDPBackend, sdpa_kernel
from torch.nn.parallel import DistributedDataParallel as DDP
from transformers import AutoModelForCausalLM
from transformers.loss.loss_utils import ForCausalLMLoss
# ----- distributed / device setup -----
world_size = int(os.environ.get("WORLD_SIZE", "1"))
cp_mesh = init_device_mesh("cuda", (world_size,))
rank = torch.distributed.get_node_local_rank()
device = "cuda"
dtype = torch.bfloat16
# Context parallel ring attention uses the flash-attention SDPA backend.
sdpa_backend = SDPBackend.FLASH_ATTENTION
# prepare inputs
batch_size = 1
seq_len = 128
input_ids = torch.randint(low=8, high=64, size=(batch_size, seq_len), device=device)
ignore_index = -100
# When using CP, we need to use `shift_labels`
shift_labels = torch.nn.functional.pad(input_ids, (0, 1), value=ignore_index)
shift_labels = shift_labels[..., 1:].contiguous()
# Positions 0..seq_len-1, matching the ids' dtype/device.
position_ids = (
    torch.cumsum(torch.ones(size=input_ids.size(), dtype=input_ids.dtype, device=input_ids.device), dim=1) - 1
)
# sync input as they are created randomly
dist.broadcast(input_ids, src=0)
dist.broadcast(shift_labels, src=0)
dist.broadcast(position_ids, src=0)
# model and optimizer
repo_id = "Qwen/Qwen2.5-Coder-0.5B-Instruct"
model = AutoModelForCausalLM.from_pretrained(repo_id, dtype=dtype, device_map=device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
model.train()
model.zero_grad()
optimizer.zero_grad()
# For loss
vocab_size = model.config.vocab_size
# so training could be synced
model = DDP(model, device_ids=[rank])
# prepare for CP: each buffer is sharded along its sequence dimension (dim 1)
buffers = (input_ids, shift_labels, position_ids)
buffer_seq_dims = (1, 1, 1)
# `no_restore_buffers=set(buffers)` is required if `loss.backward` is outside `context_parallel`.
# no_restore_buffers = set(buffers)
no_restore_buffers = None
# run with CP
with sdpa_kernel(sdpa_backend):
    with context_parallel(
        cp_mesh,
        buffers=buffers,
        buffer_seq_dims=buffer_seq_dims,
        no_restore_buffers=no_restore_buffers,
    ):
        outputs = model(input_ids, shift_labels=shift_labels, position_ids=position_ids)
        print(outputs.logits.shape)
        # So far we need to compute `loss` outside `model.forward` when using `shift_labels`
        # loss = outputs.loss
        loss = ForCausalLMLoss(logits=outputs.logits, labels=None, shift_labels=shift_labels, vocab_size=vocab_size)
    # This could be outside `context_parallel` context if `no_restore_buffers` is specified
    loss.backward()
    optimizer.step()
| {
"repo_id": "huggingface/transformers",
"file_path": "examples/pytorch/context_parallel.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/vilt/image_processing_vilt_fast.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for Vilt."""
from typing import Optional
import torch
import torchvision.transforms.v2.functional as tvF
from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
get_max_height_width,
group_images_by_shape,
reorder_images,
)
from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, PILImageResampling, SizeDict
from ...utils import (
TensorType,
auto_docstring,
)
from .image_processing_vilt import ViltImageProcessorKwargs
# Set maximum size based on the typical aspect ratio of the COCO dataset
MAX_LONGER_EDGE = 1333  # hard cap for the longer image side after resize
MAX_SHORTER_EDGE = 800  # reference shorter side used to derive the longer-edge cap
@auto_docstring
class ViltImageProcessorFast(BaseImageProcessorFast):
    # Defaults mirror the slow `ViltImageProcessor`.
    resample = PILImageResampling.BICUBIC
    image_mean = IMAGENET_STANDARD_MEAN
    image_std = IMAGENET_STANDARD_STD
    size = {"shortest_edge": 384}
    do_resize = True
    do_rescale = True
    do_normalize = True
    size_divisor = 32
    do_pad = True
    default_to_square = False
    model_input_names = ["pixel_values", "pixel_mask"]
    valid_kwargs = ViltImageProcessorKwargs

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_resize: bool,
        size: SizeDict,
        interpolation: Optional["tvF.InterpolationMode"],
        size_divisor: int | None,
        do_pad: bool,
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        disable_grouping: bool | None,
        return_tensors: str | TensorType | None,
        **kwargs,
    ) -> BatchFeature:
        """
        Preprocess an image or batch of images.
        This method overrides the base class method to include padding and pixel mask generation.
        """
        # Group images by size for batched resizing
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        resized_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_resize:
                stacked_images = self.resize(stacked_images, size, interpolation, size_divisor)
            resized_images_grouped[shape] = stacked_images
        resized_images = reorder_images(resized_images_grouped, grouped_images_index)
        # Group images by size for further processing (resizing may have merged/split groups)
        grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
        processed_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            # Fused rescale and normalize
            stacked_images = self.rescale_and_normalize(
                stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            processed_images_grouped[shape] = stacked_images
        processed_images = reorder_images(processed_images_grouped, grouped_images_index)
        # Handle padding if required
        data = {}
        if do_pad:
            pixel_values, pixel_mask = self._pad_batch(
                processed_images, return_tensors, disable_grouping=disable_grouping
            )
            data = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        else:
            # If no padding, just return the processed images
            if return_tensors == "pt":
                processed_images = torch.stack(processed_images)
            data = {"pixel_values": processed_images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def resize(
        self,
        images: "torch.Tensor",
        size: SizeDict,
        interpolation: Optional["tvF.InterpolationMode"] = None,
        size_divisor: int | None = None,
    ) -> "torch.Tensor":
        """
        Resize an image or batch of images to specified size.

        Args:
            images (`torch.Tensor`): Image or batch of images to resize.
            size (`dict[str, int]`): Size dictionary with shortest_edge key.
            interpolation (`tvF.InterpolationMode`, *optional*): Interpolation method to use.
            size_divisor (`int`, *optional*): Value to ensure height/width are divisible by.

        Returns:
            `torch.Tensor`: Resized image or batch of images.
        """
        if interpolation is None:
            interpolation = self.resample
        # Resize with aspect ratio preservation; the longer-edge cap keeps the
        # COCO-style 800:1333 shorter:longer ratio for any `shortest_edge`.
        shorter = size.shortest_edge
        longer = int(MAX_LONGER_EDGE / MAX_SHORTER_EDGE * shorter)
        heights = images.shape[-2]
        widths = images.shape[-1]
        # Determine the new dimensions: the shorter side becomes `shorter`,
        # the other side is scaled by the same factor.
        if heights < widths:
            new_heights = shorter
            new_widths = widths * (shorter / heights)
        else:
            new_heights = heights * (shorter / widths)
            new_widths = shorter
        # Check if the longer side exceeds max size and rescale both sides if so
        if max(new_heights, new_widths) > longer:
            scale = longer / max(new_heights, new_widths)
            new_heights = new_heights * scale
            new_widths = new_widths * scale
        # Round to nearest integer
        new_heights = int(new_heights + 0.5)
        new_widths = int(new_widths + 0.5)
        # Make dimensions divisible by size_divisor (floor, so never exceeds the cap)
        if size_divisor is not None:
            new_heights = new_heights // size_divisor * size_divisor
            new_widths = new_widths // size_divisor * size_divisor
        # Resize the image
        return tvF.resize(images, [new_heights, new_widths], interpolation=interpolation)

    def _pad_batch(
        self,
        images: list["torch.Tensor"],
        return_tensors: str | TensorType | None,
        disable_grouping: bool | None,
    ) -> tuple:
        """
        Pad a batch of images to the same size based on the maximum dimensions.

        Args:
            images (`list[torch.Tensor]`): List of images to pad.
            return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return.
            disable_grouping (`bool`, *optional*): Whether to disable grouping by shape.

        Returns:
            `tuple`: Tuple containing padded images and pixel masks.
        """
        # Calculate global maximum dimensions across all images
        max_size = get_max_height_width(images)
        # Group images by shape before padding
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        processed_images = {}
        processed_masks = {}
        for shape, stacked_images in grouped_images.items():
            # Create the mask template unconditionally: it is needed whenever padding is
            # applied below, regardless of `return_tensors`. (Previously it was only built
            # for return_tensors == "pt", which made the `needs_padding` branch raise
            # NameError for any other tensor type.)
            mask_template = torch.zeros(max_size, dtype=torch.int64, device=stacked_images.device)
            original_size = stacked_images.shape[-2:]
            needs_padding = original_size[0] != max_size[0] or original_size[1] != max_size[1]
            if needs_padding:
                padding_bottom = max_size[0] - original_size[0]
                padding_right = max_size[1] - original_size[1]
                # torchvision pad order: [left, top, right, bottom]
                padding = [0, 0, padding_right, padding_bottom]
                padded_images = tvF.pad(stacked_images, padding, fill=0)
                pixel_mask = mask_template.clone()
                # Valid (unpadded) region is marked with 1s
                pixel_mask[: original_size[0], : original_size[1]].fill_(1)
                pixel_masks = pixel_mask.unsqueeze(0).repeat(stacked_images.shape[0], 1, 1)
            else:
                padded_images = stacked_images
                pixel_masks = torch.ones(
                    (stacked_images.shape[0], max_size[0], max_size[1]),
                    dtype=torch.int64,
                    device=stacked_images.device,
                )
            # Store processed group
            processed_images[shape] = padded_images
            processed_masks[shape] = pixel_masks
        # Reorder images back to original order
        padded_images = reorder_images(processed_images, grouped_images_index)
        pixel_masks = reorder_images(processed_masks, grouped_images_index)
        # Stack if tensors are requested for final result
        if return_tensors == "pt" and padded_images:
            padded_images = torch.stack(padded_images)
            pixel_masks = torch.stack(pixel_masks)
        return padded_images, pixel_masks
__all__ = ["ViltImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/vilt/image_processing_vilt_fast.py",
"license": "Apache License 2.0",
"lines": 197,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/phi4_multimodal/test_image_processing_phi4_multimodal.py | # Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import math
import unittest
import warnings
import numpy as np
import pytest
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
if is_torchvision_available():
from transformers import Phi4MultimodalImageProcessorFast
class Phi4MultimodalImageProcessingTester:
    """Configuration holder and input factory shared by the test case below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=100,
        min_resolution=30,
        max_resolution=400,
        dynamic_hd=36,
        do_resize=True,
        size=None,
        patch_size=14,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_convert_rgb=True,
    ):
        super().__init__()
        size = size if size is not None else {"height": 100, "width": 100}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.dynamic_hd = dynamic_hd
        self.do_resize = do_resize
        self.size = size
        self.patch_size = patch_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Kwargs used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "patch_size": self.patch_size,
            "dynamic_hd": self.dynamic_hd,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def expected_output_image_shape(self, images):
        """Return the expected `(num_patches, C, H, W)` of the processor output.

        The patch count is the maximum over the batch of `w_crops * h_crops + 1`,
        clamped to `dynamic_hd`.
        """
        max_num_patches = 0
        for image in images:
            # Accept PIL (W, H via .size), numpy (H, W, C) and torch (..., H, W) inputs.
            if isinstance(image, Image.Image):
                width, height = image.size
            elif isinstance(image, np.ndarray):
                height, width = image.shape[:2]
            elif isinstance(image, torch.Tensor):
                height, width = image.shape[-2:]
            w_crop_num = math.ceil(width / float(self.size["width"]))
            h_crop_num = math.ceil(height / float(self.size["height"]))
            num_patches = min(w_crop_num * h_crop_num + 1, self.dynamic_hd)
            max_num_patches = max(max_num_patches, num_patches)
        num_patches = max_num_patches
        return num_patches, self.num_channels, self.size["height"], self.size["width"]

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Create a batch of random test images (PIL by default; numpy/torch via flags)."""
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )
@require_torch
@require_vision
class Phi4MultimodalImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    """Test suite for `Phi4MultimodalImageProcessorFast` (this model has no slow processor)."""

    fast_image_processing_class = Phi4MultimodalImageProcessorFast if is_torchvision_available() else None
    test_slow_image_processor = False

    def setUp(self):
        super().setUp()
        self.image_processor_tester = Phi4MultimodalImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        # Kwargs forwarded to the processor constructor in each test below.
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        # The processor must expose every documented configuration attribute.
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            self.assertTrue(hasattr(image_processing, "do_resize"))
            self.assertTrue(hasattr(image_processing, "size"))
            self.assertTrue(hasattr(image_processing, "do_center_crop"))
            self.assertTrue(hasattr(image_processing, "center_crop"))
            self.assertTrue(hasattr(image_processing, "do_normalize"))
            self.assertTrue(hasattr(image_processing, "image_mean"))
            self.assertTrue(hasattr(image_processing, "image_std"))
            self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        # `from_dict` must honor both the dict values and explicit kwarg overrides.
        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class.from_dict(self.image_processor_dict)
            self.assertEqual(image_processor.size, {"height": 100, "width": 100})

            image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42)
            self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    @unittest.skip(reason="Phi4MultimodalImageProcessorFast doesn't treat 4 channel PIL and numpy consistently yet")
    def test_call_numpy_4_channels(self):
        pass

    def test_cast_dtype_device(self):
        # Overridden from the mixin because this processor's output key is
        # `image_pixel_values` rather than `pixel_values`.
        for image_processing_class in self.image_processor_list:
            if self.test_cast_dtype is not None:
                # Initialize image_processor
                image_processor = image_processing_class(**self.image_processor_dict)

                # create random PyTorch tensors
                image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)

                encoding = image_processor(image_inputs, return_tensors="pt")
                # for layoutLM compatibility
                self.assertEqual(encoding.image_pixel_values.device, torch.device("cpu"))
                self.assertEqual(encoding.image_pixel_values.dtype, torch.float32)

                encoding = image_processor(image_inputs, return_tensors="pt").to(torch.float16)
                self.assertEqual(encoding.image_pixel_values.device, torch.device("cpu"))
                self.assertEqual(encoding.image_pixel_values.dtype, torch.float16)

                encoding = image_processor(image_inputs, return_tensors="pt").to("cpu", torch.bfloat16)
                self.assertEqual(encoding.image_pixel_values.device, torch.device("cpu"))
                self.assertEqual(encoding.image_pixel_values.dtype, torch.bfloat16)

                # `.to(dtype, device)` in that order is invalid and must raise.
                with self.assertRaises(TypeError):
                    _ = image_processor(image_inputs, return_tensors="pt").to(torch.bfloat16, "cpu")

                # Try with text + image feature
                encoding = image_processor(image_inputs, return_tensors="pt")
                encoding.update({"input_ids": torch.LongTensor([[1, 2, 3], [4, 5, 6]])})
                encoding = encoding.to(torch.float16)

                # Casting must not touch integer (token id) tensors.
                self.assertEqual(encoding.image_pixel_values.device, torch.device("cpu"))
                self.assertEqual(encoding.image_pixel_values.dtype, torch.float16)
                self.assertEqual(encoding.input_ids.dtype, torch.long)

    def test_call_pil(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random PIL images
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
            for image in image_inputs:
                self.assertIsInstance(image, Image.Image)

            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").image_pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
            self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))

            # Test batched
            encoded_images = image_processing(image_inputs, return_tensors="pt").image_pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
            self.assertEqual(
                tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
            )

    def test_call_numpy(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random numpy tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
            for image in image_inputs:
                self.assertIsInstance(image, np.ndarray)

            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").image_pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
            self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))

            # Test batched
            encoded_images = image_processing(image_inputs, return_tensors="pt").image_pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
            self.assertEqual(
                tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
            )

    def test_call_pytorch(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random PyTorch tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
            for image in image_inputs:
                self.assertIsInstance(image, torch.Tensor)

            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").image_pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
            self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))

            # Test batched
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
            encoded_images = image_processing(image_inputs, return_tensors="pt").image_pixel_values
            self.assertEqual(
                tuple(encoded_images.shape),
                (self.image_processor_tester.batch_size, *expected_output_image_shape),
            )

    def test_image_processor_preprocess_arguments(self):
        # Ensures `preprocess` validates its kwargs one way or the other.
        is_tested = False

        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class(**self.image_processor_dict)

            # validation done by _valid_processor_keys attribute
            if hasattr(image_processor, "_valid_processor_keys") and hasattr(image_processor, "preprocess"):
                preprocess_parameter_names = inspect.getfullargspec(image_processor.preprocess).args
                preprocess_parameter_names.remove("self")
                preprocess_parameter_names.sort()
                valid_processor_keys = image_processor._valid_processor_keys
                valid_processor_keys.sort()
                self.assertEqual(preprocess_parameter_names, valid_processor_keys)
                is_tested = True

            # validation done by @filter_out_non_signature_kwargs decorator
            if hasattr(image_processor.preprocess, "_filter_out_non_signature_kwargs"):
                if hasattr(self.image_processor_tester, "prepare_image_inputs"):
                    inputs = self.image_processor_tester.prepare_image_inputs()
                elif hasattr(self.image_processor_tester, "prepare_video_inputs"):
                    inputs = self.image_processor_tester.prepare_video_inputs()
                else:
                    self.skipTest(reason="No valid input preparation method found")

                with warnings.catch_warnings(record=True) as raised_warnings:
                    warnings.simplefilter("always")
                    image_processor(inputs, extra_argument=True)

                # An unknown kwarg must be reported via a warning naming it.
                messages = " ".join([str(w.message) for w in raised_warnings])
                self.assertGreaterEqual(len(raised_warnings), 1)
                self.assertIn("extra_argument", messages)

                is_tested = True

        if not is_tested:
            self.skipTest(reason="No validation found for `preprocess` method")

    @slow
    @pytest.mark.torch_compile_test
    def test_can_compile_fast_image_processor(self):
        # torch.compile must produce the same output as eager execution.
        if self.fast_image_processing_class is None:
            self.skipTest("Skipping compilation test as fast image processor is not defined")

        torch.compiler.reset()
        input_image = torch.randint(0, 255, (3, 224, 224), dtype=torch.uint8)
        image_processor = self.fast_image_processing_class(**self.image_processor_dict)
        output_eager = image_processor(input_image, device=torch_device, return_tensors="pt")

        image_processor = torch.compile(image_processor, mode="reduce-overhead")
        output_compiled = image_processor(input_image, device=torch_device, return_tensors="pt")

        torch.testing.assert_close(
            output_eager.image_pixel_values, output_compiled.image_pixel_values, rtol=1e-4, atol=1e-4
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/phi4_multimodal/test_image_processing_phi4_multimodal.py",
"license": "Apache License 2.0",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/trainer/test_trainer_distributed_worker_seed.py | import random
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.utils.data import Dataset
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.testing_utils import (
TestCasePlus,
backend_device_count,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_accelerator,
run_first,
torch_device,
)
def gather_from_all_gpus(tensor, world_size):
    """Gather `tensor` from every rank over the default process group.

    Requires `torch.distributed` to be initialized; `world_size` must match the
    group size or `all_gather` will fail.
    """
    # Prepare a list to gather tensors from all processes
    gather_list = [torch.zeros_like(tensor) for _ in range(world_size)]
    dist.all_gather(gather_list, tensor)
    return gather_list  # List of tensors from all ranks
class DummyDataset(Dataset):
    """Dataset returning one value drawn from each RNG (python, numpy, torch) per item.

    Used to verify that dataloader workers receive distinct seeds across ranks:
    identically-seeded workers would produce identical triples.
    """

    def __init__(self):
        self.length = 64

    def __len__(self):
        return self.length

    def __getitem__(self, i) -> int:
        # One sample per random source, packed into a single float tensor.
        values = [random.random(), np.random.random(), torch.rand([]).item()]
        return {"x": torch.tensor(values)}
class DummyModel(nn.Module):
    """Tiny linear model whose forward also asserts inputs differ across ranks.

    The forward pass gathers the input from every process and fails if all ranks
    received identical data, i.e. if dataloader worker seeding is broken.
    """

    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(3, 1)

    def forward(self, x):
        # Fix: `torch.tensor(existing_tensor)` is discouraged by PyTorch (emits a
        # UserWarning); copy explicitly with detach().clone() instead.
        local_tensor = x.detach().clone().to(torch_device)
        gathered = gather_from_all_gpus(local_tensor, dist.get_world_size())
        # At least one rank must have received different random inputs.
        assert not all(torch.allclose(t, gathered[0]) for t in gathered[1:])
        y = self.fc(x)
        # Trainer expects (loss, logits)-style tuple output.
        return (y.mean(), y)
class TestTrainerDistributedWorkerSeed(TestCasePlus):
    """Relaunches this very file under torchrun; the subprocess entry point is
    the `__main__` guard at the bottom of the module.

    The actual check lives in `DummyModel.forward`, which asserts that ranks
    see different random data; a failing assert surfaces as a non-zero exit
    code from the subprocess.
    """

    @run_first
    @require_torch_multi_accelerator
    def test_trainer(self):
        # One worker process per visible accelerator.
        device_count = backend_device_count(torch_device)
        output_dir = self.get_auto_remove_tmp_dir()
        distributed_args = f"""--nproc_per_node={device_count}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed_worker_seed.py
        """.split()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        # Raises if the subprocess exits non-zero, i.e. if any rank's assert fired.
        execute_subprocess_async(cmd, env=self.get_env())
def run_distributed_training(training_args):
    """Run a short Trainer loop that exercises per-worker dataloader seeding."""
    set_seed(42)
    # Keep the run short; the divergence assert fires on the first forward pass.
    training_args.max_steps = 10
    # dataloader_num_workers must be > 0 to enable worker_init_fn
    training_args.dataloader_num_workers = 2
    trainer = Trainer(
        DummyModel(),
        training_args,
        train_dataset=DummyDataset(),
    )
    trainer.train()
if __name__ == "__main__":
    # Entry point for the torchrun subprocess spawned by
    # TestTrainerDistributedWorkerSeed.test_trainer; parses only TrainingArguments.
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
    run_distributed_training(training_args)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/trainer/test_trainer_distributed_worker_seed.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/auto/video_processing_auto.py | # Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoVideoProcessor class."""
import importlib
import os
from collections import OrderedDict
from typing import TYPE_CHECKING
# Build the list of all video processors
from ...configuration_utils import PreTrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...utils import (
CONFIG_NAME,
IMAGE_PROCESSOR_NAME,
PROCESSOR_NAME,
VIDEO_PROCESSOR_NAME,
cached_file,
is_torchvision_available,
logging,
safe_load_json_file,
)
from ...utils.import_utils import requires
from ...video_processing_utils import BaseVideoProcessor
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
if TYPE_CHECKING:
# This significantly improves completion suggestion performance when
# the transformers package is used with Microsoft's Pylance language server.
VIDEO_PROCESSOR_MAPPING_NAMES: OrderedDict[str, tuple[str | None, str | None]] = OrderedDict()
else:
VIDEO_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("ernie4_5_vl_moe", "Ernie4_5_VLMoeVideoProcessor"),
("glm46v", "Glm46VVideoProcessor"),
("glm4v", "Glm4vVideoProcessor"),
("instructblip", "InstructBlipVideoVideoProcessor"),
("instructblipvideo", "InstructBlipVideoVideoProcessor"),
("internvl", "InternVLVideoProcessor"),
("llava_next_video", "LlavaNextVideoVideoProcessor"),
("llava_onevision", "LlavaOnevisionVideoProcessor"),
("pe_audio_video", "PeVideoVideoProcessor"),
("pe_video", "PeVideoVideoProcessor"),
("perception_lm", "PerceptionLMVideoProcessor"),
("qwen2_5_omni", "Qwen2VLVideoProcessor"),
("qwen2_5_vl", "Qwen2VLVideoProcessor"),
("qwen2_vl", "Qwen2VLVideoProcessor"),
("qwen3_5", "Qwen3VLVideoProcessor"),
("qwen3_5_moe", "Qwen3VLVideoProcessor"),
("qwen3_omni_moe", "Qwen2VLVideoProcessor"),
("qwen3_vl", "Qwen3VLVideoProcessor"),
("qwen3_vl_moe", "Qwen3VLVideoProcessor"),
("sam2_video", "Sam2VideoVideoProcessor"),
("sam3_video", "Sam3VideoVideoProcessor"),
("smolvlm", "SmolVLMVideoProcessor"),
("video_llama_3", "VideoLlama3VideoProcessor"),
("video_llava", "VideoLlavaVideoProcessor"),
("videomae", "VideoMAEVideoProcessor"),
("vjepa2", "VJEPA2VideoProcessor"),
]
)
# Fast video processors all require torchvision; when it is missing, replace
# every mapping entry with None so lookups fail over to the dummy-object path.
if not is_torchvision_available():
    for model_type in VIDEO_PROCESSOR_MAPPING_NAMES:
        VIDEO_PROCESSOR_MAPPING_NAMES[model_type] = None
VIDEO_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, VIDEO_PROCESSOR_MAPPING_NAMES)
def video_processor_class_from_name(class_name: str):
    """Resolve a video processor class object from its class-name string.

    Searches, in order: the static name mapping, user-registered extra
    content, and finally the top-level `transformers` namespace (which
    exposes dummy objects with helpful errors when a backend is missing).
    Returns None when nothing matches.
    """
    for model_type, candidate_name in VIDEO_PROCESSOR_MAPPING_NAMES.items():
        if candidate_name == class_name:
            module_path = model_type_to_module_name(model_type)
            module = importlib.import_module(f".{module_path}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                # Several model types can share one class name; keep searching.
                continue

    for registered in VIDEO_PROCESSOR_MAPPING._extra_content.values():
        if getattr(registered, "__name__", None) == class_name:
            return registered

    # We did not find the class, but maybe it's because a dep is missing. In that
    # case, the class will be in the main init and we return the proper dummy to
    # get an appropriate error message.
    main_module = importlib.import_module("transformers")
    return getattr(main_module, class_name, None)
def get_video_processor_config(
    pretrained_model_name_or_path: str | os.PathLike,
    cache_dir: str | os.PathLike | None = None,
    force_download: bool = False,
    proxies: dict[str, str] | None = None,
    token: bool | str | None = None,
    revision: str | None = None,
    local_files_only: bool = False,
    **kwargs,
):
    """
    Loads the video processor configuration from a pretrained model video processor configuration.

    Args:
        pretrained_model_name_or_path (`str` or `os.PathLike`):
            This can be either:

            - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
              huggingface.co.
            - a path to a *directory* containing a configuration file saved using the
              [`~BaseVideoProcessor.save_pretrained`] method, e.g., `./my_model_directory/`.

        cache_dir (`str` or `os.PathLike`, *optional*):
            Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
            cache should not be used.
        force_download (`bool`, *optional*, defaults to `False`):
            Whether or not to force to (re-)download the configuration files and override the cached versions if they
            exist.
        proxies (`dict[str, str]`, *optional*):
            A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
            'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
        token (`str` or *bool*, *optional*):
            The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
            when running `hf auth login` (stored in `~/.huggingface`).
        revision (`str`, *optional*, defaults to `"main"`):
            The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
            git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
            identifier allowed by git.
        local_files_only (`bool`, *optional*, defaults to `False`):
            If `True`, will only try to load the video processor configuration from local files.

    <Tip>

    Passing `token=True` is required when you want to use a private model.

    </Tip>

    Returns:
        `Dict`: The configuration of the video processor.

    Examples:

    ```python
    # Download configuration from huggingface.co and cache.
    video_processor_config = get_video_processor_config("llava-hf/llava-onevision-qwen2-0.5b-ov-hf")
    # This model does not have a video processor config so the result will be an empty dict.
    video_processor_config = get_video_processor_config("FacebookAI/xlm-roberta-base")

    # Save a pretrained video processor locally and you can reload its config
    from transformers import AutoVideoProcessor

    video_processor = AutoVideoProcessor.from_pretrained("llava-hf/llava-onevision-qwen2-0.5b-ov-hf")
    video_processor.save_pretrained("video-processor-test")
    video_processor = get_video_processor_config("video-processor-test")
    ```"""
    # Load with a priority given to the nested processor config, if available in repo
    resolved_processor_file = cached_file(
        pretrained_model_name_or_path,
        filename=PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        proxies=proxies,
        token=token,
        revision=revision,
        local_files_only=local_files_only,
        _raise_exceptions_for_gated_repo=False,
        _raise_exceptions_for_missing_entries=False,
    )
    # Try the dedicated video processor file first, then fall back to the image
    # processor file; keep the first one that actually resolves.
    resolved_video_processor_files = [
        resolved_file
        for filename in [VIDEO_PROCESSOR_NAME, IMAGE_PROCESSOR_NAME]
        if (
            resolved_file := cached_file(
                pretrained_model_name_or_path,
                filename=filename,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                token=token,
                revision=revision,
                local_files_only=local_files_only,
                _raise_exceptions_for_gated_repo=False,
                _raise_exceptions_for_missing_entries=False,
                _raise_exceptions_for_connection_errors=False,
            )
        )
        is not None
    ]
    resolved_video_processor_file = resolved_video_processor_files[0] if resolved_video_processor_files else None

    # An empty list if none of the possible files is found in the repo
    if not resolved_video_processor_file and not resolved_processor_file:
        logger.info("Could not locate the video processor configuration file.")
        return {}

    # Load video_processor dict. Priority goes as (nested config if found -> video processor config -> image processor config)
    # We are downloading both configs because almost all models have a `processor_config.json` but
    # not all of these are nested. We need to check if it was saved recently as nested or if it is legacy style
    video_processor_dict = {}
    if resolved_processor_file is not None:
        processor_dict = safe_load_json_file(resolved_processor_file)
        if "video_processor" in processor_dict:
            video_processor_dict = processor_dict["video_processor"]

    # BUGFIX: the previous condition was `video_processor_dict is None`, which could
    # never be True because `video_processor_dict` is initialized to `{}` above —
    # the standalone video/image processor config file was therefore never loaded.
    # Fall back to it whenever the nested lookup yielded nothing.
    if resolved_video_processor_file is not None and not video_processor_dict:
        video_processor_dict = safe_load_json_file(resolved_video_processor_file)
    return video_processor_dict
@requires(backends=("vision", "torchvision"))
class AutoVideoProcessor:
    r"""
    This is a generic video processor class that will be instantiated as one of the video processor classes of the
    library when created with the [`AutoVideoProcessor.from_pretrained`] class method.

    This class cannot be instantiated directly using `__init__()` (throws an error).
    """

    def __init__(self):
        # Pure factory/dispatcher class: direct instantiation is always an error.
        raise OSError(
            "AutoVideoProcessor is designed to be instantiated "
            "using the `AutoVideoProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(VIDEO_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
        r"""
        Instantiate one of the video processor classes of the library from a pretrained model vocabulary.

        The video processor class to instantiate is selected based on the `model_type` property of the config object
        (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's
        missing, by falling back to using pattern matching on `pretrained_model_name_or_path`:

        List options

        Params:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                This can be either:

                - a string, the *model id* of a pretrained video_processor hosted inside a model repo on
                  huggingface.co.
                - a path to a *directory* containing a video processor file saved using the
                  [`~video_processing_utils.BaseVideoProcessor.save_pretrained`] method, e.g.,
                  `./my_model_directory/`.
                - a path or url to a saved video processor JSON *file*, e.g.,
                  `./my_model_directory/preprocessor_config.json`.
            cache_dir (`str` or `os.PathLike`, *optional*):
                Path to a directory in which a downloaded pretrained model video processor should be cached if the
                standard cache should not be used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force to (re-)download the video processor files and override the cached versions if
                they exist.
            proxies (`dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
            token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
                when running `hf auth login` (stored in `~/.huggingface`).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.
            return_unused_kwargs (`bool`, *optional*, defaults to `False`):
                If `False`, then this function returns just the final video processor object. If `True`, then this
                functions returns a `Tuple(video_processor, unused_kwargs)` where *unused_kwargs* is a dictionary
                consisting of the key/value pairs whose keys are not video processor attributes: i.e., the part of
                `kwargs` which has not been used to update `video_processor` and is otherwise ignored.
            trust_remote_code (`bool`, *optional*, defaults to `False`):
                Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
                should only be set to `True` for repositories you trust and in which you have read the code, as it will
                execute code present on the Hub on your local machine.
            kwargs (`dict[str, Any]`, *optional*):
                The values in kwargs of any keys which are video processor attributes will be used to override the
                loaded values. Behavior concerning key/value pairs whose keys are *not* video processor attributes is
                controlled by the `return_unused_kwargs` keyword parameter.

        <Tip>

        Passing `token=True` is required when you want to use a private model.

        </Tip>

        Examples:

        ```python
        >>> from transformers import AutoVideoProcessor

        >>> # Download video processor from huggingface.co and cache.
        >>> video_processor = AutoVideoProcessor.from_pretrained("llava-hf/llava-onevision-qwen2-0.5b-ov-hf")

        >>> # If video processor files are in a directory (e.g. video processor was saved using *save_pretrained('./test/saved_model/')*)
        >>> # video_processor = AutoVideoProcessor.from_pretrained("./test/saved_model/")
        ```"""
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        # Step 1: look for an explicit class name / auto_map entry in the
        # video processor config itself.
        config_dict, _ = BaseVideoProcessor.get_video_processor_dict(pretrained_model_name_or_path, **kwargs)
        video_processor_class = config_dict.get("video_processor_type", None)
        video_processor_auto_map = None
        if "AutoVideoProcessor" in config_dict.get("auto_map", {}):
            video_processor_auto_map = config_dict["auto_map"]["AutoVideoProcessor"]

        # If we still don't have the video processor class, check if we're loading from a previous image processor config
        # and if so, infer the video processor class from there.
        if video_processor_class is None and video_processor_auto_map is None:
            image_processor_class = config_dict.pop("image_processor_type", None)
            if image_processor_class is not None:
                video_processor_class_inferred = image_processor_class.replace("ImageProcessor", "VideoProcessor")
                # Some models have different image processors, e.g. InternVL uses GotOCRImageProcessor
                # We cannot use GotOCRVideoProcessor when falling back for BC and should try to infer from config later on
                if video_processor_class_from_name(video_processor_class_inferred) is not None:
                    video_processor_class = video_processor_class_inferred
            if "AutoImageProcessor" in config_dict.get("auto_map", {}):
                image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]
                video_processor_auto_map = image_processor_auto_map.replace("ImageProcessor", "VideoProcessor")

        # If we don't find the video processor class in the video processor config, let's try the model config.
        if video_processor_class is None and video_processor_auto_map is None:
            if not isinstance(config, PreTrainedConfig):
                config = AutoConfig.from_pretrained(
                    pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
                )
            # It could be in `config.video_processor_type``
            video_processor_class = getattr(config, "video_processor_type", None)
            if hasattr(config, "auto_map") and "AutoVideoProcessor" in config.auto_map:
                video_processor_auto_map = config.auto_map["AutoVideoProcessor"]

        if video_processor_class is not None:
            video_processor_class = video_processor_class_from_name(video_processor_class)

        # Decide between remote (Hub-defined) code and the local class, honoring
        # the user's trust_remote_code preference.
        has_remote_code = video_processor_auto_map is not None
        has_local_code = video_processor_class is not None or type(config) in VIDEO_PROCESSOR_MAPPING
        if has_remote_code:
            # "repo--ClassRef" style references name the upstream repo explicitly.
            if "--" in video_processor_auto_map:
                upstream_repo = video_processor_auto_map.split("--")[0]
            else:
                upstream_repo = None
            trust_remote_code = resolve_trust_remote_code(
                trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code, upstream_repo
            )

        if has_remote_code and trust_remote_code:
            class_ref = video_processor_auto_map
            video_processor_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, **kwargs)
            # code_revision only applies to fetching the dynamic module; drop it
            # so it doesn't leak into the downstream from_pretrained call.
            _ = kwargs.pop("code_revision", None)
            video_processor_class.register_for_auto_class()
            return video_processor_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
        elif video_processor_class is not None:
            return video_processor_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
        # Last try: we use the VIDEO_PROCESSOR_MAPPING.
        elif type(config) in VIDEO_PROCESSOR_MAPPING:
            video_processor_class = VIDEO_PROCESSOR_MAPPING[type(config)]

            if video_processor_class is not None:
                return video_processor_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)

        # Raise a more informative error message if torchvision isn't found, otherwise just fallback to default
        if not is_torchvision_available():
            raise ValueError(
                f"{pretrained_model_name_or_path} requires `torchvision` to be installed. Please install `torchvision` and try again."
            )

        raise ValueError(
            f"Unrecognized video processor in {pretrained_model_name_or_path}. Should have a "
            f"`video_processor_type` key in its {VIDEO_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in VIDEO_PROCESSOR_MAPPING_NAMES)}"
        )

    @staticmethod
    def register(
        config_class,
        video_processor_class,
        exist_ok=False,
    ):
        """
        Register a new video processor for this class.

        Args:
            config_class ([`PreTrainedConfig`]):
                The configuration corresponding to the model to register.
            video_processor_class ([`BaseVideoProcessor`]):
                The video processor to register.
        """
        VIDEO_PROCESSOR_MAPPING.register(config_class, video_processor_class, exist_ok=exist_ok)
__all__ = ["VIDEO_PROCESSOR_MAPPING", "AutoVideoProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/auto/video_processing_auto.py",
"license": "Apache License 2.0",
"lines": 357,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/instructblipvideo/video_processing_instructblipvideo.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Video processor class for InstructBLIPVideo
"""
from typing import Optional
import torch
import torchvision.transforms.v2.functional as tvF
from ...image_processing_utils import BatchFeature
from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, PILImageResampling, SizeDict
from ...utils import TensorType
from ...video_processing_utils import BaseVideoProcessor
from ...video_utils import group_videos_by_shape, reorder_videos
class InstructBlipVideoVideoProcessor(BaseVideoProcessor):
    """Video processor for InstructBLIP-Video.

    Resizes frames to 384x384, rescales and normalizes them with CLIP
    statistics, and returns the result under the `pixel_values` key.
    """

    resample = PILImageResampling.BICUBIC
    image_mean = OPENAI_CLIP_MEAN
    image_std = OPENAI_CLIP_STD
    size = {"height": 384, "width": 384}
    default_to_square = True
    do_resize = True
    do_rescale = True
    do_normalize = True
    do_convert_rgb = True
    do_sample_frames = False  # Set to False for BC, recommended to set `True` in new models
    model_input_names = ["pixel_values"]

    def _preprocess(
        self,
        videos: list["torch.Tensor"],
        do_convert_rgb: bool,
        do_resize: bool,
        size: SizeDict,
        interpolation: Optional["tvF.InterpolationMode"],
        do_center_crop: bool,
        crop_size: SizeDict,
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        return_tensors: str | TensorType | None = None,
        **kwargs,
    ) -> BatchFeature:
        # Phase 1: convert/resize, batching together videos of identical shape.
        by_shape, original_order = group_videos_by_shape(videos)
        resized_by_shape = {}
        for video_shape, batch in by_shape.items():
            if do_convert_rgb:
                batch = self.convert_to_rgb(batch)
            if do_resize:
                batch = self.resize(batch, size=size, interpolation=interpolation)
            resized_by_shape[video_shape] = batch
        videos_after_resize = reorder_videos(resized_by_shape, original_order)

        # Phase 2: regroup (resize may have been skipped or produced new
        # shapes), then crop and apply the fused rescale/normalize.
        by_shape, original_order = group_videos_by_shape(videos_after_resize)
        transformed_by_shape = {}
        for video_shape, batch in by_shape.items():
            if do_center_crop:
                batch = self.center_crop(batch, crop_size)
            batch = self.rescale_and_normalize(
                batch, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            transformed_by_shape[video_shape] = batch
        output_videos = reorder_videos(transformed_by_shape, original_order)

        return BatchFeature(data={"pixel_values": output_videos}, tensor_type=return_tensors)
__all__ = ["InstructBlipVideoVideoProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/instructblipvideo/video_processing_instructblipvideo.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/internvl/video_processing_internvl.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Video processor class for InternVL."""
from typing import Optional
import torch
import torchvision.transforms.v2.functional as tvF
from ...image_processing_utils import BatchFeature
from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, PILImageResampling, SizeDict
from ...processing_utils import Unpack, VideosKwargs
from ...utils import TensorType
from ...video_processing_utils import BaseVideoProcessor
from ...video_utils import VideoMetadata, group_videos_by_shape, reorder_videos
class InternVLVideoProcessorInitKwargs(VideosKwargs, total=False):
    # Extra init-time kwarg on top of the shared VideosKwargs (all optional,
    # total=False). `initial_shift` controls where uniform frame sampling
    # starts; `True` centers the sampling window (see sample_frames below).
    initial_shift: bool | float | int
class InternVLVideoProcessor(BaseVideoProcessor):
    """Fast video processor for InternVL.

    Samples frames uniformly (with an optional initial shift), resizes to
    384x384, and applies CLIP rescale/normalization, returning
    `pixel_values_videos`.
    """

    resample = PILImageResampling.BICUBIC
    image_mean = OPENAI_CLIP_MEAN
    image_std = OPENAI_CLIP_STD
    size = {"height": 384, "width": 384}
    do_resize = True
    do_rescale = True
    do_normalize = True
    do_convert_rgb = True
    initial_shift = True
    do_sample_frames = False  # Set to False for BC, recommended to set `True` in new models
    valid_kwargs = InternVLVideoProcessorInitKwargs

    def __init__(self, **kwargs: Unpack[InternVLVideoProcessorInitKwargs]):
        super().__init__(**kwargs)

    def sample_frames(
        self,
        metadata: VideoMetadata,
        num_frames: int | None = None,
        fps: int | float | None = None,
        initial_shift: bool | float | int | None = None,
        **kwargs,
    ):
        """
        Default sampling function which uniformly samples the desired number of frames between 0 and total number of frames.
        If `fps` is passed along with metadata, `fps` frames per second are sampled uniformly. Arguments `num_frames`
        and `fps` are mutually exclusive.

        Args:
            metadata (`VideoMetadata`):
                Metadata of the video containing information about total duration, fps and total number of frames.
            num_frames (`int`, *optional*):
                Maximum number of frames to sample. Defaults to `self.num_frames`.
            fps (`int` or `float`, *optional*):
                Target frames to sample per second. Defaults to `self.fps`.
            initial_shift (`bool`, `float` or `int`, defaults to `self.initial_shift`):
                The initial shift to apply when sampling frames. If `True`, the shift is set so that frames are sampled from the middle of the video.

        Returns:
            np.ndarray:
                Indices to sample video frames.
        """
        num_frames = num_frames if num_frames is not None else self.num_frames
        initial_shift = initial_shift if initial_shift is not None else self.initial_shift

        # If num_frames is not given but fps is, calculate num_frames from fps.
        # BUGFIX: validate `metadata` BEFORE reading `metadata.total_num_frames`;
        # previously a `None` metadata raised AttributeError instead of the
        # informative ValueError below.
        if num_frames is None and fps is not None:
            if metadata is None or metadata.fps is None:
                raise ValueError(
                    "Asked to sample `fps` frames per second but no video metadata was provided which is required when sampling with `fps`. "
                    "Please pass in `VideoMetadata` object or use a fixed `num_frames` per input video"
                )
            num_frames = int(metadata.total_num_frames / metadata.fps * fps)

        total_num_frames = metadata.total_num_frames
        if initial_shift is True:
            # Center the uniform sampling window within the video.
            initial_shift = total_num_frames / num_frames / 2

        if num_frames > total_num_frames:
            raise ValueError(
                f"Video can't be sampled. The `num_frames={num_frames}` exceeds `total_num_frames={total_num_frames}`. "
            )

        indices = torch.arange(initial_shift, total_num_frames, total_num_frames / num_frames).int()
        return indices

    def _preprocess(
        self,
        videos: list["torch.Tensor"],
        do_convert_rgb: bool,
        do_resize: bool,
        size: SizeDict,
        interpolation: Optional["tvF.InterpolationMode"],
        do_center_crop: bool,
        crop_size: SizeDict,
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        return_tensors: str | TensorType | None = None,
        **kwargs,
    ) -> BatchFeature:
        # Group videos by size for batched resizing
        grouped_videos, grouped_videos_index = group_videos_by_shape(videos)
        resized_videos_grouped = {}
        for shape, stacked_videos in grouped_videos.items():
            if do_convert_rgb:
                stacked_videos = self.convert_to_rgb(stacked_videos)
            if do_resize:
                stacked_videos = self.resize(stacked_videos, size=size, interpolation=interpolation)
            resized_videos_grouped[shape] = stacked_videos
        resized_videos = reorder_videos(resized_videos_grouped, grouped_videos_index)

        # Group videos by size for further processing
        # Needed in case do_resize is False, or resize returns videos with different sizes
        grouped_videos, grouped_videos_index = group_videos_by_shape(resized_videos)
        processed_videos_grouped = {}
        for shape, stacked_videos in grouped_videos.items():
            if do_center_crop:
                stacked_videos = self.center_crop(stacked_videos, crop_size)
            # Fused rescale and normalize
            stacked_videos = self.rescale_and_normalize(
                stacked_videos, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            processed_videos_grouped[shape] = stacked_videos
        processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index)

        return BatchFeature(data={"pixel_values_videos": processed_videos}, tensor_type=return_tensors)
__all__ = ["InternVLVideoProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/internvl/video_processing_internvl.py",
"license": "Apache License 2.0",
"lines": 125,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/llava_next_video/video_processing_llava_next_video.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video processor class for LLaVa-NeXT-Video."""
from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, PILImageResampling
from ...video_processing_utils import BaseVideoProcessor
class LlavaNextVideoVideoProcessor(BaseVideoProcessor):
    # Attribute-only subclass: all preprocessing logic lives in
    # BaseVideoProcessor; these class attributes pin the LLaVa-NeXT-Video
    # defaults (CLIP normalization, 224px shortest-edge resize + center crop).
    resample = PILImageResampling.BICUBIC
    image_mean = OPENAI_CLIP_MEAN
    image_std = OPENAI_CLIP_STD
    size = {"shortest_edge": 224}
    default_to_square = False
    crop_size = {"height": 224, "width": 224}
    do_resize = True
    do_center_crop = True
    do_rescale = True
    do_normalize = True
    do_convert_rgb = True
    do_sample_frames = False  # Set to False for BC, recommended to set `True` in new models
__all__ = ["LlavaNextVideoVideoProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/llava_next_video/video_processing_llava_next_video.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/qwen2_vl/video_processing_qwen2_vl.py | # Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""video processor class for Qwen2-VL."""
import math
from typing import Optional
import torch
import torchvision.transforms.v2.functional as tvF
from ...image_processing_utils import BatchFeature
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
PILImageResampling,
SizeDict,
get_image_size,
)
from ...processing_utils import Unpack, VideosKwargs
from ...utils import TensorType, add_start_docstrings
from ...video_processing_utils import BASE_VIDEO_PROCESSOR_DOCSTRING, BaseVideoProcessor
from ...video_utils import VideoMetadata, group_videos_by_shape, reorder_videos
from .image_processing_qwen2_vl import smart_resize
class Qwen2VLVideoProcessorInitKwargs(VideosKwargs, total=False):
    """Extra keyword arguments accepted by `Qwen2VLVideoProcessor` on top of the base `VideosKwargs`."""

    # Pixel-area bounds used by `smart_resize` when dynamically resizing frames.
    min_pixels: int
    max_pixels: int
    # Spatial patch size of the vision encoder.
    patch_size: int
    # Number of consecutive frames folded into one temporal patch.
    temporal_patch_size: int
    # Spatial merge factor between vision-encoder patches and LLM tokens.
    merge_size: int
    # Lower/upper bounds on the number of frames sampled from a video.
    min_frames: int
    max_frames: int
@add_start_docstrings(
    "Constructs a fast Qwen2-VL image processor that dynamically resizes videos based on the original videos.",
    BASE_VIDEO_PROCESSOR_DOCSTRING,
    """
    min_pixels (`int`, *optional*, defaults to `56 * 56`):
        The min pixels of the image to resize the image.
    max_pixels (`int`, *optional*, defaults to `28 * 28 * 1280`):
        The max pixels of the image to resize the image.
    patch_size (`int`, *optional*, defaults to 14):
        The spacial patch size of the vision encoder.
    temporal_patch_size (`int`, *optional*, defaults to 2):
        The temporal patch size of the vision encoder.
    merge_size (`int`, *optional*, defaults to 2):
        The merge size of the vision encoder to llm encoder.
    min_frames (`int`, *optional*, defaults to 4):
        The minimum number of frames that can be sampled.
    max_frames (`int`, *optional*, defaults to 768):
        The maximum number of frames that can be sampled.
    """,
)
class Qwen2VLVideoProcessor(BaseVideoProcessor):
    resample = PILImageResampling.BICUBIC
    # `size` bounds the per-frame pixel area; frames are resized dynamically within these bounds.
    size = {"shortest_edge": 128 * 28 * 28, "longest_edge": 28 * 28 * 768}
    image_mean = OPENAI_CLIP_MEAN
    image_std = OPENAI_CLIP_STD
    do_resize = True
    do_rescale = True
    do_normalize = True
    do_convert_rgb = True
    patch_size = 14
    temporal_patch_size = 2
    merge_size = 2
    min_frames = 4
    max_frames = 768
    do_sample_frames = False  # Set to False for BC, recommended to set `True` in new models
    valid_kwargs = Qwen2VLVideoProcessorInitKwargs
    model_input_names = ["pixel_values_videos", "video_grid_thw"]

    def __init__(self, **kwargs: Unpack[Qwen2VLVideoProcessorInitKwargs]):
        size = kwargs.pop("size", None)
        min_pixels = kwargs.pop("min_pixels", None)
        max_pixels = kwargs.pop("max_pixels", None)
        # backward compatibility: override size with min_pixels and max_pixels if they are provided
        # NOTE: copy when falling back to the class-level default — at this point `self.size` is the
        # class attribute, and mutating it below would leak into every other instance.
        size = {**self.size} if size is None else size
        if min_pixels is not None:
            size["shortest_edge"] = min_pixels
            size.pop("min_pixels", None)
        if max_pixels is not None:
            size["longest_edge"] = max_pixels
            size.pop("max_pixels", None)
        if "shortest_edge" not in size or "longest_edge" not in size:
            raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
        super().__init__(size=size, **kwargs)

    def _further_process_kwargs(
        self,
        size: SizeDict | None = None,
        min_pixels: int | None = None,
        max_pixels: int | None = None,
        **kwargs,
    ) -> dict:
        """
        Update kwargs that need further processing before being validated
        Can be overridden by subclasses to customize the processing of kwargs.
        """
        # `min_pixels`/`max_pixels` take precedence over `size` for backward compatibility.
        if min_pixels is not None and max_pixels is not None:
            size = {"shortest_edge": min_pixels, "longest_edge": max_pixels}
        elif size is not None:
            if "shortest_edge" not in size or "longest_edge" not in size:
                raise ValueError("dictionary `size` must contain 'shortest_edge' and 'longest_edge' keys.")
            min_pixels = size["shortest_edge"]
            max_pixels = size["longest_edge"]
        else:
            # Copy the instance default so later mutations cannot leak back into `self.size`.
            size = {**self.size}
        return super()._further_process_kwargs(size=size, **kwargs)

    def sample_frames(
        self,
        metadata: VideoMetadata,
        temporal_patch_size: int | None = None,
        min_frames: int | None = None,
        max_frames: int | None = None,
        num_frames: int | None = None,
        fps: int | float | None = None,
        **kwargs,
    ):
        """
        Default sampling function which uniformly samples the desired number of frames between 0 and total number of frames.
        If `fps` is passed along with metadata, `fps` frames per second are sampled uniformty. Arguments `num_frames`
        and `fps` are mutually exclusive.

        Args:
            metadata (`VideoMetadata`):
                Metadata of the video containing information about total duration, fps and total number of frames.
            temporal_patch_size (`int`, *optional*):
                The temporal patch size of the vision encoder. Number of sampled frames will be rounded to be divisible by frame factor.
            min_frames (`int`, *optional*):
                The minimum number of frames that can be sampled.
            max_frames (`int`, *optional*):
                The maximum number of frames that can be sampled.
            num_frames (`int`, *optional*):
                Maximum number of frames to sample. Defaults to `self.num_frames`.
            fps (`int` or `float`, *optional*):
                Target frames to sample per second. Defaults to `self.fps`.

        Returns:
            np.ndarray:
                Indices to sample video frames.
        """
        if fps is not None and num_frames is not None:
            raise ValueError("`num_frames` and `fps` are mutually exclusive arguments, please use only one!")
        num_frames = num_frames if num_frames is not None else self.num_frames
        fps = fps if fps is not None else self.fps
        temporal_patch_size = temporal_patch_size if temporal_patch_size is not None else self.temporal_patch_size
        min_frames = min_frames if min_frames is not None else self.min_frames
        max_frames = max_frames if max_frames is not None else self.max_frames
        total_num_frames = metadata.total_num_frames
        # If num_frames is not given but fps is, calculate num_frames from fps
        if num_frames is not None:
            # Round to the nearest multiple of the temporal patch size.
            num_frames = round(num_frames / temporal_patch_size) * temporal_patch_size
        elif fps is not None:
            if metadata is None or metadata.fps is None:
                raise ValueError(
                    "Asked to sample `fps` frames per second but no video metadata was provided which is required when sampling with `fps`. "
                    "Please pass in `VideoMetadata` object or use a fixed `num_frames` per input video"
                )
            max_frames = math.floor(min(max_frames, total_num_frames) / temporal_patch_size) * temporal_patch_size
            num_frames = total_num_frames / metadata.fps * fps
            num_frames = min(max(num_frames, min_frames), max_frames, total_num_frames)
            num_frames = math.floor(num_frames / temporal_patch_size) * temporal_patch_size
        # Guard the comparison: `num_frames` is `None` when neither `num_frames` nor `fps` was resolved,
        # and `None > int` raises `TypeError`.
        if num_frames is not None and num_frames > total_num_frames:
            raise ValueError(
                f"Video can't be sampled. The inferred `num_frames={num_frames}` exceeds `total_num_frames={total_num_frames}`. "
                "Decrease `num_frames` or `fps` for sampling."
            )
        if num_frames is not None:
            indices = torch.arange(0, total_num_frames, total_num_frames / num_frames).int()
        else:
            indices = torch.arange(0, total_num_frames).int()
        return indices

    def _preprocess(
        self,
        videos: list["torch.Tensor"],
        do_resize: bool,
        size: SizeDict,
        interpolation: Optional["tvF.InterpolationMode"],
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        patch_size: int | None = None,
        temporal_patch_size: int | None = None,
        merge_size: int | None = None,
        return_tensors: str | TensorType | None = None,
        **kwargs,
    ):
        """
        Resize, normalize and patchify videos into flattened vision-encoder patches.

        Returns a `BatchFeature` with `pixel_values_videos` of shape
        `(total_patches, channel * temporal_patch_size * patch_size * patch_size)` and `video_grid_thw`
        holding the `(grid_t, grid_h, grid_w)` patch grid per video.
        """
        # Group videos by size for batched resizing
        grouped_videos, grouped_videos_index = group_videos_by_shape(videos)
        resized_videos_grouped = {}
        for shape, stacked_videos in grouped_videos.items():
            height, width = get_image_size(stacked_videos[0], channel_dim=ChannelDimension.FIRST)
            resized_height, resized_width = height, width
            if do_resize:
                # Choose a size within the pixel-area bounds that is divisible by `patch_size * merge_size`.
                resized_height, resized_width = smart_resize(
                    height,
                    width,
                    factor=patch_size * merge_size,
                    min_pixels=size["shortest_edge"],
                    max_pixels=size["longest_edge"],
                )
                stacked_videos = self.resize(
                    image=stacked_videos,
                    size=SizeDict(height=resized_height, width=resized_width),
                    interpolation=interpolation,
                )
            resized_videos_grouped[shape] = stacked_videos
        resized_videos = reorder_videos(resized_videos_grouped, grouped_videos_index)
        # Group videos by size for further processing
        # Needed in case do_resize is False, or resize returns videos with different sizes
        grouped_videos, grouped_videos_index = group_videos_by_shape(resized_videos)
        processed_videos_grouped = {}
        processed_grids = {}
        for shape, stacked_videos in grouped_videos.items():
            resized_height, resized_width = get_image_size(stacked_videos[0], channel_dim=ChannelDimension.FIRST)
            # Fused rescale and normalize
            stacked_videos = self.rescale_and_normalize(
                stacked_videos, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            patches = stacked_videos
            # Check that videos have `num_frames` divisible by `temporal_patch_size`;
            # if not, pad by repeating the last frame.
            T = patches.shape[1]
            if pad := -T % temporal_patch_size:
                repeats = patches[:, -1:].expand(-1, pad, -1, -1, -1)
                patches = torch.cat((patches, repeats), dim=1)
            batch_size, grid_t, channel = patches.shape[:3]
            grid_t = grid_t // temporal_patch_size
            grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
            patches = patches.view(
                batch_size,
                grid_t,
                temporal_patch_size,
                channel,
                grid_h // merge_size,
                merge_size,
                patch_size,
                grid_w // merge_size,
                merge_size,
                patch_size,
            )
            # Reorder so spatially-merged neighbors are adjacent in the flattened patch sequence.
            patches = patches.permute(0, 1, 4, 7, 5, 8, 3, 2, 6, 9)
            flatten_patches = patches.reshape(
                batch_size,
                grid_t * grid_h * grid_w,
                channel * temporal_patch_size * patch_size * patch_size,
            )
            processed_videos_grouped[shape] = flatten_patches
            processed_grids[shape] = [[grid_t, grid_h, grid_w]] * batch_size
        processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index)
        processed_grids = reorder_videos(processed_grids, grouped_videos_index)
        pixel_values_videos = torch.cat(processed_videos, dim=0)
        video_grid_thw = torch.tensor(processed_grids)
        return BatchFeature(
            data={"pixel_values_videos": pixel_values_videos, "video_grid_thw": video_grid_thw},
            tensor_type=return_tensors,
        )

    def get_num_of_video_patches(self, num_frames: int, height: int, width: int, videos_kwargs=None):
        """
        A utility that returns number of video patches for a given video size.

        Args:
            num_frames (`int`):
                Number of frames in the input video.
            height (`int`):
                Height of the input video.
            width (`int`):
                Width of the input video.
            videos_kwargs (`dict`, *optional*):
                Any kwargs to override defaults of the video processor.

        Returns:
            `int`: Number of video patches.
        """
        # `videos_kwargs` defaults to `None`; normalize it so the `.get` calls below cannot raise
        # `AttributeError` when the caller omits it.
        videos_kwargs = videos_kwargs if videos_kwargs is not None else {}
        min_pixels = videos_kwargs.get("min_pixels", None) or self.size["shortest_edge"]
        max_pixels = videos_kwargs.get("max_pixels", None) or self.size["longest_edge"]
        patch_size = videos_kwargs.get("patch_size", None) or self.patch_size
        merge_size = videos_kwargs.get("merge_size", None) or self.merge_size
        temporal_patch_size = videos_kwargs.get("temporal_patch_size", None) or self.temporal_patch_size
        factor = patch_size * merge_size
        resized_height, resized_width = smart_resize(
            height, width, factor, min_pixels=min_pixels, max_pixels=max_pixels
        )
        grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
        grid_t = num_frames // temporal_patch_size
        return grid_t * grid_h * grid_w
# Public API of this module.
__all__ = ["Qwen2VLVideoProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/qwen2_vl/video_processing_qwen2_vl.py",
"license": "Apache License 2.0",
"lines": 294,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/video_llava/video_processing_video_llava.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video processor class for Video-LLaVA."""
from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, PILImageResampling
from ...video_processing_utils import BaseVideoProcessor
class VideoLlavaVideoProcessor(BaseVideoProcessor):
    """Video processor for Video-LLaVA: resize to shortest edge 224, center-crop to 224x224,
    then rescale and normalize with the OpenAI CLIP statistics."""

    resample = PILImageResampling.BICUBIC
    image_mean = OPENAI_CLIP_MEAN
    image_std = OPENAI_CLIP_STD
    # Resize the shorter edge to 224 (no forced square), then center-crop to 224x224.
    size = {"shortest_edge": 224}
    default_to_square = False
    crop_size = {"height": 224, "width": 224}
    do_resize = True
    do_center_crop = True
    do_rescale = True
    do_normalize = True
    do_convert_rgb = True
    do_sample_frames = False  # Set to False for BC, recommended to set `True` in new models
# Public API of this module.
__all__ = ["VideoLlavaVideoProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/video_llava/video_processing_video_llava.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/video_processing_utils.py | # Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import warnings
from collections.abc import Callable
from copy import deepcopy
from functools import partial
from typing import Any, Optional
import numpy as np
from huggingface_hub import create_repo, is_offline_mode
from huggingface_hub.dataclasses import validate_typed_dict
from .dynamic_module_utils import custom_object_save
from .image_processing_utils import (
BatchFeature,
get_size_dict,
)
from .image_processing_utils_fast import BaseImageProcessorFast
from .image_utils import (
ChannelDimension,
SizeDict,
validate_kwargs,
)
from .processing_utils import Unpack, VideosKwargs
from .utils import (
IMAGE_PROCESSOR_NAME,
PROCESSOR_NAME,
VIDEO_PROCESSOR_NAME,
TensorType,
add_start_docstrings,
copy_func,
is_torch_available,
is_torchcodec_available,
is_torchvision_v2_available,
logging,
safe_load_json_file,
)
from .utils.hub import cached_file
from .utils.import_utils import requires
from .video_utils import (
VideoInput,
VideoMetadata,
group_videos_by_shape,
infer_channel_dimension_format,
is_valid_video,
load_video,
make_batched_metadata,
make_batched_videos,
reorder_videos,
)
if is_torch_available():
import torch
if is_torchvision_v2_available():
import torchvision.transforms.v2.functional as tvF
logger = logging.get_logger(__name__)
BASE_VIDEO_PROCESSOR_DOCSTRING = r"""
Args:
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the video's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict`, *optional*, defaults to `self.size`):
Size of the output video after resizing. Can be overridden by the `size` parameter in the `preprocess`
method.
size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
The size by which to make sure both the height and width can be divided.
default_to_square (`bool`, *optional*, defaults to `self.default_to_square`):
Whether to default to a square video when resizing, if size is an int.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the video. Only has an effect if `do_resize` is set to `True`. Can be
overridden by the `resample` parameter in the `preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the video to the specified `crop_size`. Can be overridden by `do_center_crop` in the
`preprocess` method.
crop_size (`dict[str, int]` *optional*, defaults to `self.crop_size`):
Size of the output video after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
method.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the video by the specified scale `rescale_factor`. Can be overridden by the
`do_rescale` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `self.rescale_factor`):
Scale factor to use if rescaling the video. Only has an effect if `do_rescale` is set to `True`. Can be
overridden by the `rescale_factor` parameter in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the video. Can be overridden by the `do_normalize` parameter in the `preprocess`
method. Can be overridden by the `do_normalize` parameter in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Mean to use if normalizing the video. This is a float or list of floats the length of the number of
channels in the video. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be
overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Standard deviation to use if normalizing the video. This is a float or list of floats the length of the
number of channels in the video. Can be overridden by the `image_std` parameter in the `preprocess` method.
Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*, defaults to `self.image_std`):
Whether to convert the video to RGB.
video_metadata (`VideoMetadata`, *optional*):
Metadata of the video containing information about total duration, fps and total number of frames.
do_sample_frames (`int`, *optional*, defaults to `self.do_sample_frames`):
Whether to sample frames from the video before processing or to process the whole video.
num_frames (`int`, *optional*, defaults to `self.num_frames`):
Maximum number of frames to sample when `do_sample_frames=True`.
fps (`int` or `float`, *optional*, defaults to `self.fps`):
Target frames to sample per second when `do_sample_frames=True`.
return_tensors (`str` or `TensorType`, *optional*):
Returns stacked tensors if set to `pt, otherwise returns a list of tensors.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output video. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: video in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: video in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input video.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input video. If unset, the channel dimension format is inferred
from the input video. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: video in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: video in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: video in (height, width) format.
device (`torch.device`, *optional*):
The device to process the videos on. If unset, the device is inferred from the input videos.
return_metadata (`bool`, *optional*):
Whether to return video metadata or not.
"""
@add_start_docstrings(
    "Constructs a base VideoProcessor.",
    BASE_VIDEO_PROCESSOR_DOCSTRING,
)
@requires(backends=("vision", "torchvision"))
class BaseVideoProcessor(BaseImageProcessorFast):
    # Class-level defaults; concrete video processors override these. A `None` value means
    # "unset" and is resolved per call in `preprocess` via `getattr(self, name, None)`.
    _auto_class = None
    resample = None
    image_mean = None
    image_std = None
    size = None
    size_divisor = None
    default_to_square = True
    crop_size = None
    do_resize = None
    do_center_crop = None
    do_rescale = None
    rescale_factor = 1 / 255
    do_normalize = None
    do_convert_rgb = None
    do_sample_frames = None
    fps = None
    num_frames = None
    video_metadata = None
    return_metadata = False
    # TypedDict describing the keyword arguments this processor accepts.
    valid_kwargs = VideosKwargs
    model_input_names = ["pixel_values_videos"]
    def __init__(self, **kwargs: Unpack[VideosKwargs]) -> None:
        """
        Store every provided kwarg as an instance attribute and normalize `size`/`crop_size`
        into canonical size dicts.
        """
        super().__init__()
        kwargs.pop("processor_class", None)  # serialized config key, not a processing attribute
        # Additional attributes without default values
        for key, value in kwargs.items():
            try:
                setattr(self, key, value)
            except AttributeError as err:
                logger.error(f"Can't set {key} with value {value} for {self}")
                raise err
        # Prepare size related keys and turn them into `SizeDict`
        size = kwargs.pop("size", self.size)
        self.size = (
            get_size_dict(size=size, default_to_square=kwargs.pop("default_to_square", self.default_to_square))
            if size is not None
            else None
        )
        crop_size = kwargs.pop("crop_size", self.crop_size)
        self.crop_size = get_size_dict(crop_size, param_name="crop_size") if crop_size is not None else None
        # Save valid kwargs in a list for further processing
        self.model_valid_processing_keys = list(self.valid_kwargs.__annotations__.keys())
        for key in self.model_valid_processing_keys:
            if kwargs.get(key) is not None:
                setattr(self, key, kwargs[key])
            else:
                # deepcopy so mutable class-level defaults are not shared between instances
                setattr(self, key, deepcopy(getattr(self, key, None)))
    def __call__(self, videos, **kwargs) -> BatchFeature:
        """Alias for `preprocess`, making the processor instance directly callable."""
        return self.preprocess(videos, **kwargs)
def convert_to_rgb(
self,
video: "torch.Tensor",
) -> VideoInput:
"""
Converts a video to RGB format.
Args:
video (`"torch.Tensor"`):
The video to convert.
Returns:
`torch.Tensor`: The converted video.
"""
video = tvF.grayscale_to_rgb(video)
if video.shape[-3] == 3 or not (video[..., 3, :, :] < 255).any():
return video
# There is a transparency layer, blend it with a white background.
# Calculate the alpha proportion for blending.
alpha = video[..., 3, :, :] / 255.0
video = (1 - alpha[..., None, :, :]) * 255 + alpha[..., None, :, :] * video[..., :3, :, :]
return video
def sample_frames(
self,
metadata: VideoMetadata,
num_frames: int | None = None,
fps: int | float | None = None,
**kwargs,
):
"""
Default sampling function which uniformly samples the desired number of frames between 0 and total number of frames.
If `fps` is passed along with metadata, `fps` frames per second are sampled uniformty. Arguments `num_frames`
and `fps` are mutually exclusive.
Args:
metadata (`VideoMetadata`):
Metadata of the video containing information about total duration, fps and total number of frames.
num_frames (`int`, *optional*):
Maximum number of frames to sample. Defaults to `self.num_frames`.
fps (`int` or `float`, *optional*):
Target frames to sample per second. Defaults to `self.fps`.
Returns:
np.ndarray:
Indices to sample video frames.
"""
if fps is not None and num_frames is not None:
raise ValueError(
"`num_frames`, `fps`, and `sample_indices_fn` are mutually exclusive arguments, please use only one!"
)
num_frames = num_frames if num_frames is not None else self.num_frames
fps = fps if fps is not None else self.fps
total_num_frames = metadata.total_num_frames
# If num_frames is not given but fps is, calculate num_frames from fps
if num_frames is None and fps is not None:
if metadata is None or metadata.fps is None:
raise ValueError(
"Asked to sample `fps` frames per second but no video metadata was provided which is required when sampling with `fps`. "
"Please pass in `VideoMetadata` object or use a fixed `num_frames` per input video"
)
num_frames = int(total_num_frames / metadata.fps * fps)
if num_frames > total_num_frames:
raise ValueError(
f"Video can't be sampled. The `num_frames={num_frames}` exceeds `total_num_frames={total_num_frames}`. "
)
if num_frames is not None:
indices = torch.arange(0, total_num_frames, total_num_frames / num_frames).int()
else:
indices = torch.arange(0, total_num_frames).int()
return indices
    def _decode_and_sample_videos(
        self,
        videos: VideoInput,
        video_metadata: VideoMetadata | dict,
        do_sample_frames: bool | None = None,
        sample_indices_fn: Callable | None = None,
    ) -> list["torch.Tensor"]:
        """
        Decode input videos and sample frames if needed.

        Returns the (possibly sampled) videos together with their per-video metadata.
        """
        videos = make_batched_videos(videos)
        video_metadata = make_batched_metadata(videos, video_metadata=video_metadata)
        # Only sample frames if an array video is passed, otherwise first decode -> then sample
        if is_valid_video(videos[0]) and do_sample_frames:
            sampled_videos = []
            sampled_metadata = []
            for video, metadata in zip(videos, video_metadata):
                indices = sample_indices_fn(metadata=metadata)
                # Record which frames were kept so downstream users can map back to the source video.
                metadata.frames_indices = indices
                sampled_videos.append(video[indices])
                sampled_metadata.append(metadata)
            videos = sampled_videos
            video_metadata = sampled_metadata
        elif not is_valid_video(videos[0]):
            if isinstance(videos[0], list):
                # Videos sometimes are passed as a list of image URLs, especially through templates
                videos = [
                    torch.stack([tvF.pil_to_tensor(image) for image in images], dim=0)
                    for images in self.fetch_images(videos)
                ]
                if do_sample_frames:
                    raise ValueError(
                        "Sampling frames from a list of images is not supported! Set `do_sample_frames=False`."
                    )
            else:
                # URL/path inputs: decoding and sampling happen together inside `fetch_videos`.
                videos, video_metadata = self.fetch_videos(videos, sample_indices_fn=sample_indices_fn)
        return videos, video_metadata
def _prepare_input_videos(
self,
videos: VideoInput,
input_data_format: str | ChannelDimension | None = None,
device: str | None = None,
) -> list["torch.Tensor"]:
"""
Prepare the input videos for processing.
"""
processed_videos = []
for video in videos:
# `make_batched_videos` always returns a 4D array per video
if isinstance(video, np.ndarray):
# not using tvF.to_tensor as it doesn't handle (C, H, W) numpy arrays
video = torch.from_numpy(video).contiguous()
# Infer the channel dimension format if not provided
if input_data_format is None:
input_data_format = infer_channel_dimension_format(video)
if input_data_format == ChannelDimension.LAST:
video = video.permute(0, 3, 1, 2).contiguous()
if device is not None:
video = video.to(device)
processed_videos.append(video)
return processed_videos
    @add_start_docstrings(
        BASE_VIDEO_PROCESSOR_DOCSTRING,
    )
    def preprocess(
        self,
        videos: VideoInput,
        **kwargs: Unpack[VideosKwargs],
    ) -> BatchFeature:
        """Validate kwargs, decode/sample the input videos and run the model-specific `_preprocess`."""
        validate_kwargs(
            captured_kwargs=kwargs.keys(),
            valid_processor_keys=list(self.valid_kwargs.__annotations__.keys()) + ["return_tensors"],
        )
        # Perform type validation on received kwargs
        validate_typed_dict(self.valid_kwargs, kwargs)
        # Set default kwargs from self. This ensures that if a kwarg is not provided
        # by the user, it gets its default value from the instance, or is set to None.
        for kwarg_name in self.valid_kwargs.__annotations__:
            kwargs.setdefault(kwarg_name, getattr(self, kwarg_name, None))
        # Pop the kwargs consumed before `_preprocess` so they are not forwarded twice.
        input_data_format = kwargs.pop("input_data_format")
        do_sample_frames = kwargs.pop("do_sample_frames")
        device = kwargs.pop("device")
        video_metadata = kwargs.pop("video_metadata")
        # Bind the remaining kwargs into the frame sampler so subclasses can use their extras.
        sample_indices_fn = partial(self.sample_frames, **kwargs) if do_sample_frames else None
        videos, video_metadata = self._decode_and_sample_videos(
            videos,
            video_metadata=video_metadata,
            do_sample_frames=do_sample_frames,
            sample_indices_fn=sample_indices_fn,
        )
        videos = self._prepare_input_videos(videos=videos, input_data_format=input_data_format, device=device)
        kwargs = self._further_process_kwargs(**kwargs)
        self._validate_preprocess_kwargs(**kwargs)
        # Pop kwargs that are not needed in _preprocess
        kwargs.pop("data_format")
        return_metadata = kwargs.pop("return_metadata")
        preprocessed_videos = self._preprocess(videos=videos, **kwargs)
        if return_metadata:
            preprocessed_videos["video_metadata"] = video_metadata
        return preprocessed_videos
    def _preprocess(
        self,
        videos: list["torch.Tensor"],
        do_convert_rgb: bool,
        do_resize: bool,
        size: SizeDict,
        interpolation: Optional["tvF.InterpolationMode"],
        do_center_crop: bool,
        crop_size: SizeDict,
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: float | list[float] | None,
        image_std: float | list[float] | None,
        return_tensors: str | TensorType | None = None,
        **kwargs,
    ) -> BatchFeature:
        """Default transform pipeline: RGB-convert, resize, center-crop, rescale and normalize."""
        # Group videos by size for batched resizing
        grouped_videos, grouped_videos_index = group_videos_by_shape(videos)
        resized_videos_grouped = {}
        for shape, stacked_videos in grouped_videos.items():
            if do_convert_rgb:
                stacked_videos = self.convert_to_rgb(stacked_videos)
            if do_resize:
                stacked_videos = self.resize(stacked_videos, size=size, interpolation=interpolation)
            resized_videos_grouped[shape] = stacked_videos
        # Restore the original ordering before the second grouping pass.
        resized_videos = reorder_videos(resized_videos_grouped, grouped_videos_index)
        # Group videos by size for further processing
        # Needed in case do_resize is False, or resize returns videos with different sizes
        grouped_videos, grouped_videos_index = group_videos_by_shape(resized_videos)
        processed_videos_grouped = {}
        for shape, stacked_videos in grouped_videos.items():
            if do_center_crop:
                stacked_videos = self.center_crop(stacked_videos, crop_size)
            # Fused rescale and normalize
            stacked_videos = self.rescale_and_normalize(
                stacked_videos, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            processed_videos_grouped[shape] = stacked_videos
        processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index)
        return BatchFeature(data={"pixel_values_videos": processed_videos}, tensor_type=return_tensors)
    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: str | os.PathLike,
        cache_dir: str | os.PathLike | None = None,
        force_download: bool = False,
        local_files_only: bool = False,
        token: str | bool | None = None,
        revision: str = "main",
        **kwargs,
    ):
        r"""
        Instantiate a type of [`~video_processing_utils.VideoProcessorBase`] from a video processor.

        Args:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                This can be either:
                - a string, the *model id* of a pretrained video hosted inside a model repo on
                  huggingface.co.
                - a path to a *directory* containing a video processor file saved using the
                  [`~video_processing_utils.VideoProcessorBase.save_pretrained`] method, e.g.,
                  `./my_model_directory/`.
                - a path or url to a saved video processor JSON *file*, e.g.,
                  `./my_model_directory/video_preprocessor_config.json`.
            cache_dir (`str` or `os.PathLike`, *optional*):
                Path to a directory in which a downloaded pretrained model video processor should be cached if the
                standard cache should not be used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force to (re-)download the video processor files and override the cached versions if
                they exist.
            proxies (`dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
            token (`str` or `bool`, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
                the token generated when running `hf auth login` (stored in `~/.huggingface`).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.

                <Tip>

                To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.

                </Tip>

            return_unused_kwargs (`bool`, *optional*, defaults to `False`):
                If `False`, then this function returns just the final video processor object. If `True`, then this
                functions returns a `Tuple(video_processor, unused_kwargs)` where *unused_kwargs* is a dictionary
                consisting of the key/value pairs whose keys are not video processor attributes: i.e., the part of
                `kwargs` which has not been used to update `video_processor` and is otherwise ignored.
            subfolder (`str`, *optional*, defaults to `""`):
                In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
                specify the folder name here.
            kwargs (`dict[str, Any]`, *optional*):
                The values in kwargs of any keys which are video processor attributes will be used to override the
                loaded values. Behavior concerning key/value pairs whose keys are *not* video processor attributes is
                controlled by the `return_unused_kwargs` keyword parameter.

        Returns:
            A video processor of type [`~video_processing_utils.VideoProcessorBase`].

        Examples:

        ```python
        # We can't instantiate directly the base class *VideoProcessorBase* so let's show the examples on a
        # derived class: *LlavaOnevisionVideoProcessor*
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained(
            "llava-hf/llava-onevision-qwen2-0.5b-ov-hf"
        )  # Download video_processing_config from huggingface.co and cache.
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained(
            "./test/saved_model/"
        )  # E.g. video processor (or model) was saved using *save_pretrained('./test/saved_model/')*
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained("./test/saved_model/video_preprocessor_config.json")
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained(
            "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", do_normalize=False, foo=False
        )
        assert video_processor.do_normalize is False
        video_processor, unused_kwargs = LlavaOnevisionVideoProcessor.from_pretrained(
            "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", do_normalize=False, foo=False, return_unused_kwargs=True
        )
        assert video_processor.do_normalize is False
        assert unused_kwargs == {"foo": False}
        ```"""
        # Forward download-related options through `kwargs` to `get_video_processor_dict`.
        kwargs["cache_dir"] = cache_dir
        kwargs["force_download"] = force_download
        kwargs["local_files_only"] = local_files_only
        kwargs["revision"] = revision
        if token is not None:
            kwargs["token"] = token
        video_processor_dict, kwargs = cls.get_video_processor_dict(pretrained_model_name_or_path, **kwargs)
        return cls.from_dict(video_processor_dict, **kwargs)
def save_pretrained(self, save_directory: str | os.PathLike, push_to_hub: bool = False, **kwargs):
    """
    Save a video processor object to the directory `save_directory`, so that it can be re-loaded using the
    [`~video_processing_utils.VideoProcessorBase.from_pretrained`] class method.

    Args:
        save_directory (`str` or `os.PathLike`):
            Directory where the video processor JSON file will be saved (will be created if it does not exist).
        push_to_hub (`bool`, *optional*, defaults to `False`):
            Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
            repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
            namespace).
        kwargs (`dict[str, Any]`, *optional*):
            Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.

    Returns:
        `list[str]`: The list of files saved (a single JSON config file).
    """
    if os.path.isfile(save_directory):
        raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
    os.makedirs(save_directory, exist_ok=True)
    if push_to_hub:
        commit_message = kwargs.pop("commit_message", None)
        # Default repo name: the last path component of the save directory.
        repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
        repo_id = create_repo(repo_id, exist_ok=True, **kwargs).repo_id
        # Snapshot file timestamps BEFORE writing so only modified files are uploaded afterwards.
        files_timestamps = self._get_files_timestamps(save_directory)
    # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be
    # loaded from the Hub.
    if self._auto_class is not None:
        custom_object_save(self, save_directory, config=self)
    # If we save using the predefined names, we can load using `from_pretrained`
    output_video_processor_file = os.path.join(save_directory, VIDEO_PROCESSOR_NAME)
    self.to_json_file(output_video_processor_file)
    logger.info(f"Video processor saved in {output_video_processor_file}")
    if push_to_hub:
        self._upload_modified_files(
            save_directory,
            repo_id,
            files_timestamps,
            commit_message=commit_message,
            token=kwargs.get("token"),
        )
    return [output_video_processor_file]
@classmethod
def get_video_processor_dict(
    cls, pretrained_model_name_or_path: str | os.PathLike, **kwargs
) -> tuple[dict[str, Any], dict[str, Any]]:
    """
    From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
    video processor of type [`~video_processing_utils.VideoProcessorBase`] using `from_dict`.

    Parameters:
        pretrained_model_name_or_path (`str` or `os.PathLike`):
            The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
        subfolder (`str`, *optional*, defaults to `""`):
            In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
            specify the folder name here.

    Returns:
        `tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the video processor object, and
        the remaining (unconsumed) kwargs.
    """
    # Pop download-related options; whatever is left in `kwargs` is forwarded to the caller.
    cache_dir = kwargs.pop("cache_dir", None)
    force_download = kwargs.pop("force_download", False)
    proxies = kwargs.pop("proxies", None)
    token = kwargs.pop("token", None)
    local_files_only = kwargs.pop("local_files_only", False)
    revision = kwargs.pop("revision", None)
    subfolder = kwargs.pop("subfolder", "")
    from_pipeline = kwargs.pop("_from_pipeline", None)
    from_auto_class = kwargs.pop("_from_auto", False)
    # Telemetry metadata attached to the download request only.
    user_agent = {"file_type": "video processor", "from_auto_class": from_auto_class}
    if from_pipeline is not None:
        user_agent["using_pipeline"] = from_pipeline
    if is_offline_mode() and not local_files_only:
        logger.info("Offline mode: forcing local_files_only=True")
        local_files_only = True
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    is_local = os.path.isdir(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        # A direct path to a JSON config file was given; nothing to resolve.
        resolved_video_processor_file = pretrained_model_name_or_path
        resolved_processor_file = None
        is_local = True
    else:
        video_processor_file = VIDEO_PROCESSOR_NAME
        try:
            # Try to load with a new config name first and if not successful try with the old file name
            # NOTE: we save all processor configs as nested dict in PROCESSOR_NAME from v5, which is the standard
            resolved_processor_file = cached_file(
                pretrained_model_name_or_path,
                filename=PROCESSOR_NAME,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                local_files_only=local_files_only,
                token=token,
                user_agent=user_agent,
                revision=revision,
                subfolder=subfolder,
                _raise_exceptions_for_missing_entries=False,
            )
            # Fall back to the standalone video processor config, then the legacy image processor config.
            resolved_video_processor_files = [
                resolved_file
                for filename in [video_processor_file, IMAGE_PROCESSOR_NAME]
                if (
                    resolved_file := cached_file(
                        pretrained_model_name_or_path,
                        filename=filename,
                        cache_dir=cache_dir,
                        force_download=force_download,
                        proxies=proxies,
                        local_files_only=local_files_only,
                        token=token,
                        user_agent=user_agent,
                        revision=revision,
                        subfolder=subfolder,
                        _raise_exceptions_for_missing_entries=False,
                    )
                )
                is not None
            ]
            resolved_video_processor_file = (
                resolved_video_processor_files[0] if resolved_video_processor_files else None
            )
        except OSError:
            # Raise any OS error raise by `cached_file`. It will have a helpful error message adapted to
            # the original exception.
            raise
        except Exception:
            # For any other exception, we throw a generic error.
            raise OSError(
                f"Can't load video processor for '{pretrained_model_name_or_path}'. If you were trying to load"
                " it from 'https://huggingface.co/models', make sure you don't have a local directory with the"
                f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a"
                f" directory containing a {video_processor_file} file"
            )

    # Load video_processor dict. Priority goes as (nested config if found -> video processor config -> image processor config)
    # We are downloading both configs because almost all models have a `processor_config.json` but
    # not all of these are nested. We need to check if it was saved recently as nested or if it is legacy style
    video_processor_dict = None
    if resolved_processor_file is not None:
        processor_dict = safe_load_json_file(resolved_processor_file)
        if "video_processor" in processor_dict:
            video_processor_dict = processor_dict["video_processor"]

    if resolved_video_processor_file is not None and video_processor_dict is None:
        video_processor_dict = safe_load_json_file(resolved_video_processor_file)

    if video_processor_dict is None:
        raise OSError(
            f"Can't load video processor for '{pretrained_model_name_or_path}'. If you were trying to load"
            " it from 'https://huggingface.co/models', make sure you don't have a local directory with the"
            f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a"
            f" directory containing a {video_processor_file} file"
        )

    if is_local:
        logger.info(f"loading configuration file {resolved_video_processor_file}")
    else:
        logger.info(
            f"loading configuration file {video_processor_file} from cache at {resolved_video_processor_file}"
        )
    return video_processor_dict, kwargs
@classmethod
def from_dict(cls, video_processor_dict: dict[str, Any], **kwargs):
    """
    Build a video processor of type [`~video_processing_utils.VideoProcessorBase`] from a plain Python
    dictionary of parameters (e.g. one produced by
    [`~video_processing_utils.VideoProcessorBase.to_dict`]).

    Args:
        video_processor_dict (`dict[str, Any]`):
            Parameters used to instantiate the video processor object.
        kwargs (`dict[str, Any]`):
            Additional parameters; any key matching an existing processor attribute overrides the loaded value.

    Returns:
        [`~video_processing_utils.VideoProcessorBase`]: The instantiated video processor, plus the unused
        kwargs when `return_unused_kwargs=True`.
    """
    video_processor_dict = dict(video_processor_dict)
    return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
    # `size`/`crop_size` kwargs are routed through the dict so the processor converts them to their
    # canonical dict form instead of them overwriting the converted value as plain kwargs.
    for size_key in ("size", "crop_size"):
        if size_key in kwargs and size_key in video_processor_dict:
            video_processor_dict[size_key] = kwargs.pop(size_key)

    video_processor = cls(**video_processor_dict)

    # Apply — and consume — every remaining kwarg that matches a processor attribute.
    consumed = []
    for attr_name, attr_value in kwargs.items():
        if hasattr(video_processor, attr_name):
            setattr(video_processor, attr_name, attr_value)
            consumed.append(attr_name)
    for attr_name in consumed:
        kwargs.pop(attr_name, None)

    logger.info(f"Video processor {video_processor}")
    if return_unused_kwargs:
        return video_processor, kwargs
    return video_processor
def to_dict(self) -> dict[str, Any]:
    """
    Serializes this instance to a Python dictionary.

    Returns:
        `dict[str, Any]`: Dictionary of all the attributes that make up this video processor instance.
    """
    attributes = deepcopy(self.__dict__)
    serializable = {}
    for name, value in attributes.items():
        if value is not None:
            serializable[name] = value
            continue
        # Keep an explicit `None` only when it overrides a non-None class-level default.
        class_default = getattr(type(self), name, "NOT_FOUND")
        if class_default != "NOT_FOUND" and class_default is not None:
            serializable[name] = value
    # Internal bookkeeping attributes are never serialized.
    serializable.pop("model_valid_processing_keys", None)
    serializable.pop("_valid_kwargs_names", None)
    serializable["video_processor_type"] = self.__class__.__name__
    return serializable
def to_json_string(self) -> str:
    """
    Serializes this instance to a JSON string.

    Returns:
        `str`: String containing all the attributes that make up this video processor instance in JSON format.
    """
    dictionary = self.to_dict()
    # numpy arrays are not JSON serializable; convert them to plain lists first.
    converted = {
        key: value.tolist() if isinstance(value, np.ndarray) else value
        for key, value in dictionary.items()
    }
    return json.dumps(converted, indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path: str | os.PathLike):
"""
Save this instance to a JSON file.
Args:
json_file_path (`str` or `os.PathLike`):
Path to the JSON file in which this image_processor instance's parameters will be saved.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string())
def __repr__(self):
    # Render as "<ClassName> <pretty-printed JSON config>" for readable debugging output.
    return f"{self.__class__.__name__} {self.to_json_string()}"
@classmethod
def from_json_file(cls, json_file: str | os.PathLike):
    """
    Instantiate a video processor of type [`~video_processing_utils.VideoProcessorBase`] from the path to a
    JSON file of parameters.

    Args:
        json_file (`str` or `os.PathLike`):
            Path to the JSON file containing the parameters.

    Returns:
        A video processor of type [`~video_processing_utils.VideoProcessorBase`]: The video_processor object
        instantiated from that JSON file.
    """
    with open(json_file, encoding="utf-8") as reader:
        video_processor_dict = json.load(reader)
    return cls(**video_processor_dict)
@classmethod
def register_for_auto_class(cls, auto_class="AutoVideoProcessor"):
    """
    Register this class with a given auto class. This should only be used for custom video processors as the ones
    in the library are already mapped with `AutoVideoProcessor `.

    <Tip warning={true}>

    This API is experimental and may have some slight breaking changes in the next releases.

    </Tip>

    Args:
        auto_class (`str` or `type`, *optional*, defaults to `"AutoVideoProcessor "`):
            The auto class to register this new video processor with.
    """
    # Accept either a class object or its name; store the name only.
    auto_class_name = auto_class if isinstance(auto_class, str) else auto_class.__name__

    import transformers.models.auto as auto_module

    if not hasattr(auto_module, auto_class_name):
        raise ValueError(f"{auto_class_name} is not a valid auto class.")

    cls._auto_class = auto_class_name
def fetch_videos(self, video_url_or_urls: str | list[str] | list[list[str]], sample_indices_fn=None):
    """
    Convert a single or a list of urls into the corresponding `np.array` objects.

    If a single url is passed, the return value will be a single object. If a list is passed a list of objects is
    returned.
    """
    # Prefer torchcodec; fall back to the deprecated torchvision decoder when it is unavailable.
    backend = "torchcodec"
    if not is_torchcodec_available():
        warnings.warn(
            "`torchcodec` is not installed and cannot be used to decode the video by default. "
            "Falling back to `torchvision`. Note that `torchvision` decoding is deprecated and will be removed in future versions. "
        )
        backend = "torchvision"
    if isinstance(video_url_or_urls, list):
        # Recurse per URL; `zip(*...)` regroups the per-video (frames, metadata) pairs into
        # two parallel tuples: (all_frames, all_metadata).
        return list(zip(*[self.fetch_videos(x, sample_indices_fn=sample_indices_fn) for x in video_url_or_urls]))
    else:
        return load_video(video_url_or_urls, backend=backend, sample_indices_fn=sample_indices_fn)
# Re-bind `push_to_hub` to a copy of the mixin method so its docstring can be specialized for
# video processors without mutating the shared `PushToHubMixin.push_to_hub`.
BaseVideoProcessor.push_to_hub = copy_func(BaseVideoProcessor.push_to_hub)
if BaseVideoProcessor.push_to_hub.__doc__ is not None:
    BaseVideoProcessor.push_to_hub.__doc__ = BaseVideoProcessor.push_to_hub.__doc__.format(
        object="video processor", object_class="AutoVideoProcessor", object_files="video processor file"
    )
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/video_processing_utils.py",
"license": "Apache License 2.0",
"lines": 768,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/video_utils.py | # Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
from collections.abc import Callable, Iterable, Mapping
from contextlib import redirect_stdout
from dataclasses import dataclass, fields
from io import BytesIO
from typing import NewType, Union
from urllib.parse import urlparse
import httpx
import numpy as np
from .image_transforms import PaddingMode, to_channel_dimension_format
from .image_utils import ChannelDimension, infer_channel_dimension_format, is_valid_image
from .utils import (
is_av_available,
is_cv2_available,
is_decord_available,
is_numpy_array,
is_torch_available,
is_torch_tensor,
is_torchcodec_available,
is_torchvision_available,
is_vision_available,
is_yt_dlp_available,
logging,
requires_backends,
)
if is_vision_available():
import PIL.Image
if is_torchvision_available():
from torchvision import io as torchvision_io
if is_torch_available():
import torch
logger = logging.get_logger(__name__)

# Lightweight aliases documenting that a "video" may also be passed as a remote URL or a local file path.
URL = NewType("URL", str)
Path = NewType("Path", str)

# All accepted video input formats: already-decoded frames (PIL / numpy / torch; single video,
# flat batch, or one-level nested batch) or references to be decoded later (URLs / paths).
VideoInput = Union[
    list["PIL.Image.Image"],
    np.ndarray,
    "torch.Tensor",
    list[np.ndarray],
    list["torch.Tensor"],
    list[list["PIL.Image.Image"]],
    list[list[np.ndarray]],
    list[list["torch.Tensor"]],
    URL,
    list[URL],
    list[list[URL]],
    Path,
    list[Path],
    list[list[Path]],
]
@dataclass
class VideoMetadata(Mapping):
total_num_frames: int
fps: float | None = None
width: int | None = None
height: int | None = None
duration: float | None = None
video_backend: str | None = None
frames_indices: list[int] | None = None
def __iter__(self):
return (f.name for f in fields(self))
def __len__(self):
return len(fields(self))
def __getitem__(self, item):
return getattr(self, item)
def __setitem__(self, key, value):
return setattr(self, key, value)
@property
def timestamps(self) -> list[float]:
"Timestamps of the sampled frames in seconds."
if self.fps is None or self.frames_indices is None:
raise ValueError("Cannot infer video `timestamps` when `fps` or `frames_indices` is None.")
return [frame_idx / self.fps for frame_idx in self.frames_indices]
@property
def sampled_fps(self) -> float:
"FPS of the sampled video."
if self.frames_indices is None or self.total_num_frames is None or self.fps is None:
return self.fps or 24
return len(self.frames_indices) / self.total_num_frames * self.fps
def update(self, dictionary):
for key, value in dictionary.items():
if hasattr(self, key):
setattr(self, key, value)
# Accepted shapes for user-passed metadata: a single mapping/VideoMetadata, a flat batch, or a nested batch.
VideoMetadataType = VideoMetadata | dict | list[dict | VideoMetadata] | list[list[dict | VideoMetadata]]
def is_valid_video_frame(frame):
    """Return whether `frame` is a single video frame: a PIL image, or a 3D numpy/torch array."""
    if isinstance(frame, PIL.Image.Image):
        return True
    return (is_numpy_array(frame) or is_torch_tensor(frame)) and frame.ndim == 3
def is_valid_video(video):
    """Return whether `video` is a single video: a 4D array/tensor, or a non-empty list of valid frames."""
    if not isinstance(video, (list, tuple)):
        return (is_numpy_array(video) or is_torch_tensor(video)) and video.ndim == 4
    # Note: an empty list is returned as-is (falsy), matching the original truthiness contract.
    return video and all(is_valid_video_frame(frame) for frame in video)
def valid_videos(videos):
    """Return whether `videos` is a well-formed video input (single video, list of frames, or batch list)."""
    # A list may hold one video as frames, or a batch of videos/frames.
    if isinstance(videos, (list, tuple)):
        for entry in videos:
            if not (is_valid_video(entry) or is_valid_video_frame(entry)):
                return False
        return True
    # Non-list input must be a single 4D video array.
    # NOTE(review): a 5D batched tensor is rejected here even though `is_batched_video` accepts
    # one — confirm this asymmetry is intentional.
    if not is_valid_video(videos) or videos.ndim == 5:
        return False
    return True
def is_batched_video(videos):
    """Return whether `videos` is a batch: a list whose first entry is a full video, or a 5D array/tensor."""
    if isinstance(videos, (list, tuple)):
        return is_valid_video(videos[0])
    if (is_numpy_array(videos) or is_torch_tensor(videos)) and videos.ndim == 5:
        return True
    return False
def is_scaled_video(video: np.ndarray) -> bool:
    """
    Checks to see whether the pixel values have already been rescaled to [0, 1].
    """
    # Heuristic only: a float video may still legitimately contain values in [0, 255].
    lowest = np.min(video)
    highest = np.max(video)
    return lowest >= 0 and highest <= 1
def convert_pil_frames_to_video(videos: list[VideoInput]) -> list[Union[np.ndarray, "torch.Tensor"]]:
    """
    Given a batch of videos, converts each video to a 4D array. If video is already in array type,
    it is simply returned. We assume that all inputs in the list are in the same format, based on the type of the first element.

    Args:
        videos (`VideoInput`):
            Video inputs to turn into a list of videos.
    """
    first_video = videos[0]
    # Only lists of PIL-image frames need conversion; everything else passes through untouched.
    if not (isinstance(first_video, (list, tuple)) and is_valid_image(first_video[0])):
        return videos
    return [np.stack([np.array(frame) for frame in video]) for video in videos]
def make_batched_videos(videos) -> list[Union[np.ndarray, "torch.Tensor", "URL", "Path"]]:
    """
    Ensure that the input is a list of videos. If the input is a single video, it is converted to a list of length 1.
    If the input is a batch of videos, it is converted to a list of 4D video arrays. Videos passed as list `PIL.Image`
    frames are converted to 4D arrays.

    We assume that all inputs in the list are in the same format, based on the type of the first element.

    Args:
        videos (`VideoInput`):
            Video inputs to turn into a list of videos.
    """
    # Early exit for deeply nested list of image frame paths. We shouldn't flatten them
    try:
        if isinstance(videos[0][0], list) and isinstance(videos[0][0][0], str):
            return [image_paths for sublist in videos for image_paths in sublist]
    except (IndexError, TypeError):
        # Input is not indexable that deep; fall through to the generic handling below.
        pass
    if is_batched_video(videos):
        return convert_pil_frames_to_video(list(videos))
    elif isinstance(videos, str) or is_valid_video(videos):
        # A single video (path/URL or 4D array): wrap into a batch of one.
        return convert_pil_frames_to_video([videos])
    # only one frame passed, thus we unsqueeze time dim
    elif is_valid_image(videos):
        if isinstance(videos, PIL.Image.Image):
            videos = np.array(videos)
        return [videos[None, ...]]
    elif not isinstance(videos, list):
        raise ValueError(
            f"Invalid video input. Expected either a list of video frames or an input of 4 or 5 dimensions, but got"
            f" type {type(videos)}."
        )
    # Recursively flatten any nested structure
    flat_videos_list = []
    for item in videos:
        if isinstance(item, str) or is_valid_video(item):
            flat_videos_list.append(item)
        elif isinstance(item, list) and item:
            flat_videos_list.extend(make_batched_videos(item))
    flat_videos_list = convert_pil_frames_to_video(flat_videos_list)
    return flat_videos_list
def make_batched_metadata(videos: VideoInput, video_metadata: VideoMetadataType) -> list[VideoMetadata]:
    """
    Normalize user-provided metadata into a flat list of `VideoMetadata`, one entry per video.

    Args:
        videos (`VideoInput`):
            The decoded videos, used to infer defaults when no metadata is given.
        video_metadata (`VideoMetadataType`):
            A single metadata object/dict, a flat list, or a nested list of them; may be `None`.

    Returns:
        `list[VideoMetadata]`: One metadata object per video.
    """
    if video_metadata is None:
        # No metadata given: infer what we can directly from the decoded videos.
        inferred = []
        for video in videos:
            size = get_video_size(video) if is_valid_video(video) else (None, None)
            inferred.append(
                {
                    "total_num_frames": len(video),
                    "fps": None,
                    "duration": None,
                    "frames_indices": list(range(len(video))),
                    "height": size[0],
                    "width": size[1],
                }
            )
        video_metadata = inferred

    if not isinstance(video_metadata, list):
        # Single object: wrap into a batch of one.
        return [VideoMetadata(**video_metadata)]
    if isinstance(video_metadata[0], list):
        # Nested list: flatten one level while wrapping each dict.
        return [VideoMetadata(**metadata) for metadata_list in video_metadata for metadata in metadata_list]
    if isinstance(video_metadata[0], dict):
        return [VideoMetadata(**metadata) for metadata in video_metadata]
    # Already a flat list of VideoMetadata objects.
    return video_metadata
def get_video_size(video: np.ndarray, channel_dim: ChannelDimension | None = None) -> tuple[int, int]:
    """
    Returns the (height, width) dimensions of the video.

    Args:
        video (`np.ndarray`):
            The video to get the dimensions of.
        channel_dim (`ChannelDimension`, *optional*):
            Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the video.

    Returns:
        A tuple of the video's height and width.
    """
    if channel_dim is None:
        channel_dim = infer_channel_dimension_format(video, num_channels=(1, 3, 4))

    if channel_dim == ChannelDimension.FIRST:
        height, width = video.shape[-2], video.shape[-1]
    elif channel_dim == ChannelDimension.LAST:
        height, width = video.shape[-3], video.shape[-2]
    else:
        raise ValueError(f"Unsupported data format: {channel_dim}")
    return height, width
def get_uniform_frame_indices(total_num_frames: int, num_frames: int | None = None):
"""
Creates a numpy array for uniform sampling of `num_frame` frames from `total_num_frames`
when loading a video.
Args:
total_num_frames (`int`):
Total number of frames that a video has.
num_frames (`int`, *optional*):
Number of frames to sample uniformly. If not specified, all frames are sampled.
Returns:
np.ndarray: np array of frame indices that will be sampled.
"""
if num_frames is not None:
indices = np.arange(0, total_num_frames, total_num_frames / num_frames).astype(int)
else:
indices = np.arange(0, total_num_frames).astype(int)
return indices
def default_sample_indices_fn(metadata: VideoMetadata, num_frames=None, fps=None, **kwargs):
"""
A default sampling function that replicates the logic used in get_uniform_frame_indices,
while optionally handling `fps` if `num_frames` is not provided.
Args:
metadata (`VideoMetadata`):
`VideoMetadata` object containing metadata about the video, such as "total_num_frames" or "fps".
num_frames (`int`, *optional*):
Number of frames to sample uniformly.
fps (`int` or `float`, *optional*):
Desired frames per second. Takes priority over num_frames if both are provided.
Returns:
`np.ndarray`: Array of frame indices to sample.
"""
total_num_frames = metadata.total_num_frames
video_fps = metadata.fps
# If num_frames is not given but fps is, calculate num_frames from fps
if num_frames is None and fps is not None:
num_frames = int(total_num_frames / video_fps * fps)
if num_frames > total_num_frames:
raise ValueError(
f"When loading the video with fps={fps}, we computed num_frames={num_frames} "
f"which exceeds total_num_frames={total_num_frames}. Check fps or video metadata."
)
if num_frames is not None:
indices = np.arange(0, total_num_frames, total_num_frames / num_frames, dtype=int)
else:
indices = np.arange(0, total_num_frames, dtype=int)
return indices
def read_video_opencv(
    video_path: Union["URL", "Path"],
    sample_indices_fn: Callable,
    **kwargs,
) -> tuple[np.ndarray, VideoMetadata]:
    """
    Decode a video using the OpenCV backend.

    Args:
        video_path (`str`):
            Path to the video file.
        sample_indices_fn (`Callable`):
            A callable function that will return indices at which the video should be sampled. If the video has to be loaded using
            by a different sampling technique than provided by `num_frames` or `fps` arguments, one should provide their own `sample_indices_fn`.
            If not provided, simple uniform sampling with fps is performed.
            Example:
            def sample_indices_fn(metadata, **kwargs):
                return np.linspace(0, metadata.total_num_frames - 1, num_frames, dtype=int)

    Returns:
        tuple[`np.ndarray`, `VideoMetadata`]: A tuple containing:
            - Numpy array of frames in RGB (shape: [num_frames, height, width, 3]).
            - `VideoMetadata` object.
    """
    # Lazy import cv2
    requires_backends(read_video_opencv, ["cv2"])
    import cv2

    video = cv2.VideoCapture(video_path)
    total_num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    video_fps = video.get(cv2.CAP_PROP_FPS)
    duration = total_num_frames / video_fps if video_fps else 0
    metadata = VideoMetadata(
        total_num_frames=int(total_num_frames),
        fps=float(video_fps),
        duration=float(duration),
        video_backend="opencv",
        height=int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        width=int(video.get(cv2.CAP_PROP_FRAME_WIDTH)),
    )
    indices = sample_indices_fn(metadata=metadata, **kwargs)

    index = 0
    frames = []
    # Sequential decode, keeping only the sampled frames (OpenCV has no random-access batch read).
    # NOTE(review): `index in indices` on an array is a linear scan per frame — a set would be
    # faster for long videos; confirm before changing.
    while video.isOpened():
        success, frame = video.read()
        if not success:
            break
        if index in indices:
            height, width, channel = frame.shape
            # OpenCV decodes to BGR; convert to RGB before returning.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frames.append(frame[0:height, 0:width, 0:channel])
        if success:
            index += 1
        if index >= total_num_frames:
            break

    video.release()
    metadata.frames_indices = indices
    return np.stack(frames), metadata
def read_video_decord(
    video_path: Union["URL", "Path"],
    sample_indices_fn: Callable,
    **kwargs,
):
    """
    Decode a video using the Decord backend.

    Args:
        video_path (`str`):
            Path to the video file.
        sample_indices_fn (`Callable`):
            A callable function that will return indices at which the video should be sampled. If the video has to be loaded using
            by a different sampling technique than provided by `num_frames` or `fps` arguments, one should provide their own `sample_indices_fn`.
            If not provided, simple uniform sampling with fps is performed.
            Example:
            def sample_indices_fn(metadata, **kwargs):
                return np.linspace(0, metadata.total_num_frames - 1, num_frames, dtype=int)

    Returns:
        tuple[`np.array`, `VideoMetadata`]: A tuple containing:
            - Numpy array of frames in RGB (shape: [num_frames, height, width, 3]).
            - `VideoMetadata` object.
    """
    # Lazy import from decord
    requires_backends(read_video_decord, ["decord"])
    from decord import VideoReader, cpu

    vr = VideoReader(uri=video_path, ctx=cpu(0))  # decord has problems with gpu
    video_fps = vr.get_avg_fps()
    total_num_frames = len(vr)
    duration = total_num_frames / video_fps if video_fps else 0
    metadata = VideoMetadata(
        total_num_frames=int(total_num_frames),
        fps=float(video_fps),
        duration=float(duration),
        video_backend="decord",
    )
    indices = sample_indices_fn(metadata=metadata, **kwargs)

    # Random-access batch read of exactly the sampled frames.
    video = vr.get_batch(indices).asnumpy()
    # Height/width are only known after decoding, so fill them into the metadata now.
    metadata.update(
        {
            "frames_indices": indices,
            "height": video.shape[1],
            "width": video.shape[2],
        }
    )
    return video, metadata
def read_video_pyav(
    video_path: Union["URL", "Path"],
    sample_indices_fn: Callable,
    **kwargs,
):
    """
    Decode the video with PyAV decoder.

    Args:
        video_path (`str`):
            Path to the video file.
        sample_indices_fn (`Callable`, *optional*):
            A callable function that will return indices at which the video should be sampled. If the video has to be loaded using
            by a different sampling technique than provided by `num_frames` or `fps` arguments, one should provide their own `sample_indices_fn`.
            If not provided, simple uniform sampling with fps is performed.
            Example:
            def sample_indices_fn(metadata, **kwargs):
                return np.linspace(0, metadata.total_num_frames - 1, num_frames, dtype=int)

    Returns:
        tuple[`np.array`, `VideoMetadata`]: A tuple containing:
            - Numpy array of frames in RGB (shape: [num_frames, height, width, 3]).
            - `VideoMetadata` object.
    """
    # Lazy import av
    requires_backends(read_video_pyav, ["av"])
    import av

    container = av.open(video_path)
    total_num_frames = container.streams.video[0].frames
    video_fps = container.streams.video[0].average_rate  # should we better use `av_guess_frame_rate`?
    duration = total_num_frames / video_fps if video_fps else 0
    metadata = VideoMetadata(
        total_num_frames=int(total_num_frames),
        fps=float(video_fps),
        duration=float(duration),
        video_backend="pyav",
        height=container.streams.video[0].height,
        width=container.streams.video[0].width,
    )
    indices = sample_indices_fn(metadata=metadata, **kwargs)

    frames = []
    container.seek(0)
    end_index = indices[-1]
    # Decode sequentially and stop once the last sampled index has been passed.
    for i, frame in enumerate(container.decode(video=0)):
        if i > end_index:
            break
        # NOTE(review): `i >= 0` is always true here; membership in `indices` is the real filter.
        if i >= 0 and i in indices:
            frames.append(frame)

    video = np.stack([x.to_ndarray(format="rgb24") for x in frames])
    metadata.frames_indices = indices
    return video, metadata
def read_video_torchvision(
    video_path: Union["URL", "Path"],
    sample_indices_fn: Callable,
    **kwargs,
):
    """
    Decode the video with torchvision decoder. Deprecated in favor of torchcodec.

    Args:
        video_path (`str`):
            Path to the video file.
        sample_indices_fn (`Callable`, *optional*):
            A callable function that will return indices at which the video should be sampled. If the video has to be loaded using
            by a different sampling technique than provided by `num_frames` or `fps` arguments, one should provide their own `sample_indices_fn`.
            If not provided, simple uniform sampling with fps is performed.
            Example:
            def sample_indices_fn(metadata, **kwargs):
                return np.linspace(0, metadata.total_num_frames - 1, num_frames, dtype=int)

    Returns:
        tuple[`torch.Tensor`, `VideoMetadata`]: A tuple containing:
            - Torch tensor of frames in RGB (shape: [num_frames, height, width, 3]).
            - `VideoMetadata` object.
    """
    warnings.warn(
        "Using `torchvision` for video decoding is deprecated and will be removed in future versions. "
        "Please use `torchcodec` instead."
    )
    # The whole video is decoded up-front; sampling happens afterwards by indexing.
    video, _, info = torchvision_io.read_video(
        video_path,
        start_pts=0.0,
        end_pts=None,
        pts_unit="sec",
        output_format="TCHW",
    )
    video_fps = info["video_fps"]
    total_num_frames = video.size(0)
    duration = total_num_frames / video_fps if video_fps else 0
    metadata = VideoMetadata(
        total_num_frames=int(total_num_frames),
        fps=float(video_fps),
        duration=float(duration),
        video_backend="torchvision",
    )
    indices = sample_indices_fn(metadata=metadata, **kwargs)

    # Keep only the sampled frames and record the final dimensions in the metadata.
    video = video[indices].contiguous()
    metadata.update(
        {
            "frames_indices": indices,
            "height": video.shape[2],
            "width": video.shape[3],
        }
    )
    return video, metadata
def read_video_torchcodec(
    video_path: Union["URL", "Path"],
    sample_indices_fn: Callable,
    **kwargs,
):
    """
    Decode the video with torchcodec decoder.

    Args:
        video_path (`str`):
            Path to the video file.
        sample_indices_fn (`Callable`):
            A callable function that will return indices at which the video should be sampled. If the video has to be loaded using
            by a different sampling technique than provided by `num_frames` or `fps` arguments, one should provide their own `sample_indices_fn`.
            If not provided, simple uniform sampling with fps is performed.
            Example:
            def sample_indices_fn(metadata, **kwargs):
                return np.linspace(0, metadata.total_num_frames - 1, num_frames, dtype=int)

    Returns:
        Tuple[`torch.Tensor`, `VideoMetadata`]: A tuple containing:
            - Torch tensor of frames in RGB (shape: [num_frames, height, width, 3]).
            - `VideoMetadata` object.
    """
    # Lazy import torchcodec
    requires_backends(read_video_torchcodec, ["torchcodec"])
    from torchcodec.decoders import VideoDecoder

    # VideoDecoder expects a string for device, default to "cpu" if None
    decoder = VideoDecoder(
        video_path,
        # Interestingly `exact` mode takes less than approximate when we load the whole video
        seek_mode="exact",
        # Allow FFmpeg decide on the number of threads for efficiency
        num_ffmpeg_threads=0,
        device=kwargs.get("device", "cpu"),
    )
    total_num_frames = decoder.metadata.num_frames
    video_fps = decoder.metadata.average_fps
    metadata = VideoMetadata(
        total_num_frames=total_num_frames,
        fps=video_fps,
        duration=decoder.metadata.duration_seconds,
        video_backend="torchcodec",
        height=decoder.metadata.height,
        width=decoder.metadata.width,
    )
    indices = sample_indices_fn(metadata=metadata, **kwargs)

    # Random-access read of exactly the sampled frames.
    video = decoder.get_frames_at(indices=indices).data.contiguous()
    metadata.frames_indices = indices
    return video, metadata
# Maps a `backend` name (as accepted by `load_video`) to its decoder implementation.
VIDEO_DECODERS = {
    "decord": read_video_decord,
    "opencv": read_video_opencv,
    "pyav": read_video_pyav,
    "torchvision": read_video_torchvision,
    "torchcodec": read_video_torchcodec,
}
def load_video(
    video: VideoInput,
    num_frames: int | None = None,
    fps: int | float | None = None,
    backend: str = "pyav",
    sample_indices_fn: Callable | None = None,
    **kwargs,
) -> np.ndarray:
    """
    Loads `video` to a numpy array.

    Args:
        video (`VideoInput`):
            The video to convert to the numpy array format. Can be a link to video or local path.
        num_frames (`int`, *optional*):
            Number of frames to sample uniformly. If not passed, the whole video is loaded.
        fps (`int` or `float`, *optional*):
            Number of frames to sample per second. Should be passed only when `num_frames=None`.
            If not specified and `num_frames==None`, all frames are sampled.
        backend (`str`, *optional*, defaults to `"pyav"`):
            The backend to use when loading the video. Can be any of ["decord", "pyav", "opencv", "torchvision", "torchcodec"]. Defaults to "pyav".
        sample_indices_fn (`Callable`, *optional*):
            A callable function that will return indices at which the video should be sampled. If the video has to be loaded
            using a different sampling technique than provided by `num_frames` or `fps` arguments, one should provide their own `sample_indices_fn`.
            If not provided, simple uniform sampling with fps is performed, otherwise `sample_indices_fn` has priority over other args.
            The function expects as input all args along with all kwargs passed to `load_video` and should output valid
            indices at which the video should be sampled. For example:

            Example:
            def sample_indices_fn(metadata, **kwargs):
                return np.linspace(0, metadata.total_num_frames - 1, num_frames, dtype=int)

    Returns:
        tuple[`np.ndarray`, Dict]: A tuple containing:
            - Numpy array of frames in RGB (shape: [num_frames, height, width, 3]).
            - Metadata dictionary.
    """
    # If `sample_indices_fn` is given, we can accept any args as those might be needed by custom `sample_indices_fn`
    if fps is not None and num_frames is not None and sample_indices_fn is None:
        raise ValueError(
            "`num_frames`, `fps`, and `sample_indices_fn` are mutually exclusive arguments, please use only one!"
        )

    # If user didn't pass a sampling function, create one on the fly with default logic
    if sample_indices_fn is None:

        def sample_indices_fn_func(metadata, **fn_kwargs):
            return default_sample_indices_fn(metadata, num_frames=num_frames, fps=fps, **fn_kwargs)

        sample_indices_fn = sample_indices_fn_func

    # Early exit if provided an array or `PIL` frames
    if not isinstance(video, str):
        metadata = [None] * len(video)
        return video, metadata

    # can also load with decord, but not cv2/torchvision; both will fail in case of url links.
    # Validation is hoisted before the download branches so we fail fast instead of
    # fetching bytes we cannot decode (same exceptions as before, raised earlier).
    video_is_url = video.startswith(("http://", "https://"))
    if video_is_url and backend == "opencv":
        raise ValueError("If you are trying to load a video from URL, you cannot use 'opencv' as backend")
    if (
        (not is_decord_available() and backend == "decord")
        or (not is_av_available() and backend == "pyav")
        or (not is_cv2_available() and backend == "opencv")
        or (not is_torchvision_available() and backend == "torchvision")
        or (not is_torchcodec_available() and backend == "torchcodec")
    ):
        raise ImportError(
            f"You chose backend={backend} for loading the video but the required library is not found in your environment "
            f"Make sure to install {backend} before loading the video."
        )

    if urlparse(video).netloc in ["www.youtube.com", "youtube.com"]:
        if not is_yt_dlp_available():
            raise ImportError("To load a video from YouTube url you have to install `yt_dlp` first.")
        # Lazy import from yt_dlp
        requires_backends(load_video, ["yt_dlp"])
        from yt_dlp import YoutubeDL

        # yt_dlp writes progress to stdout; capture it so it doesn't pollute the caller's output.
        buffer = BytesIO()
        with redirect_stdout(buffer), YoutubeDL() as f:
            f.download([video])
        bytes_obj = buffer.getvalue()
        file_obj = BytesIO(bytes_obj)
    elif video_is_url:
        file_obj = BytesIO(httpx.get(video, follow_redirects=True).content)
    elif os.path.isfile(video):
        file_obj = video
    else:
        raise TypeError("Incorrect format used for video. Should be an url linking to an video or a local path.")

    video_decoder = VIDEO_DECODERS[backend]
    video, metadata = video_decoder(file_obj, sample_indices_fn, **kwargs)
    return video, metadata
def convert_to_rgb(
    video: np.ndarray,
    input_data_format: str | ChannelDimension | None = None,
) -> np.ndarray:
    """
    Convert video to RGB by blending the transparency layer if it's in RGBA format, otherwise simply returns it.

    Args:
        video (`np.ndarray`):
            The video to convert.
        input_data_format (`ChannelDimension`, *optional*):
            The channel dimension format of the input video. If unset, will use the inferred format from the input.

    Returns:
        `np.ndarray`: The video in channels-first layout with the alpha layer blended away
        (RGBA inputs with transparency come back as 3-channel float arrays).
    """
    if not isinstance(video, np.ndarray):
        raise TypeError(f"Video has to be a numpy array to convert to RGB format, but found {type(video)}")

    # np.array usually comes with ChannelDimension.LAST so let's convert it
    if input_data_format is None:
        input_data_format = infer_channel_dimension_format(video)
    video = to_channel_dimension_format(video, ChannelDimension.FIRST, input_channel_dim=input_data_format)

    # 3 channels for RGB already
    if video.shape[-3] == 3:
        return video

    # Grayscale video so we repeat it 3 times for each channel
    if video.shape[-3] == 1:
        return video.repeat(3, -3)

    # Fully opaque alpha channel: nothing to blend, return the video as-is.
    if not (video[..., 3, :, :] < 255).any():
        return video

    # There is a transparency layer, blend it with a white background.
    # Bug fix: blend alpha with the RGB channels (`:3`), not with the alpha channel
    # itself. The `3:4` slice keeps the channel axis so `alpha` (shape [..., 1, H, W])
    # broadcasts cleanly against the RGB channels (shape [..., 3, H, W]).
    alpha = video[..., 3:4, :, :] / 255.0
    # The division promotes the result to float; the alpha channel is dropped from the output.
    video = (1 - alpha) * 255 + alpha * video[..., :3, :, :]
    return video
def pad(
    video: np.ndarray,
    padding: int | tuple[int, int] | Iterable[tuple[int, int]],
    mode: PaddingMode = PaddingMode.CONSTANT,
    constant_values: float | Iterable[float] = 0.0,
    data_format: str | ChannelDimension | None = None,
    input_data_format: str | ChannelDimension | None = None,
) -> np.ndarray:
    """
    Pads the `video` with the specified (height, width) `padding` and `mode`.

    Args:
        video (`np.ndarray`):
            The video to pad.
        padding (`int` or `tuple[int, int]` or `Iterable[tuple[int, int]]`):
            Padding to apply to the edges of the height, width axes. Can be one of three formats:
            - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.
            - `((before, after),)` yields same before and after pad for height and width.
            - `(pad,)` or int is a shortcut for before = after = pad width for all axes.
        mode (`PaddingMode`):
            The padding mode to use. Can be one of:
                - `"constant"`: pads with a constant value.
                - `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the
                  vector along each axis.
                - `"replicate"`: pads with the replication of the last value on the edge of the array along each axis.
                - `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array.
        constant_values (`float` or `Iterable[float]`, *optional*):
            The value to use for the padding if `mode` is `"constant"`.
        data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format for the output video. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: video in (num_frames, num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: video in (num_frames, height, width, num_channels) format.
            If unset, will use same as the input video.
        input_data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format for the input video. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: video in (num_frames, num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: video in (num_frames, height, width, num_channels) format.
            If unset, will use the inferred format of the input video.

    Raises:
        `ValueError`: if `padding` / `constant_values` have an unsupported format, or `mode` is unknown.

    Returns:
        `np.ndarray`: The padded video.
    """
    if input_data_format is None:
        input_data_format = infer_channel_dimension_format(video)

    def _expand_for_data_format(values):
        """
        Convert values to be in the format expected by np.pad based on the data format.
        """
        if isinstance(values, (int, float)):
            values = ((values, values), (values, values))
        elif isinstance(values, tuple) and len(values) == 1:
            values = ((values[0], values[0]), (values[0], values[0]))
        elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], int):
            values = (values, values)
        elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], tuple):
            pass
        else:
            raise ValueError(f"Unsupported format: {values}")

        # add 0 for channel dimension
        values = (
            ((0, 0), (0, 0), *values) if input_data_format == ChannelDimension.FIRST else ((0, 0), *values, (0, 0))
        )

        # Add additional padding if there's a batch dimension.
        # Bug fix: the batch axis needs a `(before, after)` pair like every other axis;
        # prepending a bare `0` made the pad spec ragged, which `np.pad` rejects.
        values = ((0, 0), *values) if video.ndim == 5 else values
        return values

    # Bug fix: NumPy has no "replicate" pad mode — edge replication is called "edge" in np.pad.
    padding_map = {
        PaddingMode.CONSTANT: "constant",
        PaddingMode.REFLECT: "reflect",
        PaddingMode.REPLICATE: "edge",
        PaddingMode.SYMMETRIC: "symmetric",
    }
    padding = _expand_for_data_format(padding)

    pad_kwargs = {}
    if mode not in padding_map:
        raise ValueError(f"Invalid padding mode: {mode}")
    elif mode == PaddingMode.CONSTANT:
        # Only the constant mode accepts fill values; other np.pad modes reject the kwarg.
        pad_kwargs["constant_values"] = _expand_for_data_format(constant_values)

    video = np.pad(video, padding, mode=padding_map[mode], **pad_kwargs)
    video = to_channel_dimension_format(video, data_format, input_data_format) if data_format is not None else video
    return video
def group_videos_by_shape(
    videos: list["torch.Tensor"],
) -> tuple[dict[tuple[int, int], "torch.Tensor"], dict[int, tuple[tuple[int, int], int]]]:
    """
    Groups videos by shape.
    Returns a dictionary keyed by (num_frames, height, width) holding a stacked tensor of all
    videos with that shape, plus a dictionary mapping each video's original position to its
    (shape key, index within the group) so the order can be reconstructed later.
    """
    buckets: dict = {}
    index_map = {}
    for position, vid in enumerate(videos):
        frames = vid.shape[-4]  # video format BTCHW
        key = (frames, *vid.shape[-2:])
        bucket = buckets.setdefault(key, [])
        bucket.append(vid)
        index_map[position] = (key, len(bucket) - 1)

    # Stack each same-shape group into a single batched tensor.
    stacked = {key: torch.stack(group, dim=0) for key, group in buckets.items()}
    return stacked, index_map
def reorder_videos(
    processed_videos: dict[tuple[int, int], "torch.Tensor"],
    grouped_videos_index: dict[int, tuple[tuple[int, int], int]],
) -> list["torch.Tensor"]:
    """
    Reconstructs a list of videos in the original order.
    """
    ordered = []
    for original_position in range(len(grouped_videos_index)):
        shape_key, index_in_group = grouped_videos_index[original_position]
        ordered.append(processed_videos[shape_key][index_in_group])
    return ordered
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/video_utils.py",
"license": "Apache License 2.0",
"lines": 756,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/auto/test_video_processing_auto.py | # Copyright 2025 the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
VIDEO_PROCESSOR_MAPPING,
AutoConfig,
AutoVideoProcessor,
LlavaOnevisionConfig,
LlavaOnevisionVideoProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_torch
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_video_processing import CustomVideoProcessor # noqa E402
@require_torch
class AutoVideoProcessorTest(unittest.TestCase):
    """Tests `AutoVideoProcessor` resolution from hub shortcuts, local directories/files,
    dynamic (remote-code) classes and manually registered mappings."""

    def setUp(self):
        # Don't block tests on the interactive "trust remote code?" prompt.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    @staticmethod
    def _dump_json(path, payload):
        """Serialize `payload` to `path` as JSON.

        Fixes a file-handle leak: the previous `json.dump(..., open(path, "w"))`
        pattern never closed the handle, which raises ResourceWarning and can
        leave writes unflushed.
        """
        with open(path, "w") as f:
            json.dump(payload, f)

    def test_video_processor_from_model_shortcut(self):
        config = AutoVideoProcessor.from_pretrained("llava-hf/llava-onevision-qwen2-0.5b-ov-hf")
        self.assertIsInstance(config, LlavaOnevisionVideoProcessor)

    def test_video_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "video_preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            self._dump_json(
                processor_tmpfile,
                {
                    "video_processor_type": "LlavaOnevisionVideoProcessor",
                    "processor_class": "LlavaOnevisionProcessor",
                },
            )
            self._dump_json(config_tmpfile, {"model_type": "llava_onevision"})

            config = AutoVideoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, LlavaOnevisionVideoProcessor)

    def test_video_processor_from_local_directory_from_preprocessor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            self._dump_json(
                processor_tmpfile,
                {
                    "video_processor_type": "LlavaOnevisionVideoProcessor",
                    "processor_class": "LlavaOnevisionProcessor",
                },
            )
            self._dump_json(config_tmpfile, {"model_type": "llava_onevision"})

            config = AutoVideoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, LlavaOnevisionVideoProcessor)

    def test_video_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = LlavaOnevisionConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "video_preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            self._dump_json(
                processor_tmpfile,
                {
                    "video_processor_type": "LlavaOnevisionVideoProcessor",
                    "processor_class": "LlavaOnevisionProcessor",
                },
            )
            self._dump_json(config_tmpfile, {"model_type": "llava_onevision"})

            # remove video_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoVideoProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop("video_processor_type")
            config = LlavaOnevisionVideoProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoVideoProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)
            self.assertIsInstance(config, LlavaOnevisionVideoProcessor)

    def test_video_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "video_preprocessor_config.json"
            self._dump_json(
                processor_tmpfile,
                {
                    "video_processor_type": "LlavaOnevisionVideoProcessor",
                    "processor_class": "LlavaOnevisionProcessor",
                },
            )

            config = AutoVideoProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, LlavaOnevisionVideoProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "llava-hf/llava-doesnt-exist is not a local folder and is not a valid model identifier",
        ):
            AutoVideoProcessor.from_pretrained("llava-hf/llava-doesnt-exist")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            AutoVideoProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_video_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "Can't load video processor for 'hf-internal-testing/config-no-model'.",
        ):
            AutoVideoProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_video_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            AutoVideoProcessor.from_pretrained("hf-internal-testing/test_dynamic_video_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            AutoVideoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_video_processor", trust_remote_code=False
            )

        video_processor = AutoVideoProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_video_processor", trust_remote_code=True
        )
        self.assertEqual(video_processor.__class__.__name__, "NewVideoProcessor")

        # Test the dynamic module is loaded only once.
        reloaded_video_processor = AutoVideoProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_video_processor", trust_remote_code=True
        )
        self.assertIs(video_processor.__class__, reloaded_video_processor.__class__)

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            video_processor.save_pretrained(tmp_dir)
            reloaded_video_processor = AutoVideoProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_video_processor.__class__.__name__, "NewVideoProcessor")

    def test_new_video_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoVideoProcessor.register(CustomConfig, CustomVideoProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoVideoProcessor.register(LlavaOnevisionConfig, LlavaOnevisionVideoProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "video_preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                self._dump_json(
                    processor_tmpfile,
                    {
                        "video_processor_type": "LlavaOnevisionVideoProcessor",
                        "processor_class": "LlavaOnevisionProcessor",
                    },
                )
                self._dump_json(config_tmpfile, {"model_type": "llava_onevision"})

                video_processor = CustomVideoProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                video_processor.save_pretrained(tmp_dir)
                new_video_processor = AutoVideoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_video_processor, CustomVideoProcessor)
        finally:
            # Undo the registrations so other tests see a pristine mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in VIDEO_PROCESSOR_MAPPING._extra_content:
                del VIDEO_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_video_processor_conflict(self):
        class NewVideoProcessor(LlavaOnevisionVideoProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoVideoProcessor.register(CustomConfig, NewVideoProcessor)
            # If remote code is not set, the default is to use local
            video_processor = AutoVideoProcessor.from_pretrained("hf-internal-testing/test_dynamic_video_processor")
            self.assertEqual(video_processor.__class__.__name__, "NewVideoProcessor")
            self.assertTrue(video_processor.is_local)
            # If remote code is disabled, we load the local one.
            video_processor = AutoVideoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_video_processor", trust_remote_code=False
            )
            self.assertEqual(video_processor.__class__.__name__, "NewVideoProcessor")
            self.assertTrue(video_processor.is_local)
            # If remote is enabled, we load from the Hub
            video_processor = AutoVideoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_video_processor", trust_remote_code=True
            )
            self.assertEqual(video_processor.__class__.__name__, "NewVideoProcessor")
            self.assertTrue(not hasattr(video_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in VIDEO_PROCESSOR_MAPPING._extra_content:
                del VIDEO_PROCESSOR_MAPPING._extra_content[CustomConfig]
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/auto/test_video_processing_auto.py",
"license": "Apache License 2.0",
"lines": 201,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/llava_next_video/test_video_processing_llava_next_video.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torchvision_available, is_vision_available
from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
if is_vision_available():
if is_torchvision_available():
from transformers import LlavaNextVideoVideoProcessor
class LlavaNextVideoProcessingTester:
    """Holds the video-processor configuration used by the LLaVA-NeXT-Video
    processing tests and builds matching dummy video inputs."""

    def __init__(
        self,
        parent,
        batch_size=5,
        num_frames=8,
        num_channels=3,
        min_resolution=30,
        max_resolution=80,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=OPENAI_CLIP_MEAN,
        image_std=OPENAI_CLIP_STD,
        do_convert_rgb=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_frames = num_frames
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Fall back to the default geometry when none is provided.
        self.size = {"height": 20, "width": 20} if size is None else size
        self.do_center_crop = do_center_crop
        self.crop_size = {"height": 18, "width": 18} if crop_size is None else crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_video_processor_dict(self):
        """Return the kwargs dict used to instantiate the video processor under test."""
        keys = (
            "do_resize",
            "size",
            "do_center_crop",
            "crop_size",
            "do_normalize",
            "image_mean",
            "image_std",
            "do_convert_rgb",
        )
        return {key: getattr(self, key) for key in keys}

    def expected_output_video_shape(self, images):
        """Expected (frames, channels, height, width) of a processed video."""
        return self.num_frames, self.num_channels, self.crop_size["height"], self.crop_size["width"]

    def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
        """Build dummy video inputs matching this tester's configuration."""
        return prepare_video_inputs(
            batch_size=self.batch_size,
            num_frames=self.num_frames,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            return_tensors=return_tensors,
        )
@require_torch
@require_vision
class LlavaNextVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase):
    fast_video_processing_class = LlavaNextVideoVideoProcessor if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.video_processor_tester = LlavaNextVideoProcessingTester(self)

    @property
    def video_processor_dict(self):
        """Kwargs dict forwarded to the video processor constructor."""
        return self.video_processor_tester.prepare_video_processor_dict()

    def test_video_processor_properties(self):
        """Every configured option must surface as an attribute on the processor."""
        video_processing = self.fast_video_processing_class(**self.video_processor_dict)
        expected_attributes = (
            "do_resize",
            "size",
            "do_center_crop",
            "center_crop",
            "do_normalize",
            "image_mean",
            "image_std",
            "do_convert_rgb",
        )
        for attribute in expected_attributes:
            self.assertTrue(hasattr(video_processing, attribute))

    def test_video_processor_from_dict_with_kwargs(self):
        """`from_dict` keeps dict values and lets explicit kwargs override them."""
        video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict)
        self.assertEqual(video_processor.size, {"height": 20, "width": 20})
        self.assertEqual(video_processor.crop_size, {"height": 18, "width": 18})

        video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict, size=42, crop_size=84)
        self.assertEqual(video_processor.size, {"shortest_edge": 42})
        self.assertEqual(video_processor.crop_size, {"height": 84, "width": 84})
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/llava_next_video/test_video_processing_llava_next_video.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/llava_onevision/test_video_processing_llava_onevision.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torchvision_available, is_vision_available
from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
if is_vision_available():
if is_torchvision_available():
from transformers import LlavaOnevisionVideoProcessor
class LlavaOnevisionVideoProcessingTester:
    """Holds the video-processor configuration used by the LLaVA-OneVision
    processing tests and builds matching dummy video inputs."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_frames=8,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=OPENAI_CLIP_MEAN,
        image_std=OPENAI_CLIP_STD,
        do_convert_rgb=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_frames = num_frames
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Fall back to the default output geometry when none is given.
        self.size = {"height": 20, "width": 20} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_video_processor_dict(self):
        """Return the kwargs dict used to instantiate the video processor under test."""
        keys = ("do_resize", "size", "do_normalize", "image_mean", "image_std", "do_convert_rgb")
        return {key: getattr(self, key) for key in keys}

    def expected_output_video_shape(self, video):
        """Expected (frames, channels, height, width) of a processed video."""
        return self.num_frames, self.num_channels, self.size["height"], self.size["width"]

    def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
        """Build dummy video inputs matching this tester's configuration."""
        return prepare_video_inputs(
            batch_size=self.batch_size,
            num_frames=self.num_frames,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            return_tensors=return_tensors,
        )
@require_torch
@require_vision
class LlavaOnevisionVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase):
    fast_video_processing_class = LlavaOnevisionVideoProcessor if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.video_processor_tester = LlavaOnevisionVideoProcessingTester(self)

    @property
    def video_processor_dict(self):
        """Kwargs dict forwarded to the video processor constructor."""
        return self.video_processor_tester.prepare_video_processor_dict()

    def test_video_processor_properties(self):
        """Every configured option must surface as an attribute on the processor."""
        video_processing = self.fast_video_processing_class(**self.video_processor_dict)
        for attribute in ("do_resize", "size", "do_normalize", "image_mean", "image_std", "do_convert_rgb"):
            self.assertTrue(hasattr(video_processing, attribute))

    def test_video_processor_from_dict_with_kwargs(self):
        """`from_dict` keeps dict values and lets explicit kwargs override them."""
        video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict)
        self.assertEqual(video_processor.size, {"height": 20, "width": 20})

        video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict, size=42)
        self.assertEqual(video_processor.size, {"shortest_edge": 42})
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/llava_onevision/test_video_processing_llava_onevision.py",
"license": "Apache License 2.0",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/qwen2_vl/test_video_processing_qwen2_vl.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import tempfile
import unittest
import numpy as np
from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers.image_utils import get_image_size
from transformers.models.qwen2_vl.video_processing_qwen2_vl import smart_resize
if is_torchvision_available():
from transformers import Qwen2VLVideoProcessor
class Qwen2VLVideoProcessingTester:
    """Configuration holder for Qwen2-VL video-processing tests.

    Unlike fixed-size processors, Qwen2-VL flattens videos into temporal/spatial
    patches, so the expected output is a `[seq_len, hidden_dim]` sequence whose
    length depends on the `smart_resize`-d geometry of each video.
    """

    def __init__(
        self,
        parent,
        batch_size=5,
        num_frames=8,
        num_channels=3,
        min_resolution=30,
        max_resolution=80,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=OPENAI_CLIP_MEAN,
        image_std=OPENAI_CLIP_STD,
        do_convert_rgb=True,
        temporal_patch_size=2,
        patch_size=14,
        min_pixels=20 * 20,
        max_pixels=100 * 100,
        merge_size=2,
    ):
        size = size if size is not None else {"shortest_edge": 400, "longest_edge": 10000}
        self.parent = parent
        self.batch_size = batch_size
        self.num_frames = num_frames
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
        self.temporal_patch_size = temporal_patch_size
        self.patch_size = patch_size
        self.min_pixels = min_pixels
        self.max_pixels = max_pixels
        self.merge_size = merge_size

    def prepare_video_processor_dict(self):
        """Kwargs used to instantiate the processor. `size` is intentionally omitted:
        the output resolution is derived from `min_pixels`/`max_pixels` instead."""
        return {
            "do_resize": self.do_resize,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
            "temporal_patch_size": self.temporal_patch_size,
            "patch_size": self.patch_size,
            "min_pixels": self.min_pixels,
            "max_pixels": self.max_pixels,
            "merge_size": self.merge_size,
        }

    # Fix: dropped the stray `@require_vision` decorator that was on this method. Skip
    # decorators only take effect on unittest test methods; on a helper of a plain class
    # it did nothing and only suggested behavior that never happened.
    def expected_output_video_shape(self, videos, num_frames=None):
        """Compute the expected flattened patch shape `[seq_len, hidden_dim]` for `videos`.

        Args:
            videos: batch of videos, each a list of PIL frames or an array-like of frames.
            num_frames (`int`, *optional*): overrides the tester's default frame count.
        """
        num_frames = num_frames if num_frames is not None else self.num_frames
        grid_t = num_frames // self.temporal_patch_size
        hidden_dim = self.num_channels * self.temporal_patch_size * self.patch_size * self.patch_size
        seq_len = 0
        for video in videos:
            if isinstance(video[0], Image.Image):
                video = np.stack([np.array(frame) for frame in video])
            height, width = get_image_size(video)
            resized_height, resized_width = smart_resize(
                height,
                width,
                factor=self.patch_size * self.merge_size,
                min_pixels=self.min_pixels,
                max_pixels=self.max_pixels,
            )
            grid_h, grid_w = resized_height // self.patch_size, resized_width // self.patch_size
            seq_len += grid_t * grid_h * grid_w
        return [seq_len, hidden_dim]

    def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
        """Build dummy video inputs matching this tester's configuration."""
        return prepare_video_inputs(
            batch_size=self.batch_size,
            num_frames=self.num_frames,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            return_tensors=return_tensors,
        )
@require_torch
@require_vision
class Qwen2VLVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase):
    """Tests for the Qwen2-VL video processor.

    Qwen2-VL flattens videos into patch sequences, so the mixin's shape-based
    tests are overridden here to compare against ``[seq_len, hidden_dim]``
    outputs instead of (batch, frames, channels, height, width).
    """
    fast_video_processing_class = Qwen2VLVideoProcessor if is_torchvision_available() else None
    def setUp(self):
        super().setUp()
        self.video_processor_tester = Qwen2VLVideoProcessingTester(self)
    @property
    def video_processor_dict(self):
        # Kwargs used to build processors throughout the tests below.
        return self.video_processor_tester.prepare_video_processor_dict()
    def test_video_processor_properties(self):
        """The processor exposes all expected configuration attributes."""
        video_processing = self.fast_video_processing_class(**self.video_processor_dict)
        self.assertTrue(hasattr(video_processing, "do_resize"))
        self.assertTrue(hasattr(video_processing, "size"))
        self.assertTrue(hasattr(video_processing, "do_normalize"))
        self.assertTrue(hasattr(video_processing, "image_mean"))
        self.assertTrue(hasattr(video_processing, "image_std"))
        self.assertTrue(hasattr(video_processing, "do_convert_rgb"))
    def test_video_processor_from_dict_with_kwargs(self):
        """from_dict applies the dict's `size` and lets an explicit kwarg override it."""
        video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict)
        self.assertEqual(video_processor.size, {"shortest_edge": 400, "longest_edge": 10000})
        video_processor = self.fast_video_processing_class.from_dict(
            self.video_processor_dict, size={"shortest_edge": 100, "longest_edge": 200}
        )
        self.assertEqual(video_processor.size, {"shortest_edge": 100, "longest_edge": 200})
    def test_video_processor_to_json_string(self):
        """Overrides the mixin test: min_pixels/max_pixels are excluded from the
        comparison.

        NOTE(review): presumably these two keys are remapped during
        serialization (e.g. folded into `size`) — confirm against the processor
        implementation.
        """
        for video_processing_class in self.video_processor_list:
            video_processor = video_processing_class(**self.video_processor_dict)
            obj = json.loads(video_processor.to_json_string())
            for key, value in self.video_processor_dict.items():
                if key not in ["min_pixels", "max_pixels"]:
                    self.assertEqual(obj[key], value)
    def test_call_pil(self):
        """Videos given as lists of PIL images produce the flattened patch output."""
        for video_processing_class in self.video_processor_list:
            # Initialize video_processing
            video_processing = video_processing_class(**self.video_processor_dict)
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="pil"
            )
            # Each video is a list of PIL Images
            for video in video_inputs:
                self.assertIsInstance(video[0], Image.Image)
            # Test not batched input
            encoded_videos = video_processing(video_inputs[0], return_tensors="pt")[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
            # Test batched
            encoded_videos = video_processing(video_inputs, return_tensors="pt")[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
    def test_call_numpy(self):
        """Videos given as numpy arrays produce the flattened patch output."""
        for video_processing_class in self.video_processor_list:
            # Initialize video_processing
            video_processing = video_processing_class(**self.video_processor_dict)
            # create random numpy tensors
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="np"
            )
            for video in video_inputs:
                self.assertIsInstance(video, np.ndarray)
            # Test not batched input
            encoded_videos = video_processing(video_inputs[0], return_tensors="pt")[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
            # Test batched
            encoded_videos = video_processing(video_inputs, return_tensors="pt")[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
    def test_call_pytorch(self):
        """Videos given as torch tensors produce the flattened patch output."""
        for video_processing_class in self.video_processor_list:
            # Initialize video_processing
            video_processing = video_processing_class(**self.video_processor_dict)
            # create random PyTorch tensors
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="torch"
            )
            for video in video_inputs:
                self.assertIsInstance(video, torch.Tensor)
            # Test not batched input
            encoded_videos = video_processing(video_inputs[0], return_tensors="pt")[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
            # Test batched
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            encoded_videos = video_processing(video_inputs, return_tensors="pt")[self.input_name]
            self.assertEqual(
                list(encoded_videos.shape),
                expected_output_video_shape,
            )
    def test_nested_input(self):
        """Tests that the processor can work with nested list where each video is a list of arrays"""
        for video_processing_class in self.video_processor_list:
            video_processing = video_processing_class(**self.video_processor_dict)
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="np"
            )
            # Test not batched input
            video_inputs_nested = [list(video) for video in video_inputs]
            encoded_videos = video_processing(video_inputs_nested[0], return_tensors="pt")[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
            # Test batched
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            encoded_videos = video_processing(video_inputs_nested, return_tensors="pt")[self.input_name]
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
    # Typo fix in the skip reason: "fo" -> "for".
    @unittest.skip("Skip for now, the test needs adjustment for Qwen2VL")
    def test_call_numpy_4_channels(self):
        for video_processing_class in self.video_processor_list:
            # Test that can process videos which have an arbitrary number of channels
            # Initialize video_processing
            video_processor = video_processing_class(**self.video_processor_dict)
            # create random numpy tensors
            self.video_processor_tester.num_channels = 4
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="np"
            )
            # Test not batched input
            encoded_videos = video_processor(
                video_inputs[0],
                return_tensors="pt",
                input_data_format="channels_last",
                image_mean=(0.0, 0.0, 0.0, 0.0),
                image_std=(1.0, 1.0, 1.0, 1.0),
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
            # Test batched
            encoded_videos = video_processor(
                video_inputs,
                return_tensors="pt",
                input_data_format="channels_last",
                image_mean=(0.0, 0.0, 0.0, 0.0),
                image_std=(1.0, 1.0, 1.0, 1.0),
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
    def test_call_sample_frames(self):
        """Sampling disabled is a no-op; enabled, it honors `num_frames`/`fps` and
        rejects requests for more frames than the video has."""
        for video_processing_class in self.video_processor_list:
            video_processing = video_processing_class(**self.video_processor_dict)
            prev_num_frames = self.video_processor_tester.num_frames
            self.video_processor_tester.num_frames = 8
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False,
                return_tensors="torch",
            )
            # Force set sampling to False. No sampling is expected even when `num_frames` exists
            video_processing.do_sample_frames = False
            encoded_videos = video_processing(video_inputs[0], return_tensors="pt", num_frames=3)[self.input_name]
            encoded_videos_batched = video_processing(video_inputs, return_tensors="pt", num_frames=3)[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            expected_output_video_shape_batched = self.video_processor_tester.expected_output_video_shape(video_inputs)
            self.assertListEqual(list(encoded_videos.shape), expected_output_video_shape)
            self.assertListEqual(list(encoded_videos_batched.shape), expected_output_video_shape_batched)
            # Set sampling to True. Video frames should be sampled with `num_frames` in the output
            video_processing.do_sample_frames = True
            encoded_videos = video_processing(video_inputs[0], return_tensors="pt", num_frames=4)[self.input_name]
            encoded_videos_batched = video_processing(video_inputs, return_tensors="pt", num_frames=4)[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(
                [video_inputs[0]], num_frames=4
            )
            expected_output_video_shape_batched = self.video_processor_tester.expected_output_video_shape(
                video_inputs, num_frames=4
            )
            self.assertListEqual(list(encoded_videos.shape), expected_output_video_shape)
            self.assertListEqual(list(encoded_videos_batched.shape), expected_output_video_shape_batched)
            # Sample by fps + metadata: a 2.0s clip at fps=3 should yield 6 frames.
            metadata = [[{"duration": 2.0, "total_num_frames": 8, "fps": 4}]]
            batched_metadata = metadata * len(video_inputs)
            encoded_videos = video_processing(video_inputs[0], return_tensors="pt", fps=3, video_metadata=metadata)[
                self.input_name
            ]
            encoded_videos_batched = video_processing(
                video_inputs, return_tensors="pt", fps=3, video_metadata=batched_metadata
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(
                [video_inputs[0]], num_frames=6
            )
            expected_output_video_shape_batched = self.video_processor_tester.expected_output_video_shape(
                video_inputs, num_frames=6
            )
            self.assertListEqual(list(encoded_videos.shape), expected_output_video_shape)
            self.assertListEqual(list(encoded_videos_batched.shape), expected_output_video_shape_batched)
            # We should raise error when asked to sample more frames than there are in input video
            with self.assertRaises(ValueError):
                encoded_videos = video_processing(video_inputs[0], return_tensors="pt", num_frames=10)[self.input_name]
                encoded_videos_batched = video_processing(video_inputs, return_tensors="pt", num_frames=10)[
                    self.input_name
                ]
            # Assign back the actual num frames in tester
            self.video_processor_tester.num_frames = prev_num_frames
    def test_num_frames_equal_temporal_patch_size_plus_two(self):
        """A 5-frame video with temporal_patch_size=3 groups into a temporal grid of 2
        (asserted via `video_grid_thw` below)."""
        for video_processing_class in self.video_processor_list:
            video_processor_dict = self.video_processor_dict.copy()
            video_processor_dict["size"] = {"longest_edge": 5 * 28 * 28, "shortest_edge": 28 * 28}
            video_processor_dict["do_sample_frames"] = False
            temporal_patch_size = 3
            video_processor_dict["temporal_patch_size"] = temporal_patch_size
            video_processing = video_processing_class(**video_processor_dict)
            n, w, h = 5, 28, 28
            video_inputs = [(np.random.randint(0, 256, (h, w, 3), dtype=np.uint8)) for _ in range(n)]
            video_processed = video_processing(video_inputs, return_tensors="pt")
            encoded_videos = video_processed[self.input_name]
            # seq_len 8 = 2 (temporal) * 2 * 2 (spatial grid); hidden dim = 3 * 3 * 14 * 14.
            self.assertEqual(list(encoded_videos.shape), [8, temporal_patch_size * 3 * 14 * 14])
            video_grid_thw = video_processed["video_grid_thw"]
            self.assertEqual(video_grid_thw.tolist(), [[2, 2, 2]])
    def test_bc_min_max_pixels(self):
        """Legacy `min_pixels`/`max_pixels` kwargs passed to from_pretrained still
        bound the output resolution (backward compatibility)."""
        for video_processing_class in self.video_processor_list:
            video_processing = video_processing_class(**self.video_processor_dict)
            with tempfile.TemporaryDirectory() as tmpdirname:
                video_processing.save_pretrained(tmpdirname)
                video_processing_loaded = video_processing_class.from_pretrained(
                    tmpdirname, max_pixels=56 * 56, min_pixels=28 * 28
                )
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=True,
                return_tensors="torch",
            )
            processed = video_processing_loaded(video_inputs, return_tensors="pt")
            expected_output_video_shape = [320, 1176]
            self.assertListEqual(list(processed.pixel_values_videos.shape), expected_output_video_shape)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/qwen2_vl/test_video_processing_qwen2_vl.py",
"license": "Apache License 2.0",
"lines": 331,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/smolvlm/test_video_processing_smolvlm.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torchvision_available, is_vision_available
from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
if is_vision_available():
if is_torchvision_available():
from transformers import SmolVLMVideoProcessor
class SmolVLMVideoProcessingTester:
    """Builds processor kwargs, expected output shapes, and sample video inputs
    for the SmolVLM video-processing tests."""
    def __init__(
        self,
        parent,
        batch_size=5,
        num_frames=8,
        num_channels=3,
        min_resolution=30,
        max_resolution=80,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=IMAGENET_STANDARD_MEAN,
        image_std=IMAGENET_STANDARD_STD,
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"longest_edge": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_frames = num_frames
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        # The same size dict doubles as `max_image_size`; expected outputs are
        # square frames of `longest_edge` pixels (see expected_output_video_shape).
        self.max_image_size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_video_processor_dict(self):
        """Return the kwargs used to instantiate the video processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
            "max_image_size": self.max_image_size,
        }
    def expected_output_video_shape(self, videos):
        """Expected per-video output shape: all frames end up as fixed squares of
        `max_image_size["longest_edge"]` pixels, regardless of input resolution."""
        return [
            self.num_frames,
            self.num_channels,
            self.max_image_size["longest_edge"],
            self.max_image_size["longest_edge"],
        ]
    def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
        """Create a batch of random videos in the requested format ("pil", "np" or "torch")."""
        videos = prepare_video_inputs(
            batch_size=self.batch_size,
            num_frames=self.num_frames,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            return_tensors=return_tensors,
        )
        return videos
@require_torch
@require_vision
class SmolVLMVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase):
    """Tests for the SmolVLM video processor."""
    fast_video_processing_class = SmolVLMVideoProcessor if is_torchvision_available() else None
    # SmolVLM returns its video features under "pixel_values" rather than the
    # mixin default "pixel_values_videos".
    input_name = "pixel_values"
    def setUp(self):
        super().setUp()
        self.video_processor_tester = SmolVLMVideoProcessingTester(self)
    @property
    def video_processor_dict(self):
        # Kwargs used to build processors in the tests below.
        return self.video_processor_tester.prepare_video_processor_dict()
    def test_video_processor_from_dict_with_kwargs(self):
        """from_dict honors the dict's `size` and normalizes a scalar override
        into the {"height", "width"} form."""
        video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict)
        self.assertEqual(video_processor.size, {"longest_edge": 20})
        video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict, size=42)
        self.assertEqual(video_processor.size, {"height": 42, "width": 42})
    # overwrite, SmolVLM requires to have metadata no matter how we sample
    def test_call_sample_frames(self):
        """Sampling disabled keeps all 8 frames; enabled, the output honors `num_frames`."""
        for video_processing_class in self.video_processor_list:
            video_processing = video_processing_class(**self.video_processor_dict)
            prev_num_frames = self.video_processor_tester.num_frames
            self.video_processor_tester.num_frames = 8
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False,
                return_tensors="torch",
            )
            # Force set sampling to False. No sampling is expected even when `num_frames` exists
            video_processing.do_sample_frames = False
            encoded_videos = video_processing(video_inputs[0], return_tensors="pt", num_frames=3)[self.input_name]
            encoded_videos_batched = video_processing(video_inputs, return_tensors="pt", num_frames=3)[self.input_name]
            self.assertEqual(encoded_videos.shape[1], 8)
            self.assertEqual(encoded_videos_batched.shape[1], 8)
            # Set sampling to True. Video frames should be sampled with `num_frames` in the output
            video_processing.do_sample_frames = True
            metadata = [[{"duration": 2.0, "total_num_frames": 8, "fps": 4}]]
            batched_metadata = metadata * len(video_inputs)
            encoded_videos = video_processing(
                video_inputs[0], return_tensors="pt", num_frames=6, fps=3, video_metadata=metadata
            )[self.input_name]
            encoded_videos_batched = video_processing(
                video_inputs, return_tensors="pt", num_frames=6, fps=3, video_metadata=batched_metadata
            )[self.input_name]
            self.assertEqual(encoded_videos.shape[1], 6)
            self.assertEqual(encoded_videos_batched.shape[1], 6)
            # Assign back the actual num frames in tester
            self.video_processor_tester.num_frames = prev_num_frames
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/smolvlm/test_video_processing_smolvlm.py",
"license": "Apache License 2.0",
"lines": 125,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/video_llava/test_video_processing_video_llava.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torchvision_available, is_vision_available
from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
if is_vision_available():
if is_torchvision_available():
from transformers import VideoLlavaVideoProcessor
class VideoLlavaVideoProcessingTester:
    """Builds processor kwargs, expected output shapes, and sample video inputs
    for the Video-LLaVA video-processing tests."""
    def __init__(
        self,
        parent,
        batch_size=5,
        num_frames=8,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=80,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=OPENAI_CLIP_MEAN,
        image_std=OPENAI_CLIP_STD,
        do_convert_rgb=True,
    ):
        # NOTE(review): the base class is object, so this super().__init__() is a no-op.
        super().__init__()
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_frames = num_frames
        self.num_channels = num_channels
        # image_size is stored but unused by the helpers below; crop_size drives shapes.
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_video_processor_dict(self):
        """Return the kwargs used to instantiate the video processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }
    def expected_output_video_shape(self, images):
        """Expected per-video output shape: the center crop fixes spatial dims to crop_size."""
        return self.num_frames, self.num_channels, self.crop_size["height"], self.crop_size["width"]
    def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
        """Create a batch of random videos in the requested format ("pil", "np" or "torch")."""
        videos = prepare_video_inputs(
            batch_size=self.batch_size,
            num_frames=self.num_frames,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            return_tensors=return_tensors,
        )
        return videos
@require_torch
@require_vision
class VideoLlavaVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase):
    """Tests for the Video-LLaVA video processor."""
    fast_video_processing_class = VideoLlavaVideoProcessor if is_torchvision_available() else None
    def setUp(self):
        super().setUp()
        self.video_processor_tester = VideoLlavaVideoProcessingTester(self)
    @property
    def video_processor_dict(self):
        # Kwargs used to build processors in the tests below.
        return self.video_processor_tester.prepare_video_processor_dict()
    def test_video_processor_properties(self):
        """The processor exposes all expected configuration attributes, including
        the center-crop settings specific to this model."""
        video_processing = self.fast_video_processing_class(**self.video_processor_dict)
        self.assertTrue(hasattr(video_processing, "do_resize"))
        self.assertTrue(hasattr(video_processing, "size"))
        self.assertTrue(hasattr(video_processing, "do_center_crop"))
        self.assertTrue(hasattr(video_processing, "center_crop"))
        self.assertTrue(hasattr(video_processing, "do_normalize"))
        self.assertTrue(hasattr(video_processing, "image_mean"))
        self.assertTrue(hasattr(video_processing, "image_std"))
        self.assertTrue(hasattr(video_processing, "do_convert_rgb"))
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/video_llava/test_video_processing_video_llava.py",
"license": "Apache License 2.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/test_video_processing_common.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
import os
import tempfile
import warnings
from copy import deepcopy
import numpy as np
import pytest
from transformers import AutoVideoProcessor
from transformers.testing_utils import (
check_json_file_has_correct_format,
require_torch,
require_torch_accelerator,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_torch_available, is_vision_available
from transformers.video_utils import VideoMetadata
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
def prepare_video(num_frames, num_channels, width=10, height=10, return_tensors="pil"):
    """Build one synthetic video of random uint8 frames.

    Depending on `return_tensors`, returns a list of PIL images ("pil"), a
    channels-first torch tensor ("torch"), or a stacked numpy array ("np");
    any other value returns the raw list of numpy frames.
    """
    frames = [
        np.random.randint(255, size=(width, height, num_channels), dtype=np.uint8)
        for _ in range(num_frames)
    ]
    if return_tensors == "pil":
        # PIL consumes channels-last uint8 arrays directly.
        return [Image.fromarray(frame) for frame in frames]
    if return_tensors == "torch":
        # Reorder to (frames, channels, height, width), the torch convention.
        return torch.tensor(frames).permute(0, 3, 1, 2)
    if return_tensors == "np":
        return np.array(frames)
    return frames
def prepare_video_inputs(
    batch_size,
    num_frames,
    num_channels,
    min_resolution,
    max_resolution,
    equal_resolution=False,
    return_tensors="pil",
):
    """Build a batch of synthetic videos via `prepare_video`.

    The result is a list of videos — each a list of PIL images ("pil"), a numpy
    array ("np"), or a torch tensor ("torch"). With `equal_resolution` every
    video uses `max_resolution`; otherwise each video gets a random resolution
    drawn from [min_resolution, max_resolution).
    """
    batch = []
    for _ in range(batch_size):
        if equal_resolution:
            width = height = max_resolution
        else:
            width, height = np.random.choice(np.arange(min_resolution, max_resolution), 2)
        batch.append(
            prepare_video(
                num_frames=num_frames,
                num_channels=num_channels,
                width=width,
                height=height,
                return_tensors=return_tensors,
            )
        )
    return batch
class VideoProcessingTestMixin:
    """Shared test mixin exercising behavior common to all video processors.

    Subclasses set `fast_video_processing_class` and a `video_processor_tester`
    (in their own setUp) and may override `input_name`/`test_cast_dtype`.
    """
    # Set to a truthy value in subclasses to enable test_cast_dtype_device.
    test_cast_dtype = None
    # The (torchvision-backed) processor class under test; set by subclasses.
    fast_video_processing_class = None
    # Populated in setUp with the available processor classes.
    video_processor_list = None
    # Output key holding the processed pixel data; overridable per model.
    input_name = "pixel_values_videos"
    def setUp(self):
        """Collect the processor classes to iterate over in the tests."""
        video_processor_list = []
        if self.fast_video_processing_class:
            video_processor_list.append(self.fast_video_processing_class)
        self.video_processor_list = video_processor_list
    def test_video_processor_to_json_string(self):
        """Round-trip: to_json_string output parses back to the constructor kwargs."""
        for video_processing_class in self.video_processor_list:
            video_processor = video_processing_class(**self.video_processor_dict)
            obj = json.loads(video_processor.to_json_string())
            for key, value in self.video_processor_dict.items():
                self.assertEqual(obj[key], value)
    def test_video_processor_to_json_file(self):
        """Round-trip: saving to a JSON file and loading yields an equivalent processor."""
        for video_processing_class in self.video_processor_list:
            video_processor_first = video_processing_class(**self.video_processor_dict)
            with tempfile.TemporaryDirectory() as tmpdirname:
                json_file_path = os.path.join(tmpdirname, "video_processor.json")
                video_processor_first.to_json_file(json_file_path)
                video_processor_second = video_processing_class.from_json_file(json_file_path)
                self.assertEqual(video_processor_second.to_dict(), video_processor_first.to_dict())
    def test_video_processor_from_dict_with_kwargs(self):
        """from_dict applies the dict values and lets explicit kwargs override them,
        normalizing scalar size/crop_size values into their dict forms."""
        video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict)
        self.assertEqual(video_processor.size, {"shortest_edge": 20})
        self.assertEqual(video_processor.crop_size, {"height": 18, "width": 18})
        video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict, size=42, crop_size=84)
        self.assertEqual(video_processor.size, {"shortest_edge": 42})
        self.assertEqual(video_processor.crop_size, {"height": 84, "width": 84})
    def test_video_processor_from_and_save_pretrained(self):
        """Round-trip: save_pretrained / from_pretrained preserves the full config."""
        for video_processing_class in self.video_processor_list:
            video_processor_first = video_processing_class(**self.video_processor_dict)
            with tempfile.TemporaryDirectory() as tmpdirname:
                saved_file = video_processor_first.save_pretrained(tmpdirname)[0]
                check_json_file_has_correct_format(saved_file)
                video_processor_second = video_processing_class.from_pretrained(tmpdirname)
                self.assertEqual(video_processor_second.to_dict(), video_processor_first.to_dict())
    def test_video_processor_save_load_with_autovideoprocessor(self):
        """A saved processor reloads unchanged through AutoVideoProcessor."""
        for video_processing_class in self.video_processor_list:
            video_processor_first = video_processing_class(**self.video_processor_dict)
            with tempfile.TemporaryDirectory() as tmpdirname:
                saved_file = video_processor_first.save_pretrained(tmpdirname)[0]
                check_json_file_has_correct_format(saved_file)
                # Request the fast implementation only for *Fast classes.
                use_fast = video_processing_class.__name__.endswith("Fast")
                video_processor_second = AutoVideoProcessor.from_pretrained(tmpdirname, use_fast=use_fast)
                self.assertEqual(video_processor_second.to_dict(), video_processor_first.to_dict())
    def test_init_without_params(self):
        """Processors must be constructible with defaults only."""
        for video_processing_class in self.video_processor_list:
            video_processor = video_processing_class()
            self.assertIsNotNone(video_processor)
    def test_video_processor_explicit_none_preserved(self):
        """Test that explicitly setting an attribute to None is preserved through save/load."""
        # Find an attribute with a non-None class default to test explicit None override
        test_attr = None
        for attr in ["do_resize", "do_rescale", "do_normalize"]:
            if getattr(self.fast_video_processing_class, attr, None) is not None:
                test_attr = attr
                break
        if test_attr is None:
            self.skipTest("Could not find a suitable attribute to test")
        # Create processor with explicit None (override the attribute)
        kwargs = self.video_processor_dict.copy()
        kwargs[test_attr] = None
        video_processor = self.fast_video_processing_class(**kwargs)
        # Verify it's in to_dict() as None (not filtered out)
        self.assertIn(test_attr, video_processor.to_dict())
        self.assertIsNone(video_processor.to_dict()[test_attr])
        # Verify explicit None survives save/load cycle
        with tempfile.TemporaryDirectory() as tmpdirname:
            video_processor.save_pretrained(tmpdirname)
            reloaded = self.fast_video_processing_class.from_pretrained(tmpdirname)
            self.assertIsNone(getattr(reloaded, test_attr), f"Explicit None for {test_attr} was lost after reload")
    @slow
    @require_torch_accelerator
    @require_vision
    @pytest.mark.torch_compile_test
    def test_can_compile_fast_video_processor(self):
        """A torch.compile'd processor must match the eager output (assert_close)."""
        if self.fast_video_processing_class is None:
            self.skipTest("Skipping compilation test as fast video processor is not defined")
        torch.compiler.reset()
        video_inputs = self.video_processor_tester.prepare_video_inputs(equal_resolution=False, return_tensors="torch")
        video_processor = self.fast_video_processing_class(**self.video_processor_dict)
        output_eager = video_processor(video_inputs, device=torch_device, do_sample_frames=False, return_tensors="pt")
        video_processor = torch.compile(video_processor, mode="reduce-overhead")
        output_compiled = video_processor(
            video_inputs, device=torch_device, do_sample_frames=False, return_tensors="pt"
        )
        torch.testing.assert_close(
            output_eager[self.input_name], output_compiled[self.input_name], rtol=1e-4, atol=1e-4
        )
    @require_torch
    @require_vision
    def test_cast_dtype_device(self):
        """BatchFeature.to() casts the pixel tensors but leaves integer ids as long.

        Only runs when a subclass opts in by setting `test_cast_dtype`.
        """
        for video_processing_class in self.video_processor_list:
            if self.test_cast_dtype is not None:
                # Initialize video_processor
                video_processor = video_processing_class(**self.video_processor_dict)
                # create random PyTorch tensors
                video_inputs = self.video_processor_tester.prepare_video_inputs(
                    equal_resolution=False, return_tensors="torch"
                )
                encoding = video_processor(video_inputs, return_tensors="pt")
                self.assertEqual(encoding[self.input_name].device, torch.device("cpu"))
                self.assertEqual(encoding[self.input_name].dtype, torch.float32)
                encoding = video_processor(video_inputs, return_tensors="pt").to(torch.float16)
                self.assertEqual(encoding[self.input_name].device, torch.device("cpu"))
                self.assertEqual(encoding[self.input_name].dtype, torch.float16)
                encoding = video_processor(video_inputs, return_tensors="pt").to("cpu", torch.bfloat16)
                self.assertEqual(encoding[self.input_name].device, torch.device("cpu"))
                self.assertEqual(encoding[self.input_name].dtype, torch.bfloat16)
                # Positional (dtype, device) order is invalid and must raise.
                with self.assertRaises(TypeError):
                    _ = video_processor(video_inputs, return_tensors="pt").to(torch.bfloat16, "cpu")
                # Try with text + video feature
                encoding = video_processor(video_inputs, return_tensors="pt")
                encoding.update({"input_ids": torch.LongTensor([[1, 2, 3], [4, 5, 6]])})
                encoding = encoding.to(torch.float16)
                self.assertEqual(encoding[self.input_name].device, torch.device("cpu"))
                self.assertEqual(encoding[self.input_name].dtype, torch.float16)
                self.assertEqual(encoding.input_ids.dtype, torch.long)
def test_call_pil(self):
for video_processing_class in self.video_processor_list:
# Initialize video_processing
video_processing = video_processing_class(**self.video_processor_dict)
video_inputs = self.video_processor_tester.prepare_video_inputs(equal_resolution=False)
# Each video is a list of PIL Images
for video in video_inputs:
self.assertIsInstance(video[0], Image.Image)
# Test not batched input
encoded_videos = video_processing(video_inputs[0], return_tensors="pt")[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))
# Test batched
encoded_videos = video_processing(video_inputs, return_tensors="pt")[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
self.assertEqual(
tuple(encoded_videos.shape), (self.video_processor_tester.batch_size, *expected_output_video_shape)
)
def test_call_numpy(self):
for video_processing_class in self.video_processor_list:
# Initialize video_processing
video_processing = video_processing_class(**self.video_processor_dict)
# create random numpy tensors
video_inputs = self.video_processor_tester.prepare_video_inputs(
equal_resolution=False, return_tensors="np"
)
for video in video_inputs:
self.assertIsInstance(video, np.ndarray)
# Test not batched input
encoded_videos = video_processing(video_inputs[0], return_tensors="pt")[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))
# Test batched
encoded_videos = video_processing(video_inputs, return_tensors="pt")[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
self.assertEqual(
tuple(encoded_videos.shape), (self.video_processor_tester.batch_size, *expected_output_video_shape)
)
def test_call_pytorch(self):
for video_processing_class in self.video_processor_list:
# Initialize video_processing
video_processing = video_processing_class(**self.video_processor_dict)
# create random PyTorch tensors
video_inputs = self.video_processor_tester.prepare_video_inputs(
equal_resolution=False, return_tensors="torch"
)
for video in video_inputs:
self.assertIsInstance(video, torch.Tensor)
# Test not batched input
encoded_videos = video_processing(video_inputs[0], return_tensors="pt")[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))
# Test batched
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
encoded_videos = video_processing(video_inputs, return_tensors="pt")[self.input_name]
self.assertEqual(
tuple(encoded_videos.shape),
(self.video_processor_tester.batch_size, *expected_output_video_shape),
)
def test_call_sample_frames(self):
for video_processing_class in self.video_processor_list:
video_processing = video_processing_class(**self.video_processor_dict)
prev_num_frames = self.video_processor_tester.num_frames
self.video_processor_tester.num_frames = 8
video_inputs = self.video_processor_tester.prepare_video_inputs(
equal_resolution=False,
return_tensors="torch",
)
# Force set sampling to False. No sampling is expected even when `num_frames` exists
video_processing.do_sample_frames = False
encoded_videos = video_processing(video_inputs[0], return_tensors="pt", num_frames=3)[self.input_name]
encoded_videos_batched = video_processing(video_inputs, return_tensors="pt", num_frames=3)[self.input_name]
self.assertEqual(encoded_videos.shape[1], 8)
self.assertEqual(encoded_videos_batched.shape[1], 8)
# Set sampling to True. Video frames should be sampled with `num_frames` in the output
video_processing.do_sample_frames = True
encoded_videos = video_processing(video_inputs[0], return_tensors="pt", num_frames=3)[self.input_name]
encoded_videos_batched = video_processing(video_inputs, return_tensors="pt", num_frames=3)[self.input_name]
self.assertEqual(encoded_videos.shape[1], 3)
self.assertEqual(encoded_videos_batched.shape[1], 3)
# Sample with `fps` requires metadata to infer number of frames from total duration
with self.assertRaises(ValueError):
metadata = VideoMetadata(**{"total_num_frames": 8})
video_processing.sample_frames(metadata=metadata, fps=3)
metadata = [[{"duration": 2.0, "total_num_frames": 8, "fps": 4}]]
batched_metadata = metadata * len(video_inputs)
encoded_videos = video_processing(video_inputs[0], return_tensors="pt", fps=3, video_metadata=metadata)[
self.input_name
]
encoded_videos_batched = video_processing(
video_inputs, return_tensors="pt", fps=3, video_metadata=batched_metadata
)[self.input_name]
self.assertEqual(encoded_videos.shape[1], 6)
self.assertEqual(encoded_videos_batched.shape[1], 6)
# The same as above but uses a `VideoMetadata` object in the input
metadata = [[VideoMetadata(duration=2.0, total_num_frames=8, fps=4)]]
batched_metadata = metadata * len(video_inputs)
encoded_videos = video_processing(video_inputs[0], return_tensors="pt", fps=3, video_metadata=metadata)[
self.input_name
]
# We should raise error when asked to sample more frames than there are in input video
with self.assertRaises(ValueError):
encoded_videos = video_processing(video_inputs[0], return_tensors="pt", num_frames=10)[self.input_name]
encoded_videos_batched = video_processing(video_inputs, return_tensors="pt", num_frames=10)[
self.input_name
]
# Assign back the actual num frames in tester
self.video_processor_tester.num_frames = prev_num_frames
def test_nested_input(self):
"""Tests that the processor can work with nested list where each video is a list of arrays"""
for video_processing_class in self.video_processor_list:
video_processing = video_processing_class(**self.video_processor_dict)
video_inputs = self.video_processor_tester.prepare_video_inputs(
equal_resolution=False, return_tensors="np"
)
# Test not batched input
video_inputs = [list(video) for video in video_inputs]
encoded_videos = video_processing(video_inputs[0], return_tensors="pt")[self.input_name]
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))
# Test batched
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
encoded_videos = video_processing(video_inputs, return_tensors="pt")[self.input_name]
self.assertEqual(
tuple(encoded_videos.shape),
(self.video_processor_tester.batch_size, *expected_output_video_shape),
)
    def test_call_numpy_4_channels(self):
        """Videos with an arbitrary channel count (here 4) are accepted; processors that
        convert to RGB are expected to reduce the channel dim to 3 in the output.
        """
        for video_processing_class in self.video_processor_list:
            # Test that can process videos which have an arbitrary number of channels
            # Initialize video_processing
            video_processor = video_processing_class(**self.video_processor_dict)

            # create random numpy tensors with 4 channels
            # NOTE(review): `num_channels` is not restored afterwards — assumes the tester
            # fixture is recreated per test (e.g. in `setUp`); confirm in subclasses.
            self.video_processor_tester.num_channels = 4
            video_inputs = self.video_processor_tester.prepare_video_inputs(
                equal_resolution=False, return_tensors="pil"
            )

            # Test not batched input; mean/std of 0/1 make normalization a no-op for any channel count.
            encoded_videos = video_processor(
                video_inputs[0],
                return_tensors="pt",
                input_data_format="channels_last",
                image_mean=0.0,
                image_std=1.0,
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
            if video_processor.do_convert_rgb:
                # RGB conversion collapses the channel dim to 3.
                expected_output_video_shape = list(expected_output_video_shape)
                expected_output_video_shape[1] = 3
            self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))

            # Test batched
            encoded_videos = video_processor(
                video_inputs,
                return_tensors="pt",
                input_data_format="channels_last",
                image_mean=0.0,
                image_std=1.0,
            )[self.input_name]
            expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
            if video_processor.do_convert_rgb:
                expected_output_video_shape = list(expected_output_video_shape)
                expected_output_video_shape[1] = 3
            self.assertEqual(
                tuple(encoded_videos.shape), (self.video_processor_tester.batch_size, *expected_output_video_shape)
            )
def test_video_processor_preprocess_arguments(self):
is_tested = False
for video_processing_class in self.video_processor_list:
video_processor = video_processing_class(**self.video_processor_dict)
# validation done by _valid_processor_keys attribute
if hasattr(video_processor, "_valid_processor_keys") and hasattr(video_processor, "preprocess"):
preprocess_parameter_names = inspect.getfullargspec(video_processor.preprocess).args
preprocess_parameter_names.remove("self")
preprocess_parameter_names.sort()
valid_processor_keys = video_processor._valid_processor_keys
valid_processor_keys.sort()
self.assertEqual(preprocess_parameter_names, valid_processor_keys)
is_tested = True
# validation done by @filter_out_non_signature_kwargs decorator
if hasattr(video_processor.preprocess, "_filter_out_non_signature_kwargs"):
if hasattr(self.video_processor_tester, "prepare_video_inputs"):
inputs = self.video_processor_tester.prepare_video_inputs()
elif hasattr(self.video_processor_tester, "prepare_video_inputs"):
inputs = self.video_processor_tester.prepare_video_inputs()
else:
self.skipTest(reason="No valid input preparation method found")
with warnings.catch_warnings(record=True) as raised_warnings:
warnings.simplefilter("always")
video_processor(inputs, extra_argument=True)
messages = " ".join([str(w.message) for w in raised_warnings])
self.assertGreaterEqual(len(raised_warnings), 1)
self.assertIn("extra_argument", messages)
is_tested = True
if not is_tested:
self.skipTest(reason="No validation found for `preprocess` method")
def test_override_instance_attributes_does_not_affect_other_instances(self):
if self.fast_video_processing_class is None:
self.skipTest(
"Only testing fast video processor, as most slow processors break this test and are to be deprecated"
)
video_processing_class = self.fast_video_processing_class
video_processor_1 = video_processing_class()
video_processor_2 = video_processing_class()
if not (hasattr(video_processor_1, "size") and isinstance(video_processor_1.size, dict)) or not (
hasattr(video_processor_1, "image_mean") and isinstance(video_processor_1.image_mean, list)
):
self.skipTest(
reason="Skipping test as the image processor does not have dict size or list image_mean attributes"
)
original_size_2 = deepcopy(video_processor_2.size)
for key in video_processor_1.size:
video_processor_1.size[key] = -1
modified_copied_size_1 = deepcopy(video_processor_1.size)
original_image_mean_2 = deepcopy(video_processor_2.image_mean)
video_processor_1.image_mean[0] = -1
modified_copied_image_mean_1 = deepcopy(video_processor_1.image_mean)
# check that the original attributes of the second instance are not affected
self.assertEqual(video_processor_2.size, original_size_2)
self.assertEqual(video_processor_2.image_mean, original_image_mean_2)
for key in video_processor_2.size:
video_processor_2.size[key] = -2
video_processor_2.image_mean[0] = -2
# check that the modified attributes of the first instance are not affected by the second instance
self.assertEqual(video_processor_1.size, modified_copied_size_1)
self.assertEqual(video_processor_1.image_mean, modified_copied_image_mean_1)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/test_video_processing_common.py",
"license": "Apache License 2.0",
"lines": 425,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/utils/test_video_utils.py | # Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_torch_available, is_vision_available
from transformers.image_processing_utils import get_size_dict
from transformers.image_utils import SizeDict
from transformers.processing_utils import VideosKwargs
from transformers.testing_utils import (
require_av,
require_cv2,
require_decord,
require_torch,
require_torchcodec,
require_torchvision,
require_vision,
)
from transformers.video_utils import group_videos_by_shape, make_batched_videos, reorder_videos
if is_torch_available():
import torch
if is_vision_available():
import PIL
from transformers import BaseVideoProcessor
from transformers.video_utils import VideoMetadata, load_video
def get_random_video(height, width, num_frames=8, return_torch=False):
    """Build a synthetic uint8 RGB video made of `num_frames` copies of one random frame.

    Returns a numpy array of shape (num_frames, height, width, 3), or — when
    `return_torch` is True — a torch tensor with channels first,
    shape (num_frames, 3, height, width).
    """
    frame = np.random.randint(0, 256, (height, width, 3), dtype=np.uint8)
    clip = np.array([frame] * num_frames)
    if not return_torch:
        return clip
    # move channel first
    return torch.from_numpy(clip).permute(0, 3, 1, 2)
@require_vision
@require_torchvision
class BaseVideoProcessorTester(unittest.TestCase):
    """
    Tests that the `transforms` can be applied to a 4-dim array directly, i.e. to a whole video.

    NOTE: several length checks below used `self.assertTrue(len(x), n)`, where `n` was silently
    treated as the assertion *message* — those assertions could never fail on a non-empty list.
    They are replaced with `assertEqual(len(x), n)` with the lengths implied by the inputs.
    """

    def test_make_batched_videos_pil(self):
        """`make_batched_videos` normalizes PIL inputs into a list of 4D numpy videos."""
        # Test a single image is converted to a list of 1 video with 1 frame
        video = get_random_video(16, 32)
        pil_image = PIL.Image.fromarray(video[0])
        videos_list = make_batched_videos(pil_image)
        self.assertIsInstance(videos_list, list)
        self.assertIsInstance(videos_list[0], np.ndarray)
        self.assertEqual(videos_list[0].shape, (1, 16, 32, 3))
        self.assertTrue(np.array_equal(videos_list[0][0], np.array(pil_image)))

        # Test a list of videos is converted to a list of 1 video
        video = get_random_video(16, 32)
        pil_video = [PIL.Image.fromarray(frame) for frame in video]
        videos_list = make_batched_videos(pil_video)
        self.assertIsInstance(videos_list, list)
        self.assertIsInstance(videos_list[0], np.ndarray)
        self.assertEqual(videos_list[0].shape, (8, 16, 32, 3))
        self.assertTrue(np.array_equal(videos_list[0], video))

        # Test a nested list of videos is not modified
        video = get_random_video(16, 32)
        pil_video = [PIL.Image.fromarray(frame) for frame in video]
        videos = [pil_video, pil_video]
        videos_list = make_batched_videos(videos)
        self.assertIsInstance(videos_list, list)
        self.assertIsInstance(videos_list[0], np.ndarray)
        self.assertEqual(videos_list[0].shape, (8, 16, 32, 3))
        self.assertTrue(np.array_equal(videos_list[0], video))

    def test_make_batched_videos_numpy(self):
        """`make_batched_videos` normalizes numpy inputs of rank 3/4/5 and lists thereof."""
        # Test a single image is converted to a list of 1 video with 1 frame
        video = get_random_video(16, 32)[0]
        videos_list = make_batched_videos(video)
        self.assertIsInstance(videos_list, list)
        self.assertIsInstance(videos_list[0], np.ndarray)
        self.assertEqual(videos_list[0].shape, (1, 16, 32, 3))
        self.assertTrue(np.array_equal(videos_list[0][0], video))

        # Test a 4d array of videos is converted to a list of 1 video
        video = get_random_video(16, 32)
        videos_list = make_batched_videos(video)
        self.assertIsInstance(videos_list, list)
        self.assertEqual(len(videos_list), 1)  # fixed: was no-op `assertTrue(len, msg)`
        self.assertIsInstance(videos_list[0], np.ndarray)
        self.assertEqual(videos_list[0].shape, (8, 16, 32, 3))
        self.assertTrue(np.array_equal(videos_list[0], video))

        # Test a 5d array of batch videos is converted to a list of videos
        video = video[None, ...].repeat(4, 0)
        videos_list = make_batched_videos(video)
        self.assertIsInstance(videos_list, list)
        self.assertEqual(len(videos_list), 4)
        self.assertIsInstance(videos_list[0], np.ndarray)
        self.assertEqual(videos_list[0].shape, (8, 16, 32, 3))
        self.assertTrue(np.array_equal(videos_list[0], video[0]))

        # Test a list of videos is converted to a list of videos
        video = get_random_video(16, 32)
        videos = [video, video]
        videos_list = make_batched_videos(videos)
        self.assertIsInstance(videos_list, list)
        self.assertIsInstance(videos_list[0], np.ndarray)
        self.assertEqual(videos_list[0].shape, (8, 16, 32, 3))
        self.assertTrue(np.array_equal(videos_list[0], video))

    @require_torch
    def test_make_batched_videos_torch(self):
        """`make_batched_videos` keeps torch tensors as tensors while normalizing batching."""
        # Test a single image is converted to a list of 1 video with 1 frame
        video = get_random_video(16, 32)[0]
        torch_video = torch.from_numpy(video)
        videos_list = make_batched_videos(torch_video)
        self.assertIsInstance(videos_list, list)
        self.assertIsInstance(videos_list[0], torch.Tensor)
        self.assertEqual(videos_list[0].shape, (1, 16, 32, 3))
        self.assertTrue(np.array_equal(videos_list[0][0], video))

        # Test a 4d array of videos is converted to a list of 1 video
        video = get_random_video(16, 32)
        torch_video = torch.from_numpy(video)
        videos_list = make_batched_videos(torch_video)
        self.assertIsInstance(videos_list, list)
        self.assertEqual(len(videos_list), 1)  # fixed: was no-op `assertTrue(len, msg)`
        self.assertIsInstance(videos_list[0], torch.Tensor)
        self.assertEqual(videos_list[0].shape, (8, 16, 32, 3))
        self.assertTrue(np.array_equal(videos_list[0], video))

        # Test a 5d array of batch videos is converted to a list of videos
        torch_video = torch_video[None, ...].repeat(4, 1, 1, 1, 1)
        videos_list = make_batched_videos(torch_video)
        self.assertIsInstance(videos_list, list)
        self.assertEqual(len(videos_list), 4)
        self.assertIsInstance(videos_list[0], torch.Tensor)
        self.assertEqual(videos_list[0].shape, (8, 16, 32, 3))
        self.assertTrue(np.array_equal(videos_list[0], video))

        # Test a list of videos is converted to a list of videos
        video = get_random_video(16, 32)
        torch_video = torch.from_numpy(video)
        videos = [torch_video, torch_video]
        videos_list = make_batched_videos(videos)
        self.assertIsInstance(videos_list, list)
        self.assertIsInstance(videos_list[0], torch.Tensor)
        self.assertEqual(videos_list[0].shape, (8, 16, 32, 3))
        self.assertTrue(np.array_equal(videos_list[0], video))

    def test_resize(self):
        """`resize` applies to a whole 4D video at once."""
        video_processor = BaseVideoProcessor(model_init_kwargs=VideosKwargs)
        video = get_random_video(16, 32, return_torch=True)

        # Size can be an int or a tuple of ints.
        size_dict = SizeDict(**get_size_dict((8, 8), param_name="size"))
        resized_video = video_processor.resize(video, size=size_dict)
        self.assertIsInstance(resized_video, torch.Tensor)
        self.assertEqual(resized_video.shape, (8, 3, 8, 8))

    def test_normalize(self):
        """`normalize` matches the manual per-channel (x - mean) / std computation."""
        video_processor = BaseVideoProcessor(model_init_kwargs=VideosKwargs)
        array = torch.randn(4, 3, 16, 32)
        mean = [0.1, 0.5, 0.9]
        std = [0.2, 0.4, 0.6]

        # mean and std can be passed as lists or NumPy arrays.
        expected = (array - torch.tensor(mean)[:, None, None]) / torch.tensor(std)[:, None, None]
        normalized_array = video_processor.normalize(array, mean, std)
        torch.testing.assert_close(normalized_array, expected)

    def test_center_crop(self):
        """`center_crop` handles crops smaller, larger, or mixed relative to the input."""
        video_processor = BaseVideoProcessor(model_init_kwargs=VideosKwargs)
        video = get_random_video(16, 32, return_torch=True)

        # Test various crop sizes: bigger on all dimensions, on one of the dimensions only and on both dimensions.
        crop_sizes = [8, (8, 64), 20, (32, 64)]
        for size in crop_sizes:
            size_dict = SizeDict(**get_size_dict(size, default_to_square=True, param_name="crop_size"))
            cropped_video = video_processor.center_crop(video, size_dict)
            self.assertIsInstance(cropped_video, torch.Tensor)

            expected_size = (size, size) if isinstance(size, int) else size
            self.assertEqual(cropped_video.shape, (8, 3, *expected_size))

    def test_convert_to_rgb(self):
        """`convert_to_rgb` expands 1-channel and truncates 4-channel videos to 3 channels."""
        video_processor = BaseVideoProcessor(model_init_kwargs=VideosKwargs)
        video = get_random_video(20, 20, return_torch=True)

        rgb_video = video_processor.convert_to_rgb(video[:, :1])
        self.assertEqual(rgb_video.shape, (8, 3, 20, 20))

        rgb_video = video_processor.convert_to_rgb(torch.cat([video, video[:, :1]], dim=1))
        self.assertEqual(rgb_video.shape, (8, 3, 20, 20))

    def test_group_and_reorder_videos(self):
        """Tests that videos can be grouped by frame size and number of frames"""
        video_1 = get_random_video(20, 20, num_frames=3, return_torch=True)
        video_2 = get_random_video(20, 20, num_frames=5, return_torch=True)

        # Group two videos of same size but different number of frames
        grouped_videos, grouped_videos_index = group_videos_by_shape([video_1, video_2])
        self.assertEqual(len(grouped_videos), 2)
        regrouped_videos = reorder_videos(grouped_videos, grouped_videos_index)
        # `reorder_videos` returns one entry per input video; fixed from no-op `assertTrue(len, msg)`.
        self.assertEqual(len(regrouped_videos), 2)
        self.assertEqual(video_1.shape, regrouped_videos[0].shape)

        # Group two videos of different size but same number of frames
        video_3 = get_random_video(15, 20, num_frames=3, return_torch=True)
        grouped_videos, grouped_videos_index = group_videos_by_shape([video_1, video_3])
        self.assertEqual(len(grouped_videos), 2)
        regrouped_videos = reorder_videos(grouped_videos, grouped_videos_index)
        self.assertEqual(len(regrouped_videos), 2)
        self.assertEqual(video_1.shape, regrouped_videos[0].shape)

        # Group all three videos where some have same size or same frame count
        # But since none have frames and sizes identical, we'll have 3 groups
        grouped_videos, grouped_videos_index = group_videos_by_shape([video_1, video_2, video_3])
        self.assertEqual(len(grouped_videos), 3)
        regrouped_videos = reorder_videos(grouped_videos, grouped_videos_index)
        self.assertEqual(len(regrouped_videos), 3)
        self.assertEqual(video_1.shape, regrouped_videos[0].shape)

        # Group if we had some videos with identical shapes
        grouped_videos, grouped_videos_index = group_videos_by_shape([video_1, video_1, video_3])
        self.assertEqual(len(grouped_videos), 2)
        regrouped_videos = reorder_videos(grouped_videos, grouped_videos_index)
        self.assertEqual(len(regrouped_videos), 3)
        self.assertEqual(video_1.shape, regrouped_videos[0].shape)

        # Group if we had all videos with identical shapes
        grouped_videos, grouped_videos_index = group_videos_by_shape([video_1, video_1, video_1])
        self.assertEqual(len(grouped_videos), 1)
        regrouped_videos = reorder_videos(grouped_videos, grouped_videos_index)
        self.assertEqual(len(regrouped_videos), 3)
        self.assertEqual(video_1.shape, regrouped_videos[0].shape)
@require_vision
@require_av
class LoadVideoTester(unittest.TestCase):
    """Tests for `load_video`: URL vs. local loading, decoding backends, and sampling options."""

    def test_load_video_url(self):
        """A remote mp4 is fully decoded when no sampling arguments are given."""
        video, _ = load_video(
            "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4",
        )
        self.assertEqual(video.shape, (243, 360, 640, 3))  # 243 frames is the whole video, no sampling applied

    def test_load_video_local(self):
        """A local file path (downloaded from the Hub) decodes to the same full-length video."""
        video_file_path = hf_hub_download(
            repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset"
        )
        video, _ = load_video(video_file_path)
        self.assertEqual(video.shape, (243, 360, 640, 3))  # 243 frames is the whole video, no sampling applied

    # FIXME: @raushan, yt-dlp downloading works but for some reason it cannot redirect to out buffer?
    # @requires_yt_dlp
    # def test_load_video_youtube(self):
    #     video = load_video("https://www.youtube.com/watch?v=QC8iQqtG0hg")
    #     self.assertEqual(video.shape, (243, 360, 640, 3))  # 243 frames is the whole video, no sampling applied

    @require_decord
    @require_torchvision
    @require_torchcodec
    @require_cv2
    def test_load_video_backend_url(self):
        """Only `decord` and `torchcodec` support loading directly from a URL; the
        `opencv` and `torchvision` backends must raise a ValueError for URLs."""
        video, _ = load_video(
            "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4",
            backend="decord",
        )
        self.assertEqual(video.shape, (243, 360, 640, 3))

        video, _ = load_video(
            "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4",
            backend="torchcodec",
        )
        self.assertEqual(video.shape, (243, 360, 640, 3))

        # Can't use certain backends with url
        with self.assertRaises(ValueError):
            video, _ = load_video(
                "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4",
                backend="opencv",
            )
        with self.assertRaises(ValueError):
            video, _ = load_video(
                "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4",
                backend="torchvision",
            )

    @require_decord
    @require_torchvision
    @require_torchcodec
    @require_cv2
    def test_load_video_backend_local(self):
        """All four backends decode a local file identically and return `VideoMetadata`."""
        video_file_path = hf_hub_download(
            repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset"
        )

        video, metadata = load_video(video_file_path, backend="decord")
        self.assertEqual(video.shape, (243, 360, 640, 3))
        self.assertIsInstance(metadata, VideoMetadata)

        video, metadata = load_video(video_file_path, backend="opencv")
        self.assertEqual(video.shape, (243, 360, 640, 3))
        self.assertIsInstance(metadata, VideoMetadata)

        video, metadata = load_video(video_file_path, backend="torchvision")
        self.assertEqual(video.shape, (243, 360, 640, 3))
        self.assertIsInstance(metadata, VideoMetadata)

        video, metadata = load_video(video_file_path, backend="torchcodec")
        self.assertEqual(video.shape, (243, 360, 640, 3))
        self.assertIsInstance(metadata, VideoMetadata)

    def test_load_video_num_frames(self):
        """`num_frames` subsamples the video to exactly that many frames."""
        video, _ = load_video(
            "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4",
            num_frames=16,
        )
        self.assertEqual(video.shape, (16, 360, 640, 3))

        video, _ = load_video(
            "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4",
            num_frames=22,
        )
        self.assertEqual(video.shape, (22, 360, 640, 3))

    def test_load_video_fps(self):
        """`fps` samples frames at the requested rate; it is mutually exclusive with `num_frames`."""
        video, _ = load_video(
            "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4", fps=1
        )
        self.assertEqual(video.shape, (9, 360, 640, 3))

        video, _ = load_video(
            "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4", fps=2
        )
        self.assertEqual(video.shape, (19, 360, 640, 3))

        # `num_frames` is mutually exclusive with `video_fps`
        with self.assertRaises(ValueError):
            video, _ = load_video(
                "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4",
                fps=1,
                num_frames=10,
            )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/utils/test_video_utils.py",
"license": "Apache License 2.0",
"lines": 306,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/utils/import_structures/import_structure_raw_register_with_versions.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# fmt: off
from transformers.utils.import_utils import requires
# Fixture objects exercising `@requires` with every supported version specifier.
# NOTE(review): this file is consumed by the import-structure registration tests
# (`# fmt: off` above is intentional); only comments are added here — the
# decorator forms and definition order must stay unchanged.

# `>=` specifier
@requires(backends=("torch>=2.5",))
class D0:
    def __init__(self):
        pass


@requires(backends=("torch>=2.5",))
def d0():
    pass


# `>` specifier
@requires(backends=("torch>2.5",))
class D1:
    def __init__(self):
        pass


@requires(backends=("torch>2.5",))
def d1():
    pass


# `<=` specifier
@requires(backends=("torch<=2.5",))
class D2:
    def __init__(self):
        pass


@requires(backends=("torch<=2.5",))
def d2():
    pass


# `<` specifier
@requires(backends=("torch<2.5",))
class D3:
    def __init__(self):
        pass


@requires(backends=("torch<2.5",))
def d3():
    pass


# `==` specifier
@requires(backends=("torch==2.5",))
class D4:
    def __init__(self):
        pass


@requires(backends=("torch==2.5",))
def d4():
    pass


# `!=` specifier
@requires(backends=("torch!=2.5",))
class D5:
    def __init__(self):
        pass


@requires(backends=("torch!=2.5",))
def d5():
    pass


# multiple backends, mixed specifiers
@requires(backends=("torch>=2.5", "accelerate<0.20"))
class D6:
    def __init__(self):
        pass


@requires(backends=("torch>=2.5", "accelerate<0.20"))
def d6():
    pass
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/utils/import_structures/import_structure_raw_register_with_versions.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:utils/extract_pr_number_from_circleci.py | # Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Used by `.github/workflows/trigger_circleci.yml` to get the pull request number in CircleCI job runs."""
import os
def extract_pr_number() -> str:
    """Return the pull request number for the current CircleCI job, or "" if unavailable.

    The number is taken from the trailing path segment of `CIRCLE_PULL_REQUEST`
    (a PR URL) when set; otherwise, a `pull/<number>` style `CIRCLE_BRANCH` is
    used as a fallback. Extracted into a function so the logic is unit-testable;
    the printed output is unchanged.
    """
    pr_number = ""
    pr = os.environ.get("CIRCLE_PULL_REQUEST", "")
    if len(pr) > 0:
        pr_number = pr.split("/")[-1]
    if pr_number == "":
        branch = os.environ.get("CIRCLE_BRANCH", "")
        if branch.startswith("pull/"):
            # `[1:2]` safely yields the segment after "pull/" (empty join if absent).
            pr_number = "".join(branch.split("/")[1:2])
    return pr_number


if __name__ == "__main__":
    print(extract_pr_number())
| {
"repo_id": "huggingface/transformers",
"file_path": "utils/extract_pr_number_from_circleci.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/utils/test_auto_docstring.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for auto_docstring decorator and check_auto_docstrings function.
"""
import importlib
import os
import statistics
import sys
import tempfile
import textwrap
import time
import unittest
from pathlib import Path
import torch
from transformers.configuration_utils import PretrainedConfig
from transformers.image_processing_utils import BatchFeature
from transformers.image_processing_utils_fast import BaseImageProcessorFast
from transformers.image_utils import ImageInput
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack
from transformers.testing_utils import require_torch
from transformers.tokenization_utils_base import PreTokenizedInput, TextInput
from transformers.utils.auto_docstring import (
auto_docstring,
)
from transformers.utils.import_utils import is_torch_available
if is_torch_available():
import torch
_repo_root = Path(__file__).resolve().parent.parent.parent
sys.path.insert(0, str(_repo_root / "utils"))
from check_docstrings import ( # noqa: E402
_build_ast_indexes,
_find_typed_dict_classes,
find_files_with_auto_docstring,
update_file_with_new_docstrings,
)
class TestCheckDocstrings(unittest.TestCase):
"""Test check_auto_docstrings static analysis tool for detecting and fixing docstring issues."""
    def test_missing_args_detection_and_placeholder_generation(self):
        """Test that missing custom args are detected and placeholders generated while preserving Examples and code."""
        with tempfile.TemporaryDirectory() as tmpdir:
            test_file = os.path.join(tmpdir, "model.py")
            # Fixture module: `custom_temperature` is a custom arg with no docstring entry,
            # so the checker must flag it and (in overwrite mode) insert a placeholder.
            original = textwrap.dedent("""
                from transformers.utils.auto_docstring import auto_docstring

                @auto_docstring
                def forward(self, input_ids, custom_temperature: float = 1.0):
                    '''
                    Example:

                    ```python
                    >>> model.forward(input_ids, custom_temperature=0.7)
                    ```
                    '''
                    result = input_ids * custom_temperature
                    return result
            """)
            with open(test_file, "w") as f:
                f.write(original)

            with open(test_file, "r") as f:
                content = f.read()
            items = _build_ast_indexes(content)
            lines = content.split("\n")

            # Test detection (overwrite=False) - should detect missing arg
            missing, fill, redundant = update_file_with_new_docstrings(
                test_file, lines, items, content, overwrite=False
            )
            self.assertTrue(any("custom_temperature" in msg for msg in missing))

            # Generate placeholders (overwrite=True)
            update_file_with_new_docstrings(test_file, lines, items, content, overwrite=True)
            with open(test_file, "r") as f:
                updated = f.read()

            # Verify results
            self.assertIn("custom_temperature", updated)
            self.assertIn("<fill_docstring>", updated)  # Placeholder added
            self.assertIn("input_ids", updated)  # Standard arg from ModelArgs
            self.assertIn("Example:", updated)  # Example preserved
            self.assertIn("result = input_ids * custom_temperature", updated)  # Code preserved
def test_multi_item_file_processing(self):
"""Test processing files with multiple @auto_docstring decorators (class + method) in a single pass."""
with tempfile.TemporaryDirectory() as tmpdir:
test_file = os.path.join(tmpdir, "modeling.py")
original = textwrap.dedent("""
from transformers.utils.auto_docstring import auto_docstring
from transformers.modeling_utils import PreTrainedModel
@auto_docstring
class MyModel(PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.layer = None
@auto_docstring
def forward(self, input_ids, scale_factor: float = 1.0):
'''
Example:
```python
>>> outputs = model.forward(input_ids, scale_factor=2.0)
```
'''
return self.layer(input_ids) * scale_factor
""")
with open(test_file, "w") as f:
f.write(original)
with open(test_file, "r") as f:
content = f.read()
items = _build_ast_indexes(content)
# Should find 2 decorated items
self.assertEqual(len(items), 2)
self.assertEqual(items[0].kind, "class")
self.assertEqual(items[1].kind, "function")
lines = content.split("\n")
# Detect issues
missing, fill, redundant = update_file_with_new_docstrings(
test_file, lines, items, content, overwrite=False
)
# Should detect missing scale_factor in forward method
self.assertTrue(any("scale_factor" in msg for msg in missing))
# Update file
update_file_with_new_docstrings(test_file, lines, items, content, overwrite=True)
with open(test_file, "r") as f:
updated = f.read()
# Verify updates and preservation
self.assertIn("scale_factor", updated) # Custom arg added with placeholder
self.assertIn("<fill_docstring>", updated) # Placeholder present
self.assertIn("Example:", updated) # Example preserved
self.assertIn("self.layer = None", updated) # __init__ code preserved
self.assertIn("return self.layer(input_ids) * scale_factor", updated) # forward code preserved
def test_typed_dict_field_detection(self):
"""Test that _find_typed_dict_classes correctly identifies custom fields vs standard inherited fields."""
content = textwrap.dedent("""
from typing import TypedDict
from transformers.processing_utils import ImagesKwargs
class CustomImageKwargs(ImagesKwargs, total=False):
'''
custom_mode (`str`):
Custom processing mode.
'''
# Standard field from ImagesKwargs - should be in all_fields but not fields
do_resize: bool
# Custom fields - should be in both all_fields and fields
custom_mode: str
undocumented_custom: int
""")
typed_dicts = _find_typed_dict_classes(content)
# Should find the TypedDict
self.assertEqual(len(typed_dicts), 1)
self.assertEqual(typed_dicts[0]["name"], "CustomImageKwargs")
# all_fields includes everything
self.assertIn("do_resize", typed_dicts[0]["all_fields"])
self.assertIn("custom_mode", typed_dicts[0]["all_fields"])
self.assertIn("undocumented_custom", typed_dicts[0]["all_fields"])
# fields only includes custom fields (not standard args like do_resize)
# Both documented and undocumented custom fields are included
self.assertIn("custom_mode", typed_dicts[0]["fields"])
self.assertIn("undocumented_custom", typed_dicts[0]["fields"])
self.assertNotIn("do_resize", typed_dicts[0]["fields"]) # Standard arg excluded
def test_file_discovery_finds_decorated_files(self):
"""Test that check_auto_docstrings can discover files containing @auto_docstring."""
with tempfile.TemporaryDirectory() as tmpdir:
has_decorator = os.path.join(tmpdir, "modeling.py")
no_decorator = os.path.join(tmpdir, "utils.py")
with open(has_decorator, "w") as f:
f.write("@auto_docstring\ndef forward(self): pass")
with open(no_decorator, "w") as f:
f.write("def helper(): pass")
found = find_files_with_auto_docstring([has_decorator, no_decorator])
self.assertEqual(len(found), 1)
self.assertEqual(found[0], has_decorator)
class DummyConfig(PretrainedConfig):
    """Minimal configuration used to exercise @auto_docstring on the dummy test model."""

    model_type = "dummy_test"

    def __init__(self, vocab_size=1000, hidden_size=768, num_attention_heads=12, **kwargs):
        # Let the base class consume generic options first, then record our own.
        super().__init__(**kwargs)
        for attr_name, attr_value in (
            ("vocab_size", vocab_size),
            ("hidden_size", hidden_size),
            ("num_attention_heads", num_attention_heads),
        ):
            setattr(self, attr_name, attr_value)
# Model fixture: @auto_docstring merges the r""" blocks below with the standard
# argument descriptions at class-creation time, and TestFullDocstringGeneration
# asserts the generated __doc__ verbatim — any edit to these docstrings must be
# mirrored in the expected strings of that test.
@auto_docstring
class DummyForTestModel(PreTrainedModel):
    # Links the model to its config class for from_pretrained / auto_docstring.
    config_class = DummyConfig
    def __init__(self, config: DummyConfig):
        super().__init__(config)
    # Only the NON-standard args (temperature, custom_dict) and the Example are
    # documented here; input_ids, attention_mask, etc. come from ModelArgs.
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        position_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        temperature: float = 1.0,
        custom_dict: dict[str, int | float] | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
    ) -> CausalLMOutputWithPast:
        r"""
        temperature (`float`, *optional*, defaults to 1.0):
            Temperature value for scaling logits during generation.
        custom_dict (`dict[str, Union[int, float]]`, *optional*):
            Custom dictionary parameter with string keys and numeric values.
        Example:
        ```python
        >>> from transformers import AutoTokenizer, DummyForTestModel
        >>> import torch
        >>> model = DummyForTestModel.from_pretrained("dummy-model")
        >>> tokenizer = AutoTokenizer.from_pretrained("dummy-model")
        >>> inputs = tokenizer("Hello world", return_tensors="pt")
        >>> outputs = model.forward(**inputs, temperature=0.7)
        >>> logits = outputs.logits
        ```
        """
        # The body is irrelevant: only the generated docstring is under test.
        pass
# TypedDict fixture: the docstring entries below are unrolled by @auto_docstring
# into the docs of methods accepting `**kwargs: Unpack[ComplexProcessorKwargs]`.
# TestFullDocstringGeneration asserts the generated text verbatim — keep the
# docstring and the expected strings in sync.
class ComplexProcessorKwargs(ProcessingKwargs, total=False):
    r"""
    custom_processing_mode (`str`, *optional*, defaults to `"standard"`):
        Custom processing mode for advanced text/image processing. Can be 'standard', 'enhanced', or 'experimental'.
    enable_advanced_features (`bool`, *optional*, defaults to `False`):
        Whether to enable advanced processing features like custom tokenization strategies.
    custom_threshold (`float`, *optional*, defaults to 0.5):
        Custom threshold value for filtering or processing decisions.
    output_format (`str`, *optional*, defaults to `"default"`):
        Output format specification. Can be 'default', 'extended', or 'minimal'.
    """
    # Custom (non-standard) kwargs; each is documented in the class docstring above.
    custom_processing_mode: str
    enable_advanced_features: bool
    custom_threshold: float
    output_format: str
# Processor fixture: @auto_docstring builds the class docstring from __init__'s
# r""" block and the __call__ docstring from ComplexProcessorKwargs; both
# generated strings are asserted verbatim by TestFullDocstringGeneration.
@auto_docstring
class DummyProcessorForTest(ProcessorMixin):
    def __init__(
        self,
        image_processor=None,
        tokenizer=None,
        custom_processing_mode="standard",
        enable_advanced_features=False,
        custom_threshold=0.5,
        output_format="default",
        **kwargs,
    ):
        # Documents only the non-standard constructor args; image_processor and
        # tokenizer descriptions come from the ProcessorMixin templates.
        r"""
        custom_processing_mode (`str`, *optional*, defaults to `"standard"`):
            Custom processing mode for advanced text/image processing. Can be 'standard', 'enhanced', or 'experimental'.
        enable_advanced_features (`bool`, *optional*, defaults to `False`):
            Whether to enable advanced processing features like custom tokenization strategies.
        custom_threshold (`float`, *optional*, defaults to 0.5):
            Custom threshold value for filtering or processing decisions.
        output_format (`str`, *optional*, defaults to `"default"`):
            Output format specification. Can be 'default', 'extended', or 'minimal'.
        """
        pass
    # The kwargs TypedDict is unrolled into this method's generated docstring.
    @auto_docstring
    def __call__(
        self,
        images: ImageInput | None = None,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] = None,
        **kwargs: Unpack[ComplexProcessorKwargs],
    ) -> BatchFeature:
        r"""
        Example:
        ```python
        >>> from transformers import DummyProcessorForTest
        >>> processor = DummyProcessorForTest.from_pretrained("dummy-processor")
        >>> inputs = processor(text="Hello world", images=["image.jpg"], return_tensors="pt")
        ```
        """
        # Body intentionally empty: only the generated docstring is under test.
        pass
# Custom ImagesKwargs TypedDict: the entries below are unrolled by
# @auto_docstring into the image-processor class/preprocess docstrings and
# asserted verbatim in TestFullDocstringGeneration.
class DummyImageProcessorKwargs(ImagesKwargs, total=False):
    r"""
    image_grid_pinpoints (`list[list[int]]`, *optional*):
        A list of possible resolutions to use for processing high resolution images. The best resolution is selected
        based on the original size of the image. Can be overridden by `image_grid_pinpoints` in the `preprocess`
        method.
    custom_scale (`float`, *optional*, defaults to 255.0):
        Custom scale factor for preprocessing pipelines.
    """
    # Custom fields only; standard image kwargs are inherited from ImagesKwargs.
    image_grid_pinpoints: list[list[int]]
    custom_scale: float
# Fast image-processor fixture: exercises @auto_docstring's `custom_intro`
# argument plus kwargs unrolling via `valid_kwargs`; the generated docstrings
# are asserted verbatim in TestFullDocstringGeneration.
@auto_docstring(
    custom_intro="""
    Constructs a fast DummyForTest image processor.
    """
)
class DummyForTestImageProcessorFast(BaseImageProcessorFast):
    model_input_names = ["pixel_values"]
    # Tells auto_docstring which TypedDict to unroll into the class docstring.
    valid_kwargs = DummyImageProcessorKwargs
    def __init__(self, **kwargs: Unpack[DummyImageProcessorKwargs]):
        super().__init__(**kwargs)
    @auto_docstring
    def preprocess(
        self,
        images: ImageInput,
        **kwargs: Unpack[DummyImageProcessorKwargs],
    ) -> BatchFeature:
        r"""
        Example:
        ```python
        >>> from transformers import DummyForTestImageProcessorFast
        >>> from PIL import Image
        >>> import requests
        >>> processor = DummyForTestImageProcessorFast.from_pretrained("dummy-processor")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = processor.preprocess(images=image, return_tensors="pt")
        ```
        """
        # Body intentionally empty: only the generated docstring is under test.
        pass
@require_torch
class TestFullDocstringGeneration(unittest.TestCase):
    """
    End-to-end tests for @auto_docstring runtime docstring generation.
    Tests validate complete docstrings with single assertEqual assertions to ensure structure,
    formatting, standard args, custom params, and TypedDict unrolling work correctly.
    """
    def test_dummy_model_complete_docstring(self):
        """Test complete class and forward method docstrings for PreTrainedModel with ModelArgs and custom parameters."""
        # Fix: the docstring must be the FIRST statement of the method (it was
        # previously placed after `self.maxDiff = None`, turning it into a dead
        # string expression instead of the method docstring).
        self.maxDiff = None  # show full diffs for these long string comparisons
        actual_class_docstring = DummyForTestModel.__doc__
        expected_class_docstring = """
        This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
        library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
        etc.)
        This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
        Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
        and behavior.
        Parameters:
            config ([`DummyConfig`]):
                Model configuration class with all the parameters of the model. Initializing with a config file does not
                load the weights associated with the model, only the configuration. Check out the
                [`~PreTrainedModel.from_pretrained`] method to load the model weights.
        """
        self.assertEqual(actual_class_docstring, expected_class_docstring)
        actual_docstring = DummyForTestModel.forward.__doc__
        expected_docstring = """ The [`DummyForTestModel`] forward method, overrides the `__call__` special method.
        <Tip>
        Although the recipe for forward pass needs to be defined within this function, one should call the [`Module`]
        instance afterwards instead of this since the former takes care of running the pre and post processing steps while
        the latter silently ignores them.
        </Tip>
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default.
                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.
                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
            position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`.
                [What are position IDs?](../glossary#position-ids)
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
                is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
                model's internal embedding lookup matrix.
            temperature (`float`, *optional*, defaults to 1.0):
                Temperature value for scaling logits during generation.
            custom_dict (`dict[str, Union[int, float]]`, *optional*):
                Custom dictionary parameter with string keys and numeric values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
                tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
                more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        Returns:
            [`~modeling_outputs.CausalLMOutputWithPast`] or `tuple(torch.FloatTensor)`: A [`~modeling_outputs.CausalLMOutputWithPast`] or a tuple of
            `torch.FloatTensor` (if `return_dict=False` is passed or when `config.return_dict=False`) comprising various
            elements depending on the configuration ([`None`]) and inputs.
            - **loss** (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) -- Language modeling loss (for next-token prediction).
            - **logits** (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`) -- Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
            - **past_key_values** (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`) -- It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
                Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
                `past_key_values` input) to speed up sequential decoding.
            - **hidden_states** (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`) -- Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
                one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
                Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
            - **attentions** (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`) -- Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
                sequence_length)`.
                Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
                heads.
        Example:
        ```python
        >>> from transformers import AutoTokenizer, DummyForTestModel
        >>> import torch
        >>> model = DummyForTestModel.from_pretrained("dummy-model")
        >>> tokenizer = AutoTokenizer.from_pretrained("dummy-model")
        >>> inputs = tokenizer("Hello world", return_tensors="pt")
        >>> outputs = model.forward(**inputs, temperature=0.7)
        >>> logits = outputs.logits
        ```
        """
        self.assertEqual(actual_docstring, expected_docstring)
    def test_dummy_processor_complete_docstring(self):
        """Test complete class and __call__ docstrings for ProcessorMixin with complex TypedDict kwargs unrolling."""
        # Docstring moved above the statement so it is the real method docstring.
        self.maxDiff = None
        actual_docstring = DummyProcessorForTest.__call__.__doc__
        expected_docstring = """ Args:
            images (`Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, list[PIL.Image.Image], list[numpy.ndarray], list[torch.Tensor]]`, *optional*):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            text (`Union[str, list[str], list[list[str]]]`, *optional*):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If you pass a pretokenized input, set `is_split_into_words=True` to avoid ambiguity with batched inputs.
            custom_processing_mode (`str`, *kwargs*, *optional*, defaults to `"standard"`):
                Custom processing mode for advanced text/image processing. Can be 'standard', 'enhanced', or 'experimental'.
            enable_advanced_features (`bool`, *kwargs*, *optional*, defaults to `False`):
                Whether to enable advanced processing features like custom tokenization strategies.
            custom_threshold (`float`, *kwargs*, *optional*, defaults to 0.5):
                Custom threshold value for filtering or processing decisions.
            output_format (`str`, *kwargs*, *optional*, defaults to `"default"`):
                Output format specification. Can be 'default', 'extended', or 'minimal'.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
            **kwargs ([`ProcessingKwargs`], *optional*):
                Additional processing options for each modality (text, images, videos, audio). Model-specific parameters
                are listed above; see the TypedDict class for the complete list of supported arguments.
        Returns:
            `~image_processing_base.BatchFeature`:
            - **data** (`dict`) -- Dictionary of lists/arrays/tensors returned by the __call__ method ('pixel_values', etc.).
            - **tensor_type** (`Union[None, str, TensorType]`, *optional*) -- You can give a tensor_type here to convert the lists of integers in PyTorch/Numpy Tensors at
            initialization.
        Example:
        ```python
        >>> from transformers import DummyProcessorForTest
        >>> processor = DummyProcessorForTest.from_pretrained("dummy-processor")
        >>> inputs = processor(text="Hello world", images=["image.jpg"], return_tensors="pt")
        ```
        """
        self.assertEqual(actual_docstring, expected_docstring)
        actual_class_docstring = DummyProcessorForTest.__doc__
        expected_class_docstring = """Constructs a DummyProcessorForTest which wraps a image processor and a tokenizer into a single processor.
        [`DummyProcessorForTest`] offers all the functionalities of [`image_processor_class`] and [`tokenizer_class`]. See the
        [`~image_processor_class`] and [`~tokenizer_class`] for more information.
        Parameters:
            image_processor (`image_processor_class`):
                The image processor is a required input.
            tokenizer (`tokenizer_class`):
                The tokenizer is a required input.
            custom_processing_mode (`str`, *optional*, defaults to `"standard"`):
                Custom processing mode for advanced text/image processing. Can be 'standard', 'enhanced', or 'experimental'.
            enable_advanced_features (`bool`, *optional*, defaults to `False`):
                Whether to enable advanced processing features like custom tokenization strategies.
            custom_threshold (`float`, *optional*, defaults to 0.5):
                Custom threshold value for filtering or processing decisions.
            output_format (`str`, *optional*, defaults to `"default"`):
                Output format specification. Can be 'default', 'extended', or 'minimal'.
        """
        self.assertEqual(actual_class_docstring, expected_class_docstring)
    def test_dummy_image_processor_complete_docstring(self):
        """Test complete class and preprocess docstrings for BaseImageProcessorFast with custom ImagesKwargs and custom_intro."""
        # Docstring moved above the statement so it is the real method docstring.
        self.maxDiff = None
        actual_preprocess_docstring = DummyForTestImageProcessorFast.preprocess.__doc__
        expected_preprocess_docstring = """ Args:
            images (`Union[PIL.Image.Image, numpy.ndarray, torch.Tensor, list[PIL.Image.Image], list[numpy.ndarray], list[torch.Tensor]]`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            image_grid_pinpoints (`list[list[int]]`, *kwargs*, *optional*):
                A list of possible resolutions to use for processing high resolution images. The best resolution is selected
                based on the original size of the image. Can be overridden by `image_grid_pinpoints` in the `preprocess`
                method.
            custom_scale (`float`, *kwargs*, *optional*, defaults to 255.0):
                Custom scale factor for preprocessing pipelines.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                Returns stacked tensors if set to `'pt'`, otherwise returns a list of tensors.
            **kwargs ([`ImagesKwargs`], *optional*):
                Additional image preprocessing options. Model-specific kwargs are listed above; see the TypedDict class
                for the complete list of supported arguments.
        Returns:
            `~image_processing_base.BatchFeature`:
            - **data** (`dict`) -- Dictionary of lists/arrays/tensors returned by the __call__ method ('pixel_values', etc.).
            - **tensor_type** (`Union[None, str, TensorType]`, *optional*) -- You can give a tensor_type here to convert the lists of integers in PyTorch/Numpy Tensors at
            initialization.
        Example:
        ```python
        >>> from transformers import DummyForTestImageProcessorFast
        >>> from PIL import Image
        >>> import requests
        >>> processor = DummyForTestImageProcessorFast.from_pretrained("dummy-processor")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = processor.preprocess(images=image, return_tensors="pt")
        ```
        """
        self.assertEqual(actual_preprocess_docstring, expected_preprocess_docstring)
        actual_class_docstring = DummyForTestImageProcessorFast.__doc__
        expected_class_docstring = """
    Constructs a fast DummyForTest image processor.
        Args:
            image_grid_pinpoints (`list[list[int]]`, *kwargs*, *optional*):
                A list of possible resolutions to use for processing high resolution images. The best resolution is selected
                based on the original size of the image. Can be overridden by `image_grid_pinpoints` in the `preprocess`
                method.
            custom_scale (`float`, *kwargs*, *optional*, defaults to 255.0):
                Custom scale factor for preprocessing pipelines.
            **kwargs ([`ImagesKwargs`], *optional*):
                Additional image preprocessing options. Model-specific kwargs are listed above; see the TypedDict class
                for the complete list of supported arguments.
        """
        self.assertEqual(actual_class_docstring, expected_class_docstring)
# ---------------------------------------------------------------------------
# Performance tests for auto_docstring
# ---------------------------------------------------------------------------
class TestAutoDocstringPerformance:
    """
    Performance tests for auto_docstring.
    The decorator runs at *class-definition / import time*, so with hundreds of
    models in the library the cumulative cost matters even though each individual
    call looks cheap. These tests assert an upper bound to catch regressions.
    """
    # Upper bound (%) of total import time that auto_docstring overhead may take.
    # Relative metric; robust across CI vs local. Catches serious regressions.
    AUTO_DOCSTRING_COST_PCT_UPPER_BOUND = 70.0
    def test_auto_docstring_import_time_upper_bound(self):
        """
        Asserts that auto_docstring overhead stays below a percentage of total
        import time.
        Method
        ------
        1. Collect ``modeling_*.py``, ``image_processing_*.py``, ``processing_*.py``
        under ``transformers/models``, then sample every 10th for speed.
        2. Warmup: import the sampled modules once so Python's bytecode cache is hot.
        3. Measure WITH auto_docstring: clear cache, re-import, median over 5 runs.
        4. Measure WITHOUT auto_docstring: noop-patch, clear cache, re-import, median.
        5. cost_pct = (real - noop) / real * 100; assert cost_pct < upper bound.
        """
        if "transformers.utils" not in sys.modules:
            importlib.import_module("transformers.utils")
        # Patch point: model modules look up `auto_docstring` on this module object.
        _utils_module = sys.modules["transformers.utils"]
        # Assumes the standard repo layout tests/utils/ -> repo root -> src/.
        src_root = Path(__file__).resolve().parent.parent.parent / "src"
        models_dir = src_root / "transformers" / "models"
        all_modules: list[str] = []
        for pattern in ("modeling_*.py", "image_processing_*.py", "processing_*.py"):
            for f in sorted(models_dir.rglob(pattern)):
                # Turn src-relative file path into a dotted module name.
                rel = f.with_suffix("").relative_to(src_root)
                all_modules.append(".".join(rel.parts))
        # Every 10th module keeps the test fast while staying representative.
        model_modules = all_modules[::10]
        def _clear():
            # Drop cached model modules so the next import pays the full cost again.
            for key in [k for k in sys.modules if k.startswith("transformers.models")]:
                del sys.modules[key]
        def _import_all():
            for mod in model_modules:
                try:
                    importlib.import_module(mod)
                except Exception:
                    # Some modules need optional deps; skip rather than fail the benchmark.
                    continue
        _import_all()  # warmup
        # With auto_docstring (real)
        times_real: list[float] = []
        for _ in range(5):
            _clear()
            t0 = time.perf_counter()
            _import_all()
            times_real.append(time.perf_counter() - t0)
        # Without auto_docstring (noop patch)
        _orig = _utils_module.auto_docstring
        # Handles both decorator forms: bare `@auto_docstring` (x is the target)
        # and parameterized `@auto_docstring(...)` (x is None, returns identity).
        _noop = lambda x=None, **kw: (lambda f: f) if x is None else x  # noqa: E731
        times_noop: list[float] = []
        for _ in range(5):
            _utils_module.auto_docstring = _noop
            try:
                _clear()
                t0 = time.perf_counter()
                _import_all()
                times_noop.append(time.perf_counter() - t0)
            finally:
                # Always restore the real decorator, even if an import raises.
                _utils_module.auto_docstring = _orig
        # Median is robust against scheduler noise and one-off slow runs.
        median_real = statistics.median(times_real)
        median_noop = statistics.median(times_noop)
        cost_pct = (median_real - median_noop) / median_real * 100 if median_real > 0 else 0.0
        print(f"Cost percentage: {cost_pct:.1f}%")
        assert cost_pct < self.AUTO_DOCSTRING_COST_PCT_UPPER_BOUND, (
            f"auto_docstring cost {cost_pct:.1f}% of import time exceeds upper bound "
            f"{self.AUTO_DOCSTRING_COST_PCT_UPPER_BOUND}% "
            f"({len(model_modules)} of {len(all_modules)} modules)"
        )
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/utils/test_auto_docstring.py",
"license": "Apache License 2.0",
"lines": 594,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/swin2sr/image_processing_swin2sr_fast.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for Swin2SR."""
import torch
import torchvision.transforms.v2.functional as tvF
from ...image_processing_utils import BatchFeature, ChannelDimension, get_image_size
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
group_images_by_shape,
reorder_images,
)
from ...image_utils import ImageInput
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
logging,
)
from .image_processing_swin2sr import Swin2SRImageProcessorKwargs
logger = logging.get_logger(__name__)
@auto_docstring
class Swin2SRImageProcessorFast(BaseImageProcessorFast):
    """Fast (torchvision-backed) image processor for Swin2SR: rescale + symmetric pad."""

    # Defaults used when the corresponding kwarg is not supplied.
    do_rescale = True
    rescale_factor = 1 / 255
    do_pad = True
    size_divisor = 8
    valid_kwargs = Swin2SRImageProcessorKwargs

    def __init__(self, **kwargs: Unpack[Swin2SRImageProcessorKwargs]):
        # Accept the older `pad_size` name: when `size_divisor` was not passed
        # explicitly, it takes over the value given as `pad_size`.
        legacy_pad_size = kwargs.pop("pad_size", None)
        kwargs.setdefault("size_divisor", legacy_pad_size)
        super().__init__(**kwargs)

    def preprocess(self, images: ImageInput, **kwargs: Unpack[Swin2SRImageProcessorKwargs]) -> BatchFeature:
        """Run the standard fast-processor pipeline; actual work happens in `_preprocess`."""
        return super().preprocess(images, **kwargs)

    def pad(self, images: "torch.Tensor", size_divisor: int) -> "torch.Tensor":
        """
        Symmetrically pad images on the bottom/right so both spatial dimensions
        become multiples of `size_divisor`.

        Args:
            images (`torch.Tensor`):
                Images to pad.
            size_divisor (`int`):
                The size to make the height and width divisible by.
        Returns:
            `torch.Tensor`: The padded images.
        """
        height, width = get_image_size(images, ChannelDimension.FIRST)
        # Distance to the next multiple; a dimension that is already divisible
        # still receives one full extra block of padding (matches the original
        # `(dim // divisor + 1) * divisor - dim` formula).
        pad_bottom = size_divisor - height % size_divisor
        pad_right = size_divisor - width % size_divisor
        # torchvision padding order: (left, top, right, bottom).
        return tvF.pad(images, (0, 0, pad_right, pad_bottom), padding_mode="symmetric")

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_rescale: bool,
        rescale_factor: float,
        do_pad: bool,
        size_divisor: int,
        disable_grouping: bool | None,
        return_tensors: str | TensorType | None,
        **kwargs,
    ) -> BatchFeature:
        """Rescale and pad the images batch-wise, then repack them in input order."""
        # Stack identically-shaped images together so each transform runs vectorized.
        grouped_images, original_order = group_images_by_shape(images, disable_grouping=disable_grouping)
        transformed_groups = {}
        for shape, batch in grouped_images.items():
            if do_rescale:
                batch = self.rescale(batch, scale=rescale_factor)
            if do_pad:
                batch = self.pad(batch, size_divisor=size_divisor)
            transformed_groups[shape] = batch
        pixel_values = reorder_images(transformed_groups, original_order)
        return BatchFeature(data={"pixel_values": pixel_values}, tensor_type=return_tensors)

__all__ = ["Swin2SRImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/swin2sr/image_processing_swin2sr_fast.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/vipllava/modular_vipllava.py | # Copyright 2023 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
from transformers.models.llava.modeling_llava import (
LlavaCausalLMOutputWithPast,
LlavaForConditionalGeneration,
LlavaModel,
LlavaModelOutputWithPast,
LlavaPreTrainedModel,
)
from ...activations import ACT2FN
from ...cache_utils import Cache
from ...modeling_outputs import BaseModelOutputWithPooling
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, logging
from ...utils.generic import can_return_tuple
from .configuration_vipllava import VipLlavaConfig
logger = logging.get_logger(__name__)
# Empty subclass: in a modular transformers file this re-exports the Llava
# output class under the VipLlava name (presumably materialized by the modular
# converter into modeling_vipllava.py — no behavior of its own here).
class VipLlavaModelOutputWithPast(LlavaModelOutputWithPast):
    pass
# Empty subclass: re-exports the Llava causal-LM output class under the
# VipLlava name for the modular converter; adds no fields or behavior.
class VipLlavaCausalLMOutputWithPast(LlavaCausalLMOutputWithPast):
    pass
class VipLlavaMultiModalProjector(nn.Module):
    """Maps (possibly concatenated) vision-tower features into the text hidden space.

    Pipeline: LayerNorm -> Linear -> activation -> Linear.
    """

    def __init__(self, config: VipLlavaConfig):
        super().__init__()
        selected_layers = config.vision_feature_layers
        # A single int selects one vision layer; a list means features from
        # several layers are concatenated along the hidden dimension, so the
        # projector input width scales with the number of layers.
        if isinstance(selected_layers, int):
            num_feature_layers = 1
        else:
            num_feature_layers = len(selected_layers)
        in_features = num_feature_layers * config.vision_config.hidden_size
        # Attribute names are part of the checkpoint state-dict layout; keep them.
        self.projector_layernorm = nn.LayerNorm(in_features, eps=config.projector_layernorm_eps)
        self.linear_1 = nn.Linear(in_features, config.text_config.hidden_size, bias=True)
        self.act = ACT2FN[config.projector_hidden_act]
        self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=True)

    def forward(self, hidden_states):
        projected = self.projector_layernorm(hidden_states)
        for stage in (self.linear_1, self.act, self.linear_2):
            projected = stage(projected)
        return projected
class VipLlavaPreTrainedModel(LlavaPreTrainedModel):
    # Intentionally empty: reuses LlavaPreTrainedModel's init/weight handling
    # unchanged; defined only to expose a VipLlava-named base class.
    pass
class VipLlavaModel(LlavaModel):
    @can_return_tuple
    @auto_docstring(
        custom_intro="Obtains image last hidden states from the vision tower and apply multimodal projection."
    )
    def get_image_features(
        self,
        pixel_values: torch.FloatTensor,
        vision_feature_layers: int | list[int] | None = None,
        output_hidden_states: bool | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`):
            The tensors corresponding to the input images.
        vision_feature_layers (`Union[int, list[int]]`, *optional*):
            The vision feature layer, or the list of indexes of the layers to select
            the vision feature.
        """
        # Fall back to the config value when the caller does not override the layer selection.
        vision_feature_layers = (
            vision_feature_layers if vision_feature_layers is not None else self.config.vision_feature_layers
        )
        image_outputs = self.vision_tower(
            pixel_values,
            output_hidden_states=True,  # Ignore arg on purpose: all hidden states are needed to index layers below
            return_dict=True,
            **kwargs,
        )
        # If multiple feature layers are provided (which is usually the case)
        # then the image features are concatenated after the CLS is removed.
        if isinstance(vision_feature_layers, int):
            # `[:, 1:]` drops the leading CLS token before projection.
            image_features = image_outputs.hidden_states[vision_feature_layers][:, 1:]
        else:
            # Usually, we select the features from index 1: the layers -2, -5, -8, -11 and 6
            image_features = [image_outputs.hidden_states[index][:, 1:] for index in vision_feature_layers]
            # Concatenate along the hidden dim; the projector's input width matches this.
            image_features = torch.cat(image_features, dim=-1)
        image_features = self.multi_modal_projector(image_features)
        # Reuse the vision tower's output container, replacing pooler_output with projected features.
        image_outputs.pooler_output = image_features
        return image_outputs

    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        vision_feature_layers: int | list[int] | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **lm_kwargs,
    ) -> tuple | VipLlavaModelOutputWithPast:
        r"""
        vision_feature_layers (`Union[int, list[int]]`, *optional*):
            The vision feature layer, or the list of indexes of the layers to select
            the vision feature.
        """
        # Resolve all None flags against the model config before running.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        vision_feature_layers = (
            vision_feature_layers if vision_feature_layers is not None else self.config.vision_feature_layers
        )
        # XOR: exactly one of input_ids / inputs_embeds must be provided.
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)
        if pixel_values is not None:
            image_features = self.get_image_features(
                pixel_values=pixel_values, vision_feature_layers=vision_feature_layers, return_dict=True
            ).pooler_output
            image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
            # Splice projected image features into the text embeddings at placeholder positions.
            special_image_mask = self.get_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, image_features=image_features
            )
            inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
        outputs = self.language_model(
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            **lm_kwargs,
        )
        output = VipLlavaModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_features if pixel_values is not None else None,
        )
        return output if return_dict else output.to_tuple()
class VipLlavaForConditionalGeneration(LlavaForConditionalGeneration):
    @auto_docstring
    def get_image_features(
        self,
        pixel_values: torch.FloatTensor,
        vision_feature_layers: int | list[int] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`):
            The tensors corresponding to the input images.
        vision_feature_layers (`Union[int, list[int]]`, *optional*):
            The vision feature layer, or the list of indexes of the layers to select
            the vision feature.
        """
        # Thin delegation to the inner VipLlavaModel.
        return self.model.get_image_features(
            pixel_values=pixel_values, vision_feature_layers=vision_feature_layers, **kwargs
        )

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        vision_feature_layers: int | list[int] | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **lm_kwargs,
    ) -> tuple | VipLlavaCausalLMOutputWithPast:
        r"""
        vision_feature_layers (`Union[int, list[int]]`, *optional*):
            The vision feature layer, or the list of indexes of the layers to select
            the vision feature.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        Example:
        ```python
        >>> import torch
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO
        >>> from transformers import AutoProcessor, VipLlavaForConditionalGeneration
        >>> model = VipLlavaForConditionalGeneration.from_pretrained("llava-hf/vip-llava-7b-hf", device_map="auto", dtype=torch.float16)
        >>> processor = AutoProcessor.from_pretrained("llava-hf/vip-llava-7b-hf")
        >>> prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.###Human: <image>\n{}###Assistant:"
        >>> question = "Can you please describe this image?"
        >>> prompt = prompt.format(question)
        >>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-neg.png"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))
        >>> inputs = processor(text=prompt, images=image, return_tensors="pt").to(0, torch.float16)
        >>> # Generate
        >>> generate_ids = model.generate(**inputs, max_new_tokens=20)
        >>> processor.decode(generate_ids[0][len(inputs["input_ids"][0]):], skip_special_tokens=True)
        The image features a brown and white cat sitting on a green surface, with a red ball in its
        ```"""
        # Resolve all None flags against the model config before running.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        vision_feature_layers = (
            vision_feature_layers if vision_feature_layers is not None else self.config.vision_feature_layers
        )
        outputs = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            vision_feature_layers=vision_feature_layers,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            **lm_kwargs,
        )
        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        loss = None
        if labels is not None:
            # loss_function shifts labels internally; vocab size comes from the text sub-config.
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size)
        return VipLlavaCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=outputs.image_hidden_states,
        )
# Public API of this module (consumed by the package-level __init__ and `import *`).
__all__ = ["VipLlavaModel", "VipLlavaForConditionalGeneration", "VipLlavaPreTrainedModel"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/vipllava/modular_vipllava.py",
"license": "Apache License 2.0",
"lines": 262,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/csm/configuration_csm.py | # Copyright 2025 Sesame and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
logger = logging.get_logger(__name__)
class CsmDepthDecoderConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`CsmDepthDecoderModel`]. It is used to instantiate an CSM depth decoder
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield
    a similar configuration to that of the csm-1b.
    e.g. [sesame/csm-1b](https://huggingface.co/sesame/csm-1b)
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        num_codebooks (`int`, *optional*, defaults to 32):
            Number of codebooks used in the underlying codec model responsible for tokenizing the audio.
        backbone_hidden_size (`int`, *optional*, defaults to 2048):
            Dimension of the hidden representations of the backbone model used with this depth decoder.
        vocab_size (`int`, *optional*, defaults to 2051):
            Vocabulary size of the CsmDepthDecoder model. Defines the number of different audio tokens that can be represented by each codebook.
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 8192):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 4):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 2):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 33):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 2050):
            Padding token id.
        bos_token_id (`int`, *optional*):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*):
            End of stream token id.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
        head_dim (`int`, *optional*):
            The attention head dimension. If None, it will default to hidden_size // num_attention_heads
    ```python
    >>> from transformers import CsmDepthDecoderModel, CsmDepthDecoderConfig
    >>> # Initializing a CsmDepthDecoder
    >>> configuration = CsmDepthDecoderConfig()
    >>> model = CsmDepthDecoderModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "csm_depth_decoder_model"
    base_config_key = "depth_decoder_config"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "codebook_size": "vocab_size",
    }
    default_theta = 500000.0
    def __init__(
        self,
        num_codebooks: int | None = 32,
        backbone_hidden_size: int | None = 2048,
        vocab_size: int | None = 2051,
        hidden_size: int | None = 1024,
        intermediate_size: int | None = 8192,
        num_hidden_layers: int | None = 4,
        num_attention_heads: int | None = 8,
        num_key_value_heads: int | None = 2,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 33,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-5,
        use_cache: bool | None = True,
        # NOTE(review): docstring says pad_token_id defaults to 2050, but the
        # signature default is None — confirm which is intended.
        pad_token_id: int | None = None,
        bos_token_id: int | None = None,
        eos_token_id: int | None = None,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        mlp_bias: bool | None = False,
        head_dim: int | None = None,
        **kwargs,
    ):
        # Tied embeddings are explicitly unsupported for the depth decoder.
        if kwargs.pop("tie_word_embeddings", False):
            raise ValueError("`tie_word_embeddings=True` is not supported for CsmDepthDecoderConfig")
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_codebooks = num_codebooks
        self.vocab_size = vocab_size
        self.backbone_hidden_size = backbone_hidden_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.mlp_bias = mlp_bias
        # Derive head_dim from hidden_size when not given explicitly.
        self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
        self.rope_parameters = rope_parameters
        super().__init__(**kwargs)
class CsmConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`CsmForConditionalGeneration`]. It is used to instantiate an CSM
    model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the csm-1b.
    e.g. [sesame/csm-1b](https://huggingface.co/sesame/csm-1b)
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        num_codebooks (`int`, *optional*, defaults to 32):
            Number of codebooks used in the underlying codec model responsible for tokenizing the audio.
        vocab_size (`int`, *optional*, defaults to 2051):
            Vocabulary size of the Csm model. Defines the number of different audio tokens that can be represented by each codebook.
        text_vocab_size (`int`, *optional*, defaults to 128256):
            Vocabulary size of the text input for the Csm model. Defines the number of different text tokens that can be represented.
        hidden_size (`int`, *optional*, defaults to 2048):
            Dimension of the hidden representations of the backbone model.
        intermediate_size (`int`, *optional*, defaults to 8192):
            Dimension of the MLP representations of the backbone model.
        num_hidden_layers (`int`, *optional*, defaults to 16):
            Number of hidden layers in the backbone model Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the backbone model Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245).
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the backbone model Transformer decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 128002):
            Padding token id.
        codebook_pad_token_id (`int`, *optional*, defaults to 2050):
            Padding token id for codebook tokens.
        codebook_eos_token_id (`int`, *optional*, defaults to 0):
            End of stream token id for codebook tokens.
        bos_token_id (`int`, *optional*, defaults to 128000):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*):
            End of stream token id.
        audio_token_id (`int`, *optional*, defaults to 128002):
            Audio token id in the text input.
        audio_eos_token_id (`int`, *optional*, defaults to 128003):
            End of stream token id for audio in the text input.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
        head_dim (`int`, *optional*):
            The attention head dimension. If None, it will default to hidden_size // num_attention_heads
        tie_codebooks_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie the codebook tokens embeddings of the backbone model to the codebook tokens embeddings of the depth decoder.
        depth_decoder_config (`CsmDepthDecoderConfig`, *optional*):
            Configuration for the depth decoder.
        codec_config (`PreTrainedConfig`, *optional*):
            Configuration for the codec.
    ```python
    >>> from transformers import CsmForConditionalGeneration, CsmConfig
    >>> # Initializing a CsmConfig
    >>> configuration = CsmConfig()
    >>> # Initializing a model
    >>> model = CsmForConditionalGeneration(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "csm"
    base_config_key = "csm_config"
    keys_to_ignore_at_inference = ["past_key_values"]
    default_theta = 500000.0
    sub_configs = {
        "codec_config": AutoConfig,
        "depth_decoder_config": CsmDepthDecoderConfig,
    }
    attribute_map = {
        "codebook_size": "vocab_size",
    }
    def __init__(
        self,
        num_codebooks: int | None = 32,
        vocab_size: int | None = 2051,
        text_vocab_size: int | None = 128256,
        hidden_size: int | None = 2048,
        intermediate_size: int | None = 8192,
        num_hidden_layers: int | None = 16,
        num_attention_heads: int | None = 32,
        num_key_value_heads: int | None = 8,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 2048,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-5,
        use_cache: bool | None = True,
        pad_token_id: int | None = 128002,
        codebook_pad_token_id: int | None = 2050,
        codebook_eos_token_id: int | None = 0,
        bos_token_id: int | None = 128000,
        eos_token_id: int | None = None,
        audio_token_id: int | None = 128002,
        audio_eos_token_id: int | None = 128003,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        mlp_bias: bool | None = False,
        head_dim: int | None = None,
        tie_codebooks_embeddings: bool | None = True,
        depth_decoder_config: dict | None = None,
        codec_config: dict | None = None,
        **kwargs,
    ):
        # Tied embeddings are explicitly unsupported for CSM.
        if kwargs.pop("tie_word_embeddings", False):
            raise ValueError("`tie_word_embeddings=True` is not supported for CsmConfig")
        if depth_decoder_config is None:
            self.depth_decoder_config = CsmDepthDecoderConfig()
            logger.info("depth_decoder_config is None, using default depth decoder config.")
        elif isinstance(depth_decoder_config, dict):
            self.depth_decoder_config = CsmDepthDecoderConfig(**depth_decoder_config)
        elif isinstance(depth_decoder_config, CsmDepthDecoderConfig):
            self.depth_decoder_config = depth_decoder_config
        # NOTE(review): if depth_decoder_config is any other type, self.depth_decoder_config is
        # never set and later attribute access will fail — consider raising a TypeError here.
        if codec_config is None:
            self.codec_config = AutoConfig.for_model("mimi")
            logger.info("codec_config is None, using default audio encoder config.")
        elif isinstance(codec_config, dict):
            self.codec_config = AutoConfig.for_model(**codec_config)
        elif isinstance(codec_config, PreTrainedConfig):
            self.codec_config = codec_config
        # NOTE(review): same gap as above for unexpected codec_config types.
        self.text_vocab_size = text_vocab_size
        self.num_codebooks = num_codebooks
        self.audio_token_id = audio_token_id
        self.audio_eos_token_id = audio_eos_token_id
        self.codebook_pad_token_id = codebook_pad_token_id
        self.codebook_eos_token_id = codebook_eos_token_id
        self.tie_codebooks_embeddings = tie_codebooks_embeddings
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.mlp_bias = mlp_bias
        # Derive head_dim from hidden_size when not given explicitly.
        self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
        self.rope_parameters = rope_parameters
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # Force-disable word-embedding tying regardless of kwargs.
        self.tie_word_embeddings = False
        super().__init__(**kwargs)
# Public API of this module (consumed by the package-level __init__ and `import *`).
__all__ = [
    "CsmDepthDecoderConfig",
    "CsmConfig",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/csm/configuration_csm.py",
"license": "Apache License 2.0",
"lines": 321,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/csm/generation_csm.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Optional
import torch
import torch.nn as nn
from ...generation import (
GenerateDecoderOnlyOutput,
GenerationConfig,
GenerationMixin,
GenerationMode,
)
from ...generation.logits_process import LogitsProcessorList
from ...generation.stopping_criteria import MaxLengthCriteria, StoppingCriteriaList
from ...generation.utils import GenerateNonBeamOutput
from ...utils import logging
if TYPE_CHECKING:
from ...generation.streamers import BaseStreamer
logger = logging.get_logger(__name__)
@dataclass
class CsmGenerateOutput(GenerateDecoderOnlyOutput):
    """
    Outputs of CsmForConditionalGeneration.generate.
    Args:
        sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
            if all batches finished early due to the `eos_token_id`.
        scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True`):
            Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
            at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
            each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
        logits (`tuple(torch.FloatTensor)` *optional*, returned when `output_logits=True`):
            Unprocessed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
            at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
            each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
        attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True`):
            Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
            `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
        hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True`):
            Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
            `torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`.
        past_key_values (`Cache`, *optional*, returned when `use_cache=True`):
            Returns the model cache, used to speed up decoding. Different models have a different cache format, check
            the model's documentation for the expected format.
        audio (`list(torch.FloatTensor)` of length `batch_size`):
            The generated audio.
    """
    # Decoded audio waveforms, one tensor per batch element; None when not populated.
    audio: list[torch.Tensor] | None = None
class CsmGenerationMixin(GenerationMixin):
def _get_stopping_criteria(
self,
*args,
**kwargs,
) -> StoppingCriteriaList:
criteria = super()._get_stopping_criteria(*args, **kwargs)
kept_criteria = StoppingCriteriaList()
for criterion in criteria:
if not isinstance(criterion, MaxLengthCriteria):
logger.warning(
f"Csm does not support {criterion.__class__.__name__} stopping criteria, it will be ignored."
)
else:
kept_criteria.append(criterion)
return kept_criteria
    def _prepare_generation_config(
        self, generation_config: GenerationConfig | None, **kwargs: Any
    ) -> tuple[GenerationConfig, dict]:
        """
        This method overrides [~generation.utils.GenerationMixin._prepare_generation_config].
        It ensures that the depth decoder generation config is initialized and that passed args as depth_decoder_* are properly handled.
        """
        # extract depth decoder kwargs and remove them from the main kwargs
        depth_decoder_kwargs = {
            k[len("depth_decoder_") :]: v for k, v in kwargs.items() if k.startswith("depth_decoder_")
        }
        # remove the depth decoder keys from the original kwargs
        kwargs = {k: v for k, v in kwargs.items() if not k.startswith("depth_decoder_")}
        # initialize the generation config
        generation_config, model_kwargs = super()._prepare_generation_config(generation_config, **kwargs)
        self.depth_decoder.generation_config.update(**depth_decoder_kwargs)
        # ensure the depth decoder generation config is valid
        # NOTE(review): `getattr(...) or default` also replaces an explicit 0 with the
        # default; harmless here only because 0 would fail the equality check below anyway.
        depth_decoder_min_new_tokens = getattr(self.depth_decoder.generation_config, "min_new_tokens") or (
            self.config.num_codebooks - 1
        )
        depth_decoder_max_new_tokens = getattr(self.depth_decoder.generation_config, "max_new_tokens") or (
            self.config.num_codebooks - 1
        )
        # Set comparison: both min and max must equal num_codebooks - 1 exactly.
        if {depth_decoder_min_new_tokens, depth_decoder_max_new_tokens} != {self.config.num_codebooks - 1}:
            raise ValueError(
                f"depth_decoder_generation_config's min_new_tokens ({depth_decoder_min_new_tokens}) and max_new_tokens ({depth_decoder_max_new_tokens}) must be equal to self.config.num_codebooks - 1 ({self.config.num_codebooks - 1})"
            )
        elif self.depth_decoder.generation_config.return_dict_in_generate:
            logger.warning(
                "depth_decoder_generation_config.return_dict_in_generate is set to True, but this will be ignored as the depth decoder model does not return a dictionary in generate"
            )
            self.depth_decoder.generation_config.return_dict_in_generate = False
        self.depth_decoder.generation_config.min_new_tokens = depth_decoder_min_new_tokens
        self.depth_decoder.generation_config.max_new_tokens = depth_decoder_max_new_tokens
        # Monkey patch the get_generation_mode method to support CSM model
        original_get_generation_mode = generation_config.get_generation_mode
        def patched_get_generation_mode(assistant_model=None):
            # CSM's custom sampling loop only supports greedy search and sampling.
            generation_mode = original_get_generation_mode(assistant_model)
            if generation_mode not in [GenerationMode.GREEDY_SEARCH, GenerationMode.SAMPLE]:
                raise ValueError(
                    f"Generation mode {generation_mode} is not supported for CSM model. Please set generation parameters to use greedy or sampling generation."
                )
            return generation_mode
        generation_config.get_generation_mode = patched_get_generation_mode
        return generation_config, model_kwargs
    def _sample(
        self,
        input_ids: torch.LongTensor,
        logits_processor: LogitsProcessorList,
        stopping_criteria: StoppingCriteriaList,
        generation_config: GenerationConfig,
        synced_gpus: bool = False,
        streamer: Optional["BaseStreamer"] = None,
        **model_kwargs,
    ) -> GenerateNonBeamOutput | torch.LongTensor:
        """
        This method overrides [~generation.utils.GenerationMixin._sample].
        To ease maintenance, modifications are marked with the comment "Csm specific".
        Indeed, Csm model requires a custom generation sampling step:
        1. Infer the backbone model to sample the first codebook token
        2. Call generate on the depth decoder with the first codebook token as input_ids to sample the next codebook tokens
        3. Use these generated codebook tokens as input_ids to sample the next first codebook token using the backbone model
        4. Repeat until stopping criteria is met
        Csm supports two stopping criteria:
        - stop when the generated sequence is at max_length
        - stop when all the generated codebook tokens are the codebook_eos_token_id
        """
        # init values
        # *************** Csm specific ***************
        # pad/eos here are per-codebook audio token ids, not text-vocabulary ids
        pad_token_id = self.config.codebook_pad_token_id
        has_eos_stopping_criteria = generation_config._eos_token_tensor is not None
        # ============================================
        output_attentions = generation_config.output_attentions
        output_hidden_states = generation_config.output_hidden_states
        output_scores = generation_config.output_scores
        output_logits = generation_config.output_logits
        return_dict_in_generate = generation_config.return_dict_in_generate
        do_sample = generation_config.do_sample
        # init attention / hidden states / scores tuples
        scores = () if (return_dict_in_generate and output_scores) else None
        raw_logits = () if (return_dict_in_generate and output_logits) else None
        decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
        decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
        # keep track of which sequences are already finished
        batch_size, cur_len = input_ids.shape[:2]
        this_peer_finished = False
        unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=input_ids.device)
        model_kwargs = self._get_initial_cache_position(cur_len, input_ids.device, model_kwargs)
        # *************** Csm specific ***************
        if input_ids.ndim == 2 and model_kwargs.get("inputs_embeds") is None:
            # in the case where the passed input_ids correspond to text tokens, i.e. don't have a third dimension for codebook ids,
            # we need to remove the input length to the MaxLengthCriteria stopping criteria has such input are not returned
            for criterion in stopping_criteria:
                if isinstance(criterion, MaxLengthCriteria):
                    criterion.max_length -= cur_len
        # ============================================
        model_forward = (
            self.get_compiled_call(generation_config.compile_config)
            if self._valid_auto_compile_criteria(model_kwargs, generation_config)
            else self.__call__
        )
        # *************** Csm specific ***************
        # hidden states must always be returned: the backbone's last hidden state
        # is the conditioning input of the depth decoder at every step (see below)
        model_kwargs.update({"output_hidden_states": True})
        prefill_consumed = False
        outputs = self._prefill(
            input_ids,
            generation_config,
            model_kwargs,
            is_first_iteration=not generation_config.is_assistant,
        )
        # main decode loop: one backbone forward, then one depth-decoder generate, per audio frame
        while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device):
            if prefill_consumed:
                next_sequence_length = 1 if model_kwargs["use_cache"] else None
                model_inputs = self.prepare_inputs_for_generation(
                    input_ids, next_sequence_length=next_sequence_length, **model_kwargs
                )
                # prepare variable output controls (note: some models won't accept all output controls)
                model_inputs.update({"output_attentions": output_attentions} if output_attentions else {})
                outputs = model_forward(**model_inputs, return_dict=True)
            prefill_consumed = True
            # synced_gpus: don't waste resources running the code we don't need; kwargs must be updated before skipping
            model_kwargs = self._update_model_kwargs_for_generation(
                outputs,
                model_kwargs,
            )
            if synced_gpus and this_peer_finished:
                continue
            # Clone is needed to avoid keeping a hanging ref to outputs.logits which may be very large for first iteration
            # (the clone itself is always small)
            next_token_logits = outputs.logits[:, -1, :].clone().float()
            next_token_logits = next_token_logits.to(input_ids.device)
            # pre-process distribution
            next_token_scores = logits_processor(input_ids, next_token_logits)
            # Store scores, attentions and hidden_states when required
            if return_dict_in_generate:
                if output_scores:
                    scores += (next_token_scores,)
                if output_logits:
                    raw_logits += (next_token_logits,)
                if output_attentions:
                    decoder_attentions += (outputs.attentions,)
                if output_hidden_states:
                    decoder_hidden_states += (outputs.hidden_states,)
            # token selection
            if do_sample:
                probs = nn.functional.softmax(next_token_scores, dim=-1)
                # TODO (joao): this OP throws "skipping cudagraphs due to ['incompatible ops']", find solution
                next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
            else:
                next_tokens = torch.argmax(next_token_scores, dim=-1)
            # *************** Csm specific ***************
            # infer the depth decoder
            first_codebook_ids = next_tokens[:, None]
            # adds place holder in position 0 that will be replaced by the backbone_last_hidden_state
            depth_decoder_input_ids = nn.functional.pad(first_codebook_ids, (1, 0), value=0)
            backbone_last_hidden_state = outputs.hidden_states[-1][:, -1, :]
            depth_decoder_outputs = self.depth_decoder.generate(
                input_ids=depth_decoder_input_ids, backbone_last_hidden_state=backbone_last_hidden_state.clone()
            )
            # depth decoder may return either a raw tensor or a generate output dict
            codebook_ids = (
                depth_decoder_outputs
                if isinstance(depth_decoder_outputs, torch.Tensor)
                else depth_decoder_outputs.sequences
            )
            # remove the place holder in position 0
            codebook_ids = codebook_ids[:, 1:]
            next_tokens = codebook_ids
            # finished sentences should have their next token be a padding token
            if has_eos_stopping_criteria:
                next_tokens = next_tokens * unfinished_sequences.unsqueeze(-1) + pad_token_id * (
                    1 - unfinished_sequences.unsqueeze(-1)
                )
            # update generated ids, model inputs, and length for next step
            if input_ids.ndim == 2:
                # first frame after a 2D text prompt: start a fresh (batch, frames, num_codebooks) tensor
                input_ids = next_tokens[:, None, :]
            else:
                input_ids = torch.cat([input_ids, next_tokens[:, None, :]], dim=1)
            # ============================================
            if streamer is not None:
                streamer.put(next_tokens.cpu())
            # *************** Csm specific ***************
            # for the eos stopping criteria, is it expected that the eos token is the same for each codebook !!!!
            unfinished_sequences = unfinished_sequences & ~(
                input_ids[:, -1, :-1] == self.config.codebook_eos_token_id
            ).all(-1)
            # ============================================
            unfinished_sequences = unfinished_sequences & ~stopping_criteria(input_ids, scores)
            this_peer_finished = unfinished_sequences.max() == 0
            cur_len += 1
            # This is needed to properly delete outputs.logits which may be very large for first iteration
            # Otherwise a reference to outputs is kept which keeps the logits alive in the next iteration
            del outputs
            # *************** Csm specific ***************
            del depth_decoder_outputs
            # ============================================
        if streamer is not None:
            streamer.end()
        if return_dict_in_generate:
            return GenerateDecoderOnlyOutput(
                sequences=input_ids,
                scores=scores,
                logits=raw_logits,
                attentions=decoder_attentions,
                hidden_states=decoder_hidden_states,
                past_key_values=model_kwargs.get("past_key_values"),
            )
        else:
            return input_ids
    def generate(
        self,
        input_ids: torch.Tensor | None = None,
        input_values: torch.Tensor | None = None,
        input_values_cutoffs: torch.Tensor | None = None,
        generation_config: GenerationConfig | None = None,
        logits_processor: LogitsProcessorList | None = None,
        stopping_criteria: StoppingCriteriaList | None = None,
        synced_gpus: bool | None = None,
        streamer: Optional["BaseStreamer"] = None,
        output_audio: bool | None = False,
        **kwargs,
    ) -> GenerateNonBeamOutput | torch.LongTensor:
        r"""
        This method overrides [`~generation.utils.GenerationMixin.generate`] to match the specifics of the Csm model.
        Indeed, Csm model requires a custom generation sampling step:
        1. Infer the backbone model to sample the first codebook token
        2. Call generate on the depth decoder with the first codebook token as `input_ids` to sample the next codebook tokens
        3. Use these generated codebook tokens as `input_ids` to sample the next first codebook token using the backbone model
        4. Repeat until stopping criteria is met
        <Tip warning={true}>
        Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
        model's default generation configuration. You can override any `generation_config` by passing the corresponding
        parameters to generate(), e.g. `.generate(inputs, do_sample=True)`.
        </Tip>
        Parameters:
            input_ids (`torch.Tensor` of shape (batch_size, seq_length), *optional*):
                The sequence used as a prompt for the backbone model.
            input_values (`torch.Tensor` of shape (batch_size, channels, max_concatenated_audio_length), *optional*):
                The batched audio input values, where each batch entry contains the concatenation of all audio segments for that entry.
                These values will be encoded into codebook tokens using the codec model and merged with the text input ids provided in `input_ids`.
            input_values_cutoffs (`torch.Tensor` of shape (batch_size, max_num_audio), *optional*):
                Specify the end positions of audio segments within each batch entry, relative to the concatenated audio input.
                If a batch entry has fewer segments than the maximum, it is padded with -1. For example, in a batch of 2 sequences
                where the first contains 2 audio segments of length l1, and the second contains 1 audio segment of length l2,
                the input_values_cutoffs would be: [[l1, 2 * l1], [l2, -1]].
            generation_config ([`~generation.GenerationConfig`], *optional*):
                The generation configuration to be used as base parametrization for the generation call. `**kwargs`
                passed to generate matching the attributes of `generation_config` will override them. If
                `generation_config` is not provided, the default will be used, which has the following loading
                priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
                configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
                default values, whose documentation should be checked to parameterize generation.
            logits_processor (`LogitsProcessorList`, *optional*):
                Custom logits processors that complement the default logits processors built from arguments and
                generation config. If a logit processor is passed that is already created with the arguments or a
                generation config an error is thrown. This feature is intended for advanced users.
            stopping_criteria (`StoppingCriteriaList`, *optional*):
                Custom stopping criteria that complements the default stopping criteria built from arguments and a
                generation config. If a stopping criteria is passed that is already created with the arguments or a
                generation config an error is thrown. If your stopping criteria depends on the `scores` input, make
                sure you pass `return_dict_in_generate=True, output_scores=True` to `generate`. This feature is
                intended for advanced users.
            synced_gpus (`bool`, *optional*):
                Whether to continue running the while loop until max_length. Unless overridden, this flag will be set
                to `True` if using `FullyShardedDataParallel` or DeepSpeed ZeRO Stage 3 with multiple GPUs to avoid
                deadlocking if one GPU finishes generating before other GPUs. Otherwise, defaults to `False`.
            streamer (`BaseStreamer`, *optional*):
                Streamer object that will be used to stream the generated sequences. Generated tokens are passed
                through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
            output_audio (`bool`, *optional*):
                Whether to return the generated audio.
            kwargs (`dict[str, Any]`, *optional*):
                Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
                forwarded to the `forward` function of the model. Depth decoder specific kwargs should be prefixed with *depth_decoder_*.
        Return:
            [`CsmGenerateOutput`] or `torch.LongTensor` or `list[torch.FloatTensor]`: A [`CsmGenerateOutput`]
            (if `return_dict_in_generate=True` or when `config.return_dict_in_generate=True`) or a `torch.LongTensor` when `output_audio=False`
            or a `list[torch.FloatTensor]` otherwise.
        Example:
        ```python
        >>> import torch
        >>> from transformers import CsmProcessor, CsmForConditionalGeneration
        >>> from datasets import load_dataset, Audio
        >>> model_id = "sesame/csm-1b"
        >>> torch_device = "cuda" if torch.cuda.is_available() else "cpu"
        >>> processor = CsmProcessor.from_pretrained(model_id)
        >>> ds = load_dataset("hf-internal-testing/dailytalk-dummy", split="train")
        >>> # ensure the audio is 24kHz
        >>> ds = ds.cast_column("audio", Audio(sampling_rate=24000))
        >>> conversation = []
        >>> # prepare a conversation with text and corresponding audio
        >>> for text, audio, speaker_id in zip(ds[:4]["text"], ds[:4]["audio"], ds[:4]["speaker_id"]):
        ...     conversation.append(
        ...         {
        ...             "role": f"{speaker_id}",
        ...             "content": [{"type": "text", "text": text}, {"type": "audio", "path": audio["array"]}],
        ...         }
        ...     )
        >>> # text prompt
        >>> conversation.append({"role": f"{ds[4]['speaker_id']}", "content": [{"type": "text", "text": ds[4]["text"]}]})
        >>> inputs = processor.apply_chat_template(
        ...     conversation,
        ...     tokenize=True,
        ...     return_dict=True,
        ... ).to(torch_device)
        >>> model = CsmForConditionalGeneration.from_pretrained(model_id, device_map=torch_device)
        >>> audio = model.generate(**inputs, output_audio=True)
        >>> processor.save_audio(audio, "output.wav")
        ```
        """
        # runs the Csm-specific _sample above; returns per-frame codebook ids
        # (tensor, or a generate-output dict when return_dict_in_generate=True)
        generate_output = super().generate(
            input_ids=input_ids,
            input_values=input_values,
            input_values_cutoffs=input_values_cutoffs,
            generation_config=generation_config,
            logits_processor=logits_processor,
            stopping_criteria=stopping_criteria,
            synced_gpus=synced_gpus,
            streamer=streamer,
            **kwargs,
        )
        generate_returned_dict = not isinstance(generate_output, torch.Tensor)
        audio = None
        if output_audio:
            generated_audio_codes = generate_output.sequences if generate_returned_dict else generate_output
            # infer the codec model
            audio = []
            with torch.no_grad():
                # =======================================
                # TODO: @eustlb, this should be batched !!!
                # but requires making sure batched inference of the codec model works as intended
                for audio_codes_batch in generated_audio_codes:
                    # trim at the first frame where every codebook emitted the eos token
                    eos_idxs = (audio_codes_batch == self.config.codebook_eos_token_id).all(dim=-1).nonzero()
                    if eos_idxs.numel() != 0:
                        cutoff_idx = eos_idxs.min()
                    else:
                        cutoff_idx = audio_codes_batch.shape[0]
                    audio_codes_batch = audio_codes_batch[:cutoff_idx]
                    codec_decode_output = self.codec_model.decode(audio_codes_batch.transpose(0, 1).unsqueeze(0))
                    audio.append(codec_decode_output.audio_values[0, 0])
                # =======================================
        if generate_returned_dict:
            return CsmGenerateOutput(audio=audio, **generate_output)
        elif output_audio:
            return audio
        else:
            return generate_output
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/csm/generation_csm.py",
"license": "Apache License 2.0",
"lines": 419,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/csm/modular_csm.py | # Copyright 2025 Sesame and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
import torch
import torch.nn as nn
from ... import initialization as init
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...masking_utils import create_causal_mask
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging
from ...utils.generic import merge_with_config_defaults
from ...utils.import_utils import is_torchdynamo_compiling
from ...utils.output_capturing import capture_outputs
from ..auto import AutoModel
from ..llama.modeling_llama import (
LlamaAttention,
LlamaDecoderLayer,
LlamaForCausalLM,
LlamaMLP,
LlamaModel,
LlamaRMSNorm,
LlamaRotaryEmbedding,
TransformersKwargs,
)
from .configuration_csm import CsmConfig, CsmDepthDecoderConfig
from .generation_csm import CsmGenerationMixin
logger = logging.get_logger(__name__)
@dataclass
@auto_docstring(
    custom_intro="""
    Base class for the model autoregressive outputs.
    """
)
class CsmOutputWithPast(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction).
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    depth_decoder_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction) of the depth decoder model.
    depth_decoder_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the depth decoder (scores for each vocabulary token before SoftMax).
    depth_decoder_past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
    depth_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
        one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
        Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    depth_decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`.
    backbone_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction) of the backbone model.
    """
    # NOTE(review): `loss` presumably combines `backbone_loss` and
    # `depth_decoder_loss` — confirm in CsmForConditionalGeneration.forward.
    loss: torch.FloatTensor | None = None
    logits: torch.FloatTensor | None = None
    past_key_values: Cache | None = None
    # hidden_states / attentions follow the standard CausalLMOutputWithPast
    # layout and are documented by the shared auto_docstring templates.
    hidden_states: tuple[torch.FloatTensor, ...] | None = None
    attentions: tuple[torch.FloatTensor, ...] | None = None
    # Depth-decoder counterparts of the fields above.
    depth_decoder_loss: torch.FloatTensor | None = None
    depth_decoder_logits: torch.FloatTensor | None = None
    depth_decoder_past_key_values: Cache | None = None
    depth_decoder_hidden_states: tuple[torch.FloatTensor, ...] | None = None
    depth_decoder_attentions: tuple[torch.FloatTensor, ...] | None = None
    backbone_loss: torch.FloatTensor | None = None
# manually specify names for correct naming when converting from modular
# Csm reuses the Llama building blocks unchanged; the subclasses below only
# rebind them under Csm-prefixed names so the modular converter emits correctly
# named classes in the generated modeling file.
class CsmRMSNorm(LlamaRMSNorm):
    pass
class CsmRotaryEmbedding(LlamaRotaryEmbedding):
    pass
class CsmMLP(LlamaMLP):
    pass
class CsmAttention(LlamaAttention):
    pass
class CsmDecoderLayer(LlamaDecoderLayer):
    pass
@auto_docstring
class CsmPreTrainedModel(PreTrainedModel):
    # NOTE: the original stacked two @auto_docstring decorators; the extra one
    # carried a "bare Csm Model" custom_intro that belongs to a model class,
    # not to this PreTrainedModel base, so a single plain @auto_docstring is kept.
    config: CsmConfig
    base_model_prefix = "model"
    input_modalities = ("audio", "text")
    supports_gradient_checkpointing = True
    _no_split_modules = ["CsmDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    # does not because of Mimi codec model
    # _supports_flex_attn = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": CsmDecoderLayer,
        "attentions": CsmAttention,
    }
    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize the Csm-specific modules; everything else is handled by the parent."""
        super()._init_weights(module)
        if isinstance(module, CsmCodebooksHead):
            # A single draw fills the whole (num_codebooks - 1, hidden, vocab) parameter.
            # The previous loop re-initialized the *entire* tensor once per codebook,
            # so only the last draw survived; one call is equivalent and cheaper.
            init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, CsmBackboneModelEmbeddings):
            # NOTE(review): the embeddings module builds this buffer with
            # config.codebook_size while this uses config.vocab_size — confirm the
            # two attributes are equal for CsmConfig, otherwise the values diverge.
            init.copy_(module.audio_tokens_offsets, torch.arange(self.config.num_codebooks) * self.config.vocab_size)
@auto_docstring
class CsmDepthDecoderModel(LlamaModel, CsmPreTrainedModel):
    config: CsmDepthDecoderConfig
    def __init__(self, config):
        super().__init__(config)
        # Flat embedding table over all codebooks: ids are shifted by
        # codebook_index * vocab_size before lookup (see forward).
        self.embed_tokens = nn.Embedding((config.num_codebooks * config.vocab_size), config.backbone_hidden_size)
        # Projects embeddings from the backbone's hidden size down/up to the
        # depth decoder's own hidden size.
        self.inputs_embeds_projector = nn.Linear(config.backbone_hidden_size, config.hidden_size, bias=False)
    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        backbone_last_hidden_state: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPast:
        r"""
        backbone_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, backbone_hidden_size)`, *optional*):
            The last hidden state of the backbone model. Such input is required when the first codebook token (the one generated by the backbone model)
            is provided in the `input_ids` argument.
        """
        if position_ids is not None and not is_torchdynamo_compiling():
            logger.warning_once(
                "Custom `position_ids` were provided but will be ignored. CSM depth decoder automatically determines position_ids "
                "from `cache_position` and as it requires them to be identical across the batch, the provided position_ids will be ignored."
            )
            position_ids = None
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds.")
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)
        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            inputs_seq_length = inputs_embeds.shape[1] if inputs_embeds is not None else input_ids.shape[1]
            device = inputs_embeds.device if inputs_embeds is not None else input_ids.device
            cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_seq_length, device=device)
        if inputs_embeds is None:
            # Position 0 holds the backbone hidden state placeholder, so position p
            # corresponds to codebook p - 1 (clamped so position 0 maps to codebook 0);
            # the offset selects the right slice of the flat embedding table.
            codebook_idxs = torch.clamp(cache_position - 1, min=0)
            offset = codebook_idxs * self.vocab_size
            inputs_embeds = self.embed_tokens(input_ids + offset)
            input_ids_are_first_codebook = cache_position[0] == 0
            if backbone_last_hidden_state is not None:
                # Replace the position-0 placeholder with the backbone conditioning vector.
                inputs_embeds[:, 0] = backbone_last_hidden_state
            else:
                if not is_torchdynamo_compiling() and input_ids_are_first_codebook:
                    logger.warning(
                        "When the first codebook token is provided, `backbone_last_hidden_state` should also be provided for correct inference."
                    )
        inputs_embeds = self.inputs_embeds_projector(inputs_embeds)
        causal_mask = create_causal_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        hidden_states = inputs_embeds
        # create position embeddings to be shared across the decoder layers
        # (positions are taken from cache_position, identical across the batch)
        position_ids = cache_position.unsqueeze(0)
        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )
        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
        )
class CsmCodebooksHead(nn.Module):
    """Position-specific language modeling head for the depth decoder.

    Holds one independent `(hidden_size, vocab_size)` projection per codebook
    (excluding the first codebook, which is predicted by the backbone), and
    applies to each sequence position the projection of its codebook.
    """

    def __init__(self, hidden_size, num_codebooks, vocab_size):
        super().__init__()
        self.num_codebooks = num_codebooks
        self.weight = nn.Parameter(torch.empty(self.num_codebooks - 1, hidden_size, vocab_size))

    def forward(self, hidden_states, cache_position=None):
        if cache_position is None:
            # Prefill: sequence positions map one-to-one onto codebook indices.
            selected = self.weight[torch.arange(hidden_states.shape[1])]
        else:
            # Decoding: position 0 carries the backbone hidden state, so the
            # codebook index is the cache position shifted back by one.
            selected = self.weight[cache_position - 1]
        # x @ W is identical to nn.functional.linear(x, W.T).
        per_position = [
            hidden_states[:, position, :] @ selected[position] for position in range(selected.shape[0])
        ]
        return torch.stack(per_position, dim=1)
@auto_docstring(
    custom_intro="""
    The CsmDepthDecoder Model transformer, with a [`CsmCodebooksHead`] on top,
    which can be seen a position-specific language modeling head, allowing to use a different linear layer for each codebook
    (e.g. position 0 is the first codebook and uses the first codebook head, etc.)
    """
)
class CsmDepthDecoderForCausalLM(LlamaForCausalLM, GenerationMixin):
    _tied_weights_keys = None
    _tp_plan = None
    _pp_plan = None
    def __init__(self, config):
        super().__init__(config)
        # The single lm_head is replaced by the per-position codebooks_head.
        del self.lm_head
        self.codebooks_head = CsmCodebooksHead(config.hidden_size, config.num_codebooks, config.vocab_size)
        self.model = CsmDepthDecoderModel(config)
    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor,
        next_sequence_length: int | None = None,
        past_key_values: Cache | None = None,
        attention_mask: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs,
    ):
        model_inputs = super().prepare_inputs_for_generation(
            input_ids, next_sequence_length, past_key_values, attention_mask, inputs_embeds, cache_position, **kwargs
        )
        is_first_generation_step = model_inputs["cache_position"][0] == 0
        if not is_first_generation_step:
            # backbone_last_hidden_state only conditions the position-0 placeholder
            # NOTE(review): both pops assume the keys are always present — confirm upstream.
            model_inputs.pop("backbone_last_hidden_state")
        # csm depth decoder does not use position_ids
        model_inputs.pop("position_ids")
        return model_inputs
    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        backbone_last_hidden_state: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | CausalLMOutputWithPast:
        r"""
        backbone_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, backbone_hidden_size)`, *optional*):
            The last hidden state of the backbone model. Such input is required when the first codebook token (the one generated by the backbone model)
            is provided in the `input_ids` argument.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """
        outputs = self.model(
            input_ids=input_ids,
            backbone_last_hidden_state=backbone_last_hidden_state,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        if isinstance(logits_to_keep, int):
            if logits_to_keep == 0:
                # skip idx 0 logits since it's for the concatenated backbone last hidden state
                slice_indices = slice(1, None)
            else:
                slice_indices = slice(-logits_to_keep, None)
        else:
            slice_indices = logits_to_keep
        logits = self.codebooks_head(
            hidden_states[:, slice_indices, :], cache_position[slice_indices] if cache_position is not None else None
        )
        logits = logits.contiguous()
        loss = None
        if labels is not None:
            # Labels are shifted here (standard next-token prediction), so the
            # loss function is called with labels=None and pre-shifted targets.
            shift_labels = labels[..., 1:].contiguous()
            loss = self.loss_function(
                logits=logits, labels=None, vocab_size=self.config.vocab_size, shift_labels=shift_labels, **kwargs
            )
        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
class CsmBackboneModelEmbeddings(nn.Module):
    """Embeds multi-codebook audio token ids into one hidden vector per position.

    Input ids have shape `(batch, seq, num_codebooks)`; each codebook's ids are
    shifted into its own slice of a single flat embedding table, looked up, and
    the per-codebook embeddings are summed over the codebook dimension.
    """

    def __init__(self, config):
        super().__init__()
        # Rows [k * codebook_size, (k + 1) * codebook_size) hold codebook k.
        self.embed_audio_tokens = nn.Embedding(config.num_codebooks * config.codebook_size, config.hidden_size)
        offsets = torch.arange(config.num_codebooks) * config.codebook_size
        self.register_buffer("audio_tokens_offsets", offsets, persistent=False)

    def forward(self, input_ids):
        shifted_ids = input_ids + self.audio_tokens_offsets
        per_codebook = self.embed_audio_tokens(shifted_ids)
        # Collapse the codebook dimension by summation.
        return per_codebook.sum(dim=2)
@auto_docstring
class CsmBackboneModel(LlamaModel):
    def __init__(self, config):
        super().__init__(config)
        # Replace the text token embeddings with the summed multi-codebook
        # audio embeddings; text/audio merging happens in the wrapper model.
        self.embed_tokens = CsmBackboneModelEmbeddings(config)
    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(self, **super_kwargs):
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks) or (batch_size, sequence_length)`):
            1. (batch_size, sequence_length): corresponds to the input sequence prepared with the processor from the text prompt. Such input
            requires `input_values` to be provided so that audio can be encoded in codebook tokens and then merged with the text tokens.
            2. (batch_size, sequence_length, num_codebooks): codebook tokens generated during the autoregressive decoding. Such input is not meant to be used by end users.
            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
            [What are input IDs?](../glossary#input-ids)
        """
        # Pure pass-through: only the docstring and embeddings differ from LlamaModel.
        return super().forward(**super_kwargs)
@auto_docstring(
custom_intro="""
The Csm model consists of two llama-like auto-regressive transformer models: a backbone model that predicts the first codebook token and a depth decoder that predicts the other codebook tokens.
"""
)
class CsmForConditionalGeneration(CsmPreTrainedModel, CsmGenerationMixin):
_tied_weights_keys = {
"backbone_model.embed_tokens.embed_audio_tokens.weight": "depth_decoder.model.embed_tokens.weight"
}
    def __init__(self, config):
        super().__init__(config)
        self.vocab_size = config.vocab_size
        # lm_head predicts only the first codebook token; the remaining
        # codebooks are predicted by the depth decoder's codebooks_head.
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.embed_text_tokens = nn.Embedding(config.text_vocab_size, config.hidden_size)
        # Backbone (first codebook), depth decoder (remaining codebooks) and
        # the codec model used to encode/decode audio waveforms.
        self.backbone_model = CsmBackboneModel._from_config(config)
        self.depth_decoder = CsmDepthDecoderForCausalLM._from_config(config.depth_decoder_config)
        self.codec_model = AutoModel.from_config(config.codec_config)
        self.post_init()
    def get_input_embeddings(self):
        """Return the backbone's multi-codebook audio embedding module."""
        return self.backbone_model.embed_tokens
    def set_input_embeddings(self, value):
        """Replace the backbone's input embedding module with `value`."""
        self.backbone_model.embed_tokens = value
@classmethod
def from_pretrained(cls, *args, **kwargs):
if kwargs.get("output_loading_info", False):
model, loading_info = super().from_pretrained(*args, **kwargs)
else:
model = super().from_pretrained(*args, **kwargs)
# copy depth decoder generation conf attr to the depth decoder generation config
prefix = "depth_decoder_"
prefix_len = len(prefix)
depth_decoder_attrs = {
attr[prefix_len:]: value
for attr, value in vars(model.generation_config).items()
if attr.startswith(prefix)
}
vars(model.depth_decoder.generation_config).update({"_from_model_config": False, **depth_decoder_attrs})
# remove the depth decoder generation conf attr from the model generation config
for attr in depth_decoder_attrs:
delattr(model.generation_config, prefix + attr)
if "output_loading_info" in kwargs:
return model, loading_info
else:
return model
def save_pretrained(self, *args, **kwargs):
# copy the depth decoder generation config attributes to the model generation config
prefix = "depth_decoder_"
depth_decoder_attrs = self.depth_decoder.generation_config.to_diff_dict()
depth_decoder_attrs.pop("transformers_version", None)
for attr, value in depth_decoder_attrs.items():
setattr(self.generation_config, prefix + attr, value)
super().save_pretrained(*args, **kwargs)
def _merge_input_ids_with_input_values(
self,
input_ids: torch.Tensor | None = None,
input_values: torch.Tensor | None = None,
input_values_cutoffs: torch.Tensor | None = None,
labels: torch.Tensor | None = None,
) -> torch.Tensor | None:
"""
Merges the input_ids and input_values to produce a single inputs_embeds tensor:
1 - Infers the codec model on the input_values to retrieve codebook token.
2 - Embeds codebook tokens and places them at the correct positions in the inputs_embeds tensor.
3 - If labels are provided, expands them to match codebook dimensions and position the target codebook tokens in the inputs_embeds tensor.
Args:
input_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`):
The input ids to embed.
input_values (`torch.Tensor` of shape `(batch_size, channels, audio_sequence_length)`):
The audio input values to embed.
input_values_cutoffs (`torch.Tensor` of shape `(batch_size, max_num_audio)`):
The cutoffs of the audio input values relative to its batch index, padded with -1 when no audio.
"""
inputs_embeds = self.embed_text_tokens(input_ids)
if input_values is not None:
# infer input_values_mask
input_values_cutoffs = nn.functional.pad(input_values_cutoffs, (1, 0))
audio_lengths = input_values_cutoffs[input_values_cutoffs >= 0].diff()
audio_lengths = audio_lengths[audio_lengths > 0]
input_values_mask = torch.arange(input_values_cutoffs.max(), device=input_values.device).expand(
len(audio_lengths), -1
)
input_values_mask = input_values_mask < audio_lengths.unsqueeze(1)
# =======================================
# TODO: @eustlb, this should be batched !!!
# but requires making sure batched inference of the codec model works as intended
with torch.no_grad():
audio_tokens_list = []
for batch_input_values, batch_input_values_cutoffs in zip(input_values, input_values_cutoffs):
batch_input_values_cutoffs = batch_input_values_cutoffs[batch_input_values_cutoffs >= 0]
for i in range(batch_input_values_cutoffs.shape[0] - 1):
start_idx = batch_input_values_cutoffs[i]
end_idx = batch_input_values_cutoffs[i + 1]
audio_batch = batch_input_values[..., start_idx:end_idx]
codec_outputs = self.codec_model.encode(audio_batch.unsqueeze(0))
codebook_ids = codec_outputs.audio_codes.transpose(1, -1)
audio_tokens_list.append(codebook_ids[0])
max_audio_frames = max(el.shape[0] for el in audio_tokens_list)
batched_audio_token_ids = torch.stack(
[nn.functional.pad(el, (0, 0, 0, max_audio_frames - el.shape[0])) for el in audio_tokens_list]
)
audio_codes_mask = self.codec_model.get_audio_codes_mask(input_values_mask)
# =======================================
audio_token_id = self.config.audio_token_id
audio_token_mask = input_ids == audio_token_id
audio_embeds = self.backbone_model.embed_tokens(batched_audio_token_ids)
inputs_embeds[audio_token_mask] = audio_embeds[audio_codes_mask]
# same for the audio eos token
audio_eos_frame_ids = (
torch.ones((1, 1, self.config.num_codebooks), device=input_ids.device, dtype=torch.long)
* self.config.codebook_eos_token_id
)
audio_eos_embeds = self.backbone_model.embed_tokens(audio_eos_frame_ids).squeeze(1)
audio_eos_token_mask = input_ids == self.config.audio_eos_token_id
inputs_embeds[audio_eos_token_mask] = audio_eos_embeds.repeat(audio_eos_token_mask.sum(), 1)
# if the labels are provided, we need to expand the labels to (batch_size, seq_length, num_codebooks)
if labels is not None:
labels_expanded = labels.unsqueeze(-1).repeat(1, 1, self.config.num_codebooks)
labels_expanded[audio_token_mask] = batched_audio_token_ids[audio_codes_mask]
labels_expanded[audio_eos_token_mask] = audio_eos_frame_ids
# mask depth decoder
depth_decoder_ignore_frames_idxs = (labels == -101).nonzero(as_tuple=True)
labels_expanded[depth_decoder_ignore_frames_idxs[0], depth_decoder_ignore_frames_idxs[1], 1:] = -100
labels = labels_expanded
return {"inputs_embeds": inputs_embeds, "labels": labels}
def prepare_inputs_for_generation(
self,
input_ids: torch.LongTensor,
next_sequence_length: int | None = None,
past_key_values: Cache | None = None,
attention_mask: torch.LongTensor | None = None,
inputs_embeds: torch.FloatTensor | None = None,
cache_position: torch.LongTensor | None = None,
**kwargs,
):
model_inputs = super().prepare_inputs_for_generation(
input_ids=input_ids,
next_sequence_length=next_sequence_length,
past_key_values=past_key_values,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
cache_position=cache_position,
**kwargs,
)
if input_ids is not None and input_ids.ndim == 2 and model_inputs.get("inputs_embeds") is None:
merged_inputs = self._merge_input_ids_with_input_values(
input_ids=input_ids,
input_values=kwargs.get("input_values"),
input_values_cutoffs=kwargs.get("input_values_cutoffs"),
labels=kwargs.get("labels"),
)
model_inputs.update(
{"inputs_embeds": merged_inputs["inputs_embeds"], "labels": merged_inputs["labels"], "input_ids": None}
)
return model_inputs
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: torch.LongTensor | None = None,
input_values: torch.Tensor | None = None,
attention_mask: torch.Tensor | None = None,
input_values_cutoffs: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: Cache | None = None,
inputs_embeds: torch.FloatTensor | None = None,
labels: torch.LongTensor | None = None,
use_cache: bool | None = None,
cache_position: torch.LongTensor | None = None,
logits_to_keep: int | torch.Tensor = 0,
**kwargs: Unpack[TransformersKwargs],
) -> tuple | CsmOutputWithPast:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks) or (batch_size, sequence_length)`):
1. (batch_size, sequence_length): corresponds to the input sequence prepared with the processor from the text prompt. Such input
requires `input_values` to be provided so that audio can be encoded in codebook tokens and then merged with the text tokens.
2. (batch_size, sequence_length, num_codebooks): codebook tokens generated during the autoregressive decoding. Such input is not meant to be used by end users.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
input_values_cutoffs (`torch.Tensor` of shape `(batch_size, max_num_audio)`, *optional*):
Specify the end positions of audio segments within each batch entry, relative to the concatenated audio input.
If a batch entry has fewer segments than the maximum, it is padded with -1. For example, in a batch of 2 sequences
where the first contains 2 audio segments of length l1, and the second contains 1 audio segment of length l2,
the input_values_cutoffs would be: [[l1, 2 * l1], [l2, -1]].
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[config.audio_token_id, -100, -101]`.
Requires targeted `input_values` to be provided as audio tokens will be inferred from it using the `codec_model`.
- `config.audio_token_id` indicates an audio frames (considering sequence length elements as frames)
- `-100` will be ignored in the loss computation
- `-101` indicates the audio frame will be used only for the backbone model (using the first codebook token as labels)
Such labels can be prepared using `output_labels=True` when calling [`CsmProcessor`].
logits_to_keep (`int` or `torch.Tensor`, *optional*):
Kept for compatibility. Does not support another value than:
1. `0`, which is equivalent to keeping all logits, used in the training regime
2. `1`, which is equivalent to keeping only the last logit, used in the generation regime
Example:
```python
>>> import torch
>>> from transformers import CsmForConditionalGeneration, AutoProcessor
>>> from datasets import load_dataset, Audio
>>> model_id = "sesame/csm-1b"
>>> torch_device = "cuda" if torch.cuda.is_available() else "cpu"
>>> processor = AutoProcessor.from_pretrained(model_id)
>>> ds = load_dataset("hf-internal-testing/dailytalk-dummy", split="train")
>>> # ensure the audio is 24kHz
>>> ds = ds.cast_column("audio", Audio(sampling_rate=24000))
>>> conversation = []
>>> # prepare a conversation with text and corresponding audio
>>> for text, audio, speaker_id in zip(ds[:4]["text"], ds[:4]["audio"], ds[:4]["speaker_id"]):
... conversation.append(
... {
... "role": f"{speaker_id}",
... "content": [{"type": "text", "text": text}, {"type": "audio", "path": audio["array"]}],
... }
... )
>>> inputs = processor.apply_chat_template(
... conversation,
... tokenize=True,
... return_dict=True,
... output_labels=True,
... ).to(torch_device)
>>> model = CsmForConditionalGeneration.from_pretrained(model_id, device_map=torch_device)
>>> output = model(**inputs)
>>> output.loss.backward()
```"""
if input_ids is not None and input_ids.ndim == 2:
merged_inputs = self._merge_input_ids_with_input_values(
input_ids, input_values, input_values_cutoffs, labels
)
inputs_embeds = merged_inputs["inputs_embeds"]
labels = merged_inputs["labels"]
input_ids = None
backbone_outputs = self.backbone_model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
backbone_hidden_states = backbone_outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
backbone_logits = self.lm_head(backbone_hidden_states[:, slice_indices, :])
loss = None
backbone_loss = None
depth_decoder_loss = None
depth_decoder_outputs = None
if labels is not None:
# select first codebook as labels for the backbone model
backbone_labels = labels[:, :, 0]
backbone_loss = self.loss_function(
logits=backbone_logits, labels=backbone_labels, vocab_size=self.config.vocab_size, **kwargs
)
# for the depth decoder, we need to select the frames to train on
# those are frames where the label is not uniformly `ignore_index` along the codebook dimension
train_mask = ~(labels[:, :, 1:] == -100).all(dim=-1)
depth_decoder_input_ids = labels[train_mask][..., : self.config.num_codebooks - 1]
# add place holder in position 0 that will be replaced by the backbone_last_hidden_state
depth_decoder_input_ids = nn.functional.pad(depth_decoder_input_ids, (1, 0), value=0)
train_idxs = train_mask.nonzero(as_tuple=True)
backbone_last_hidden_states = backbone_hidden_states[train_idxs[0], train_idxs[1] - 1, :]
depth_decoder_labels = labels[train_mask]
depth_decoder_outputs = self.depth_decoder(
input_ids=depth_decoder_input_ids,
backbone_last_hidden_state=backbone_last_hidden_states,
use_cache=use_cache,
return_dict=True,
labels=depth_decoder_labels,
**kwargs,
)
depth_decoder_loss = depth_decoder_outputs.loss
loss = backbone_loss + depth_decoder_loss
return CsmOutputWithPast(
loss=loss,
backbone_loss=backbone_loss,
depth_decoder_loss=depth_decoder_loss,
logits=backbone_logits,
past_key_values=backbone_outputs.past_key_values,
hidden_states=backbone_outputs.hidden_states,
attentions=backbone_outputs.attentions,
depth_decoder_logits=depth_decoder_outputs.logits if depth_decoder_outputs is not None else None,
depth_decoder_past_key_values=depth_decoder_outputs.past_key_values
if depth_decoder_outputs is not None
else None,
depth_decoder_hidden_states=depth_decoder_outputs.hidden_states
if depth_decoder_outputs is not None
else None,
depth_decoder_attentions=depth_decoder_outputs.attentions if depth_decoder_outputs is not None else None,
)
# Public API of this module.
__all__ = [
    "CsmPreTrainedModel",
    "CsmBackboneModel",
    "CsmDepthDecoderModel",
    "CsmDepthDecoderForCausalLM",
    "CsmForConditionalGeneration",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/csm/modular_csm.py",
"license": "Apache License 2.0",
"lines": 655,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/csm/processing_csm.py | # Copyright 2025 Sesame and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from pathlib import Path
from typing import Any
import numpy as np
from ...utils import auto_docstring, is_soundfile_available, is_torch_available
if is_torch_available():
import torch
if is_soundfile_available():
import soundfile as sf
from ...audio_utils import AudioInput, make_list_of_audio
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import AudioKwargs, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
class CsmAudioKwargs(AudioKwargs, total=False):
    """
    encoded_length_kwargs (`dict[str, Any]`, *optional*):
        Dictionary of keyword arguments used to compute the encoded audio sequence length. This includes parameters
        such as `kernel_sizes`, `strides`, `dilations`, and `use_causal_conv` that define the convolutional layers
        used in audio encoding. The encoded length is used to determine how many audio tokens to generate for each
        audio input in the text sequence.
    """

    # Consumed by `CsmProcessor._get_encoded_length`; see `CsmProcessorKwargs._defaults` for default values.
    encoded_length_kwargs: dict[str, Any] | None
class CsmProcessorKwargs(ProcessingKwargs, total=False):
    """Processor kwargs for CSM, extending the common text/audio kwargs with audio-encoding-length parameters."""

    audio_kwargs: CsmAudioKwargs

    _defaults = {
        "text_kwargs": {
            "padding": True,
            "padding_side": "left",
            "add_special_tokens": False,
        },
        "audio_kwargs": {
            # Convolutional layout used to predict how many discrete audio tokens an
            # audio input maps to. NOTE(review): presumably mirrors the audio codec
            # encoder's configuration — confirm against the codec model config.
            "encoded_length_kwargs": {
                "kernel_sizes": [7, 3, 1, 8, 3, 1, 10, 3, 1, 12, 3, 1, 16, 3, 4],
                "strides": [1, 1, 1, 4, 1, 1, 5, 1, 1, 6, 1, 1, 8, 1, 2],
                "dilations": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                "use_causal_conv": True,
            },
            "sampling_rate": 24000,
        },
        "common_kwargs": {"return_tensors": "pt"},
    }
@auto_docstring
class CsmProcessor(ProcessorMixin):
    def __init__(
        self,
        feature_extractor,
        tokenizer,
        chat_template=None,
    ):
        # Fall back to the default special tokens when the tokenizer does not define them.
        if not hasattr(tokenizer, "audio_token"):
            self.audio_token = "<|AUDIO|>"
            self.audio_token_id = tokenizer.convert_tokens_to_ids(self.audio_token)
        else:
            self.audio_token = tokenizer.audio_token
            self.audio_token_id = tokenizer.audio_token_id

        if not hasattr(tokenizer, "audio_eos_token"):
            self.audio_eos_token = "<|audio_eos|>"
            self.audio_eos_token_id = tokenizer.convert_tokens_to_ids(self.audio_eos_token)
        else:
            self.audio_eos_token = tokenizer.audio_eos_token
            self.audio_eos_token_id = tokenizer.audio_eos_token_id

        super().__init__(feature_extractor, tokenizer, chat_template=chat_template)

    @staticmethod
    def _get_encoded_length(audio_length, kernel_sizes=None, strides=None, dilations=None, use_causal_conv=None):
        """
        Compute the length of the encoded audio sequence.

        Args:
            audio_length (int): The length of the audio sequence.
            kernel_sizes (list[int]): The kernel sizes for the convolutional layers.
            strides (list[int]): The strides for the convolutional layers.
            dilations (list[int]): The dilations for the convolutional layers.
            use_causal_conv (bool): Whether to use causal convolutions.

        Returns:
            int: the sequence length after all convolutional layers, or `audio_length`
            unchanged when any of the layer descriptions is missing.
        """
        cur_length = audio_length

        # Without a full layer description we cannot simulate the encoder; return the input length.
        if kernel_sizes is None or strides is None or dilations is None or use_causal_conv is None:
            return cur_length

        for kernel_size, stride, dilation in zip(kernel_sizes, strides, dilations):
            effective_kernel_size = (kernel_size - 1) * dilation + 1
            padding_total = kernel_size - stride
            padding_right = padding_total // 2
            padding_left = padding_total - padding_right

            # Extra padding so the last window is complete.
            # NOTE(review): this mirrors the codec encoder's padding arithmetic — confirm
            # against the codec implementation if those conv layers ever change.
            n_frames = (cur_length - effective_kernel_size + padding_total) / stride + 1
            n_frames = math.ceil(n_frames) - 1
            ideal_length = n_frames * stride + kernel_size - padding_total
            extra_padding = ideal_length - cur_length

            if use_causal_conv:
                padding_left = padding_total
                padding_right = extra_padding
            else:
                padding_right = padding_right + extra_padding

            # Apply padding, then the standard conv output-length formula.
            cur_length = cur_length + padding_left + padding_right
            cur_length = (cur_length - dilation * (kernel_size - 1) - 1) // stride + 1

        return cur_length

    def save_audio(
        self,
        audio: AudioInput,
        saving_path: str | Path | list[str | Path],
        **kwargs: Unpack[CsmProcessorKwargs],
    ):
        """Write each audio input to disk as a file, at the processor's sampling rate."""
        # TODO: @eustlb, this should be in AudioProcessor
        if not is_soundfile_available():
            raise ImportError("Please install `soundfile` to save audio files.")

        # ensure correct audio input
        audio = make_list_of_audio(audio)

        # ensure correct saving path
        if isinstance(saving_path, (str, Path)):
            saving_path = [saving_path]
        elif not (isinstance(saving_path, (list, tuple)) and all(isinstance(p, (str, Path)) for p in saving_path)):
            raise ValueError("Invalid input path. Please provide a string, or a list of strings")

        if len(audio) != len(saving_path):
            raise ValueError("The number of audio and saving paths must be the same")

        output_kwargs = self._merge_kwargs(
            CsmProcessorKwargs,
            **kwargs,
        )
        audio_kwargs = output_kwargs["audio_kwargs"]
        sampling_rate = audio_kwargs["sampling_rate"]

        for audio_value, p in zip(audio, saving_path):
            if isinstance(audio_value, torch.Tensor):
                audio_value = audio_value.cpu().float().numpy()
            sf.write(p, audio_value, sampling_rate)

    @auto_docstring
    def __call__(
        self,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] | None,
        audio: AudioInput | None = None,
        output_labels: bool | None = False,
        depth_decoder_labels_ratio: float | None = 1.0,
        **kwargs: Unpack[CsmProcessorKwargs],
    ):
        r"""
        output_labels (bool, *optional*, default=False):
            Whether to return labels for training. Indices will be in `[config.audio_token_id, -100, -101]`.
            - `config.audio_token_id` indicates an audio frame (considering sequence length elements as frames)
            - `-100` will be ignored in the loss computation
            - `-101` indicates the audio frame will be used only for the backbone model (using the first codebook token as labels)
        depth_decoder_labels_ratio (float, *optional*, default=1.0):
            The ratio of audio frames to keep for the depth decoder labels.

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **input_values** -- List of audio values to be fed to a model. Returned when `audio` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **labels** -- List of labels for the audio frames. Returned when `output_labels=True`.
        """
        output_kwargs = self._merge_kwargs(
            CsmProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )

        text_kwargs = output_kwargs["text_kwargs"]
        audio_kwargs = output_kwargs["audio_kwargs"]

        return_tensors = text_kwargs.get("return_tensors", None)
        if return_tensors != "pt":
            raise ValueError(f"{self.__class__.__name__} only supports `return_tensors='pt'`.")

        if isinstance(text, str):
            text = [text]
        elif not (isinstance(text, (list, tuple)) and all(isinstance(t, str) for t in text)):
            raise ValueError("Invalid input text. Please provide a string, or a list of strings")
        n_audio_in_text = [t.count(self.audio_token) for t in text]

        n_audio = 0
        if audio is not None:
            audio = make_list_of_audio(audio)
            n_audio = len(audio)

        # The total number of audio placeholder tokens must match the number of provided audios.
        if sum(n_audio_in_text) > 0 and n_audio != sum(n_audio_in_text):
            if audio is None:
                raise ValueError("No audio were provided, but there are audio tokens in the prompt")
            else:
                raise ValueError(
                    f"The number of audio tokens in each text ({n_audio_in_text}) should be the same as the "
                    f"number of provided audios ({n_audio})."
                )

        if audio is not None:
            encoded_length_kwargs = audio_kwargs.pop("encoded_length_kwargs", {})
            # Number of discrete audio tokens each audio will be encoded into.
            num_audio_tokens_list = [
                self._get_encoded_length(audio_array.shape[-1], **encoded_length_kwargs) for audio_array in audio
            ]
            num_audio_tokens_list_copy = num_audio_tokens_list.copy()

            # expand the text to repeat the audio token for the corresponding number of frames
            expanded_text = []
            for sample in text:
                replace_str = []
                # Two-pass replacement: substitute through an intermediate placeholder so the
                # expanded audio tokens are not themselves matched by the `while` loop.
                while self.audio_token in sample:
                    num_audio_tokens = num_audio_tokens_list_copy.pop(0)
                    expanded_audio_token = self.audio_token * num_audio_tokens

                    replace_str.append(expanded_audio_token)
                    sample = sample.replace(self.audio_token, "<placeholder>", 1)

                while "<placeholder>" in sample:
                    sample = sample.replace("<placeholder>", replace_str.pop(0), 1)
                expanded_text.append(sample)

            text = expanded_text

        encoding = self.tokenizer(text, **text_kwargs)
        data = {}
        data.update(encoding)

        if audio is not None:
            audio_kwargs.pop("return_attention_mask", None)  # not supported by the feature extractor

            # Concatenate every audio segment of a batch entry into a single waveform and
            # keep the per-entry cumulative cutoffs so segments can be split again later.
            concatenated_audio, input_values_cutoffs = [], []
            offset = 0
            for n_audio in n_audio_in_text:
                if n_audio == 0:
                    concatenated_audio.append(np.zeros(0))
                    input_values_cutoffs.append(torch.tensor([-1]))
                else:
                    concatenated_audio.append(
                        np.concatenate(
                            [
                                el.cpu().numpy() if isinstance(el, torch.Tensor) else el
                                for el in audio[offset : offset + n_audio]
                            ],
                            axis=-1,
                        )
                    )
                    input_values_cutoffs.append(
                        torch.tensor([el.shape[-1] for el in audio[offset : offset + n_audio]]).cumsum(dim=-1)
                    )
                    offset += n_audio

            audio_inputs = self.feature_extractor(concatenated_audio, **audio_kwargs)
            audio_inputs.pop("padding_mask", None)  # not applicable here
            data.update(audio_inputs)

            # pad and stack the audio cut idxs
            max_len = max(cut_idxs.shape[-1] for cut_idxs in input_values_cutoffs)
            input_values_cutoffs = [
                torch.nn.functional.pad(cut_idxs, (0, max_len - cut_idxs.shape[-1]), value=-1)
                for cut_idxs in input_values_cutoffs
            ]
            data["input_values_cutoffs"] = torch.stack(input_values_cutoffs, dim=0)

        if output_labels:
            audio_frame_idxs = (data["input_ids"] == self.audio_token_id).nonzero()
            n_audio_frames = audio_frame_idxs.shape[0]

            # Randomly select (1 - ratio) of the audio frames to be backbone-only (-101).
            if depth_decoder_labels_ratio <= 1.0:
                rand_idxs = torch.randperm(n_audio_frames)[: int(n_audio_frames * (1 - depth_decoder_labels_ratio))]
                skip_frames_idxs = audio_frame_idxs[rand_idxs]
            else:
                skip_frames_idxs = audio_frame_idxs

            # Audio and audio-eos tokens are the supervised positions; everything else is ignored (-100).
            labels = torch.where(
                (data["input_ids"] == self.audio_token_id) | (data["input_ids"] == self.audio_eos_token_id),
                data["input_ids"],
                -100,
            )
            labels[skip_frames_idxs[:, 0], skip_frames_idxs[:, 1]] = -101

            data["labels"] = labels

        return BatchFeature(data=data, tensor_type=return_tensors)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names

        # Remove `padding_mask`, it is popped and not used when processing. Make a copy of list when removing
        # otherwise `self.feature_extractor.model_input_names` is also modified
        feature_extractor_input_names = [name for name in feature_extractor_input_names if name != "padding_mask"]

        return list(tokenizer_input_names + feature_extractor_input_names + ["input_values_cutoffs"])
# Public API of this module.
__all__ = ["CsmProcessor"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/csm/processing_csm.py",
"license": "Apache License 2.0",
"lines": 265,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/csm/test_modeling_csm.py | # Copyright 2024, The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch ConversationalSpeechModel model."""
import copy
import unittest
import pytest
from parameterized import parameterized
from transformers import (
AutoProcessor,
CsmConfig,
CsmForConditionalGeneration,
is_torch_available,
)
from transformers.testing_utils import (
cleanup,
require_torch_accelerator,
slow,
torch_device,
)
from transformers.utils.import_utils import is_datasets_available
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
ids_tensor,
)
if is_datasets_available():
from datasets import load_dataset
if is_torch_available():
import torch
class CsmModelTester:
    """Builds a tiny `CsmConfig` and random inputs for the common model and generation tests."""

    def __init__(
        self,
        parent,
        ignore_index=-100,
        batch_size=3,
        seq_length=7,
        is_training=True,
        # Miniature depth decoder (config values chosen for test speed, not realism).
        depth_decoder_config={
            "num_codebooks": 10,
            "backbone_hidden_size": 64,
            "vocab_size": 6,
            "hidden_size": 64,
            "intermediate_size": 128,
            "num_hidden_layers": 2,
            "num_attention_heads": 4,
            "num_key_value_heads": 2,
            "hidden_act": "silu",
            "max_position_embeddings": 10,
        },
        # Miniature Mimi codec config.
        codec_config={
            "model_type": "mimi",
            "audio_channels": 1,
            "chunk_in_sec": None,
            "hidden_size": 32,
            "num_filters": 8,
            "num_residual_layers": 1,
            "upsampling_ratios": [8, 4],
            "codebook_size": 64,
            "vector_quantization_hidden_dimension": 64,
            "upsample_groups": 32,
            "num_hidden_layers": 2,
            "num_attention_heads": 2,
            "num_key_value_heads": 2,
            "sliding_window": 4,
            "codebook_dim": 64,
            "use_cache": False,
        },
        # Miniature backbone config.
        config={
            "num_codebooks": 10,
            "vocab_size": 6,
            "text_vocab_size": 99,
            "hidden_size": 64,
            "intermediate_size": 64,
            "num_hidden_layers": 2,
            "num_attention_heads": 4,
            "num_key_value_heads": 2,
            "hidden_act": "silu",
            "max_position_embeddings": 10,
            "bos_token_id": 1,
            "pad_token_id": 2,
            "eos_token_id": 3,
            "codebook_pad_token_id": 2,
            "codebook_eos_token_id": 3,
        },
    ):
        self.parent = parent
        self.is_training = is_training
        self.ignore_index = ignore_index
        self.depth_decoder_config = depth_decoder_config
        self.codec_config = codec_config
        self.config = config
        self.seq_length = seq_length
        self.batch_size = batch_size

        # Mirrored as attributes because the common test mixins read them directly.
        self.num_hidden_layers = config["num_hidden_layers"]
        self.vocab_size = config["vocab_size"]
        self.hidden_size = config["hidden_size"]
        self.num_attention_heads = config["num_attention_heads"]
        self.pad_token_id = config["pad_token_id"]

    def get_config(self):
        # Assemble the composite CSM config from the three sub-configs above.
        return CsmConfig(
            depth_decoder_config=self.depth_decoder_config,
            codec_config=self.codec_config,
            **self.config,
        )

    def prepare_config_and_inputs(self):
        config = self.get_config()
        # +1 shifts the sampled ids into [1, vocab_size - 1] so token id 0 never appears.
        input_ids = ids_tensor([self.batch_size, self.seq_length, config.num_codebooks], config.vocab_size - 1) + 1
        attention_mask = input_ids[..., -1].ne(1).to(torch_device)

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, attention_mask = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
class CsmForConditionalGenerationTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (CsmForConditionalGeneration,) if is_torch_available() else ()
test_resize_embeddings = False
test_resize_embeddings_untied = False
def setUp(self):
self.model_tester = CsmModelTester(self)
self.config_tester = ConfigTester(self, config_class=CsmConfig)
def test_config(self):
self.config_tester.run_common_tests()
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
"""
Overrides [ModelTesterMixin._prepare_for_class] to handle third input_ids dimension.
"""
inputs_dict = copy.deepcopy(inputs_dict)
if return_labels:
inputs_dict["labels"] = torch.zeros(
(
self.model_tester.batch_size,
self.model_tester.seq_length,
self.model_tester.config["num_codebooks"],
),
dtype=torch.long,
device=torch_device,
)
return inputs_dict
def _get_logits_processor_kwargs(self, do_sample=False, config=None):
"""
Overrides [GenerationTesterMixin._get_logits_processor_kwargs] to restrict to top_k, top_p, and temperature sampling.
"""
logits_processor_kwargs = {}
if do_sample:
logits_processor_kwargs.update(
{
"top_k": 10,
"top_p": 0.7,
"temperature": 0.7,
}
)
return logits_processor_kwargs
def _check_similar_generate_outputs(self, output_1, output_2, atol=1e-5, rtol=1e-5):
"""
Overrides [GenerationTesterMixin._check_similar_generate_outputs] to handle third input_ids dimension.
Here we only look a the first codebook (index 0 on last dimension of the generated sequences) since returned scores
are for this token.
"""
# scores doesn't include data regarding decoder input tokens
decoder_input_length = output_1.sequences.shape[1] - len(output_1.scores)
output_matches = output_1.sequences[..., 0] == output_2.sequences[..., 0]
has_matching_outputs = output_matches.all()
has_matching_scores = None
if not has_matching_outputs:
for batch_idx in range(output_1.sequences.shape[0]):
batch_matches = output_matches[batch_idx]
if batch_matches.all():
continue
first_mismatch_idx = batch_matches.int().argmin() # gets the index of the first False
first_mismatch_idx -= decoder_input_length
output_1_first_mismatch_scores = output_1.scores[first_mismatch_idx][batch_idx]
output_2_first_mismatch_scores = output_2.scores[first_mismatch_idx][batch_idx]
has_matching_scores = torch.allclose(
output_1_first_mismatch_scores, output_2_first_mismatch_scores, rtol=atol, atol=rtol
)
if not has_matching_scores:
break
self.assertTrue(has_matching_outputs or has_matching_scores)
@parameterized.expand([("random",), ("same",)])
@pytest.mark.generate
@unittest.skip(reason="CSM does not support assisted decoding.")
def test_assisted_decoding_matches_greedy_search(self, assistant_type):
pass
@pytest.mark.generate
@unittest.skip(reason="CSM does not support assisted decoding.")
def test_assisted_decoding_sample(self):
pass
@pytest.mark.generate
@unittest.skip(reason="CSM does not support beam search.")
def test_beam_sample_generate(self):
pass
@pytest.mark.generate
@unittest.skip(reason="CSM does not support beam search.")
def test_beam_search_generate(self):
pass
@pytest.mark.generate
@unittest.skip(reason="CSM does not support beam search.")
def test_beam_search_generate_dict_output(self):
pass
@pytest.mark.generate
@unittest.skip(reason="CSM does not support beam search.")
def test_beam_search_generate_dict_outputs_use_cache(self):
pass
@pytest.mark.generate
@unittest.skip(reason="CSM does not support beam search.")
def test_beam_sample_generate_dict_output(self):
pass
@pytest.mark.generate
@unittest.skip(reason="CSM does not support prompt lookup decoding.")
def test_prompt_lookup_decoding_matches_greedy_search(self):
pass
@pytest.mark.generate
@unittest.skip(reason="CSM does not support prompt lookup decoding.")
def test_prompt_lookup_decoding_stops_at_eos(self):
pass
# NOTE(review): this override uses @pytest.mark.skip while the sibling overrides use
# @unittest.skip — consider aligning for consistency (both skip under pytest).
@pytest.mark.skip(reason="CSM has custom embedding approach (text and audio embeddings).")
def test_model_get_set_embeddings(self):
    pass
@pytest.mark.generate
@unittest.skip(reason="CSM does not support beam search.")
def test_generate_from_inputs_embeds_1_beam_search(self, _, num_beams):
    # Signature mirrors the parameterized parent test; skipped so params are never used.
    pass
@pytest.mark.generate
@unittest.skip(reason="CSM does not support beam search.")
def test_model_parallel_beam_search(self):
    # Inherited mixin test; intentionally disabled for CSM.
    pass
@unittest.skip(reason="CSM has special embeddings that can never be tied")
def test_tied_weights_keys(self):
    # Inherited mixin test; intentionally disabled for CSM.
    pass
@unittest.skip(reason="CSM has no separate base model without a head.")
def test_model_base_model_prefix(self):
    # Inherited mixin test; intentionally disabled for CSM.
    pass
def _get_custom_4d_mask_test_data(self):
    """
    Overrides [ModelTesterMixin._get_custom_4d_mask_test_data] to handle third input_ids dimension.
    """
    # Sequence in which all but the last token is the same
    input_ids = torch.tensor([[0, 1, 2, 3], [0, 1, 2, 4], [0, 1, 2, 5]], device=torch_device, dtype=torch.int64)
    # CSM input ids carry a trailing codebook axis; replicate each token across all codebooks.
    input_ids = input_ids.unsqueeze(-1).expand(-1, -1, self.model_tester.config["num_codebooks"])
    position_ids = torch.tensor([[0, 1, 2, 3]] * 3, device=torch_device, dtype=torch.int64)
    # Combining common prefix with the unique ending tokens:
    input_ids_shared_prefix = torch.cat([input_ids[0][:-1], input_ids[:, -1]]).unsqueeze(0)
    # Creating a 4D mask where each of the last 3 tokens do not attend to each other.
    mask_shared_prefix = torch.tensor(
        [
            [
                [
                    [1, 0, 0, 0, 0, 0],
                    [1, 1, 0, 0, 0, 0],
                    [1, 1, 1, 0, 0, 0],
                    [1, 1, 1, 1, 0, 0],
                    [1, 1, 1, 0, 1, 0],
                    [1, 1, 1, 0, 0, 1],
                ]
            ]
        ],
    )
    # inverting the attention mask: 1 -> 0 (attend), 0 -> most negative float (masked)
    mask_dtype = torch.float32
    min_dtype = torch.finfo(mask_dtype).min
    mask_shared_prefix = (mask_shared_prefix.eq(0.0)).to(dtype=mask_dtype, device=torch_device) * min_dtype
    # Creating a position_ids tensor. note the repeating figures in the end.
    position_ids_shared_prefix = torch.tensor([[0, 1, 2, 3, 3, 3]], device=torch_device, dtype=torch.int64)
    return input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix, position_ids_shared_prefix
class CsmForConditionalGenerationIntegrationTest(unittest.TestCase):
def setUp(self):
    # TODO: @eustlb, update with correct sesame's repo
    # Checkpoint id used by every integration test in this class.
    self.model_checkpoint = "sesame/csm-1b"
def tearDown(self):
    # Release accelerator memory between tests; gc_collect=True also forces garbage collection.
    cleanup(torch_device, gc_collect=True)
def _load_conversation(self):
    """Return the first turn of conversation 0 from the dummy DailyTalk dataset."""
    dataset = load_dataset("hf-internal-testing/dailytalk-dummy", split="train")
    conversation_zero = dataset.filter(lambda example: example["conversation_id"] == 0)
    return conversation_zero.sort("turn_id")[0]
@slow
@require_torch_accelerator
def test_1b_model_integration_generate(self):
    """
    Tests the generated tokens match the ones from the original model implementation.
    Such tokens are to be retrieved using https://gist.github.com/eustlb/d25577a357ddcf8f4a8cd0d00baca551, which is a script that infers the original model.
    """
    processor = AutoProcessor.from_pretrained(self.model_checkpoint)
    # Raw chat-formatted prompt: speaker [0] turn with audio context, then speaker [1] text-only turn.
    prompt = "<|begin_of_text|>[0]What are you working on?<|end_of_text|><|AUDIO|><|audio_eos|><|begin_of_text|>[1]I'm figuring out my budget.<|end_of_text|>"
    ds = load_dataset("hf-internal-testing/dailytalk-dummy", split="train")
    audio = ds[0]["audio"]["array"]
    inputs = processor(text=prompt, audio=audio, return_tensors="pt").to(torch_device)
    model = CsmForConditionalGeneration.from_pretrained(self.model_checkpoint, device_map=torch_device)
    # Greedy decoding on both the backbone and the depth decoder for reproducibility.
    output_tokens = model.generate(**inputs, do_sample=False, depth_decoder_do_sample=False)
    # fmt: off
    # Expected codebook tokens captured from the reference implementation (see gist above).
    EXPECTED_OUTPUT_TOKENS = torch.tensor([[
        [1140, 1818, 86, 1072, 1029, 1010, 796, 577, 1523, 1599, 902, 1308, 817, 232, 1860, 56, 327, 1399, 1069, 1014, 1980, 53, 407, 1841, 1559, 928, 972, 1432, 832, 1007, 1325, 371],
        [955, 1390, 1503, 861, 265, 1753, 91, 1690, 389, 1025, 1086, 495, 1192, 1334, 773, 1277, 957, 1388, 513, 1110, 539, 349, 1865, 1515, 806, 1514, 237, 1424, 1783, 1928, 523, 1925],
        [1925, 190, 654, 1538, 19, 37, 1923, 100, 1909, 1156, 1847, 1901, 975, 982, 2002, 544, 1933, 311, 79, 850, 238, 1034, 428, 1231, 764, 313, 973, 269, 1669, 1058, 1641, 891],
        [1721, 92, 1298, 989, 1868, 154, 386, 1115, 347, 384, 853, 1439, 970, 1369, 238, 1279, 268, 595, 2010, 1861, 723, 999, 578, 1612, 69, 121, 306, 1647, 1609, 1185, 1786, 1268],
        [1356, 1419, 1199, 1575, 418, 53, 1140, 805, 355, 324, 633, 199, 343, 1176, 784, 41, 268, 366, 1478, 466, 1591, 305, 1298, 1335, 1866, 1563, 1503, 1558, 1468, 852, 1244, 312],
        [1860, 1603, 546, 1805, 607, 160, 1528, 191, 1867, 1830, 861, 661, 1740, 1276, 218, 954, 1286, 1216, 1727, 1637, 983, 597, 1857, 65, 797, 947, 427, 476, 739, 978, 107, 1394],
        [1165, 1775, 177, 823, 100, 370, 521, 200, 2007, 434, 1444, 1205, 819, 1278, 31, 912, 150, 1546, 2035, 1147, 559, 1995, 639, 35, 1812, 56, 1485, 2003, 1573, 1693, 1762, 1313],
        [1932, 704, 907, 897, 56, 1587, 990, 1905, 2007, 256, 671, 868, 282, 1731, 460, 1055, 1309, 1880, 584, 1849, 1643, 1198, 310, 361, 789, 1657, 905, 1564, 1354, 110, 915, 1011],
        [1437, 1958, 1483, 313, 79, 28, 859, 397, 1783, 1693, 633, 1424, 1128, 1831, 605, 1123, 1496, 739, 1177, 498, 781, 1756, 1288, 890, 224, 1875, 279, 800, 1999, 1740, 348, 1420],
        [724, 870, 1344, 861, 429, 522, 1877, 1689, 771, 1468, 1952, 156, 856, 462, 18, 834, 33, 840, 1136, 2012, 1766, 1891, 2034, 1731, 624, 108, 1469, 653, 1344, 1682, 407, 515],
        [355, 26, 36, 1700, 1032, 293, 1799, 978, 944, 296, 1333, 1377, 664, 1249, 421, 516, 1178, 531, 1587, 899, 1, 1449, 934, 942, 1604, 1208, 1889, 710, 825, 2012, 1563, 1299],
        [629, 15, 551, 861, 310, 918, 149, 1689, 1464, 1950, 1900, 1502, 1503, 615, 477, 1090, 1556, 1393, 1143, 1112, 1934, 416, 1604, 1470, 1501, 1594, 903, 1400, 972, 199, 1075, 1643],
        [1281, 106, 1162, 1313, 115, 429, 1792, 1379, 1535, 1311, 743, 484, 333, 498, 547, 699, 1075, 1861, 1038, 1352, 166, 622, 759, 1398, 241, 138, 1330, 481, 1254, 1365, 985, 423],
        [9, 520, 323, 25, 1873, 716, 1414, 1413, 266, 1449, 1265, 290, 1341, 836, 674, 411, 913, 911, 637, 1038, 1097, 1158, 1009, 803, 737, 154, 1388, 938, 466, 725, 1216, 1549],
        [1944, 15, 62, 332, 540, 689, 106, 1805, 1303, 1787, 1724, 1011, 1515, 1442, 1197, 496, 2026, 1820, 906, 372, 322, 1413, 1305, 1674, 443, 1733, 828, 905, 1116, 1850, 1870, 786],
        [221, 220, 1093, 1790, 759, 1266, 1169, 1379, 572, 1859, 1155, 596, 1398, 412, 1788, 1963, 167, 89, 1011, 1489, 714, 73, 486, 780, 1136, 254, 983, 138, 386, 800, 1819, 1857],
        [1178, 1939, 107, 1605, 582, 1256, 420, 637, 648, 1023, 1809, 978, 1703, 278, 1668, 2044, 1599, 1321, 1670, 1716, 1155, 56, 602, 877, 886, 220, 910, 797, 1028, 1226, 869, 811],
        [1432, 1926, 1197, 1687, 540, 1815, 658, 1080, 1162, 192, 315, 1713, 422, 586, 65, 947, 493, 1536, 13, 505, 1269, 456, 1042, 645, 512, 1394, 1124, 590, 1058, 1896, 1055, 1537],
        [905, 564, 1739, 1594, 1201, 1773, 738, 994, 239, 1686, 1528, 368, 1791, 1924, 607, 44, 1320, 552, 1862, 1578, 591, 1434, 330, 1576, 1946, 1233, 113, 445, 669, 2041, 1242, 1406],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    ]])
    # fmt: on
    torch.testing.assert_close(output_tokens.cpu(), EXPECTED_OUTPUT_TOKENS)
@slow
@require_torch_accelerator
def test_1b_model_integration_generate_no_audio(self):
    """
    Tests the generated tokens match the ones from the original model implementation.
    Such tokens are to be retrieved using https://gist.github.com/eustlb/aed822f765e928b9612e01b0d8836d69, which is a script that infers the original model.
    """
    processor = AutoProcessor.from_pretrained(self.model_checkpoint)
    # Single text-only turn: no audio context is provided to the model.
    conversation = [
        {"role": "0", "content": [{"type": "text", "text": "The past is just a story we tell ourselves."}]},
    ]
    inputs = processor.apply_chat_template(conversation, tokenize=True, return_dict=True).to(torch_device)
    model = CsmForConditionalGeneration.from_pretrained(self.model_checkpoint, device_map=torch_device)
    # Greedy decoding on both the backbone and the depth decoder for reproducibility.
    # (Removed a leftover debug `print(output_tokens)` that polluted test output.)
    output_tokens = model.generate(**inputs, do_sample=False, depth_decoder_do_sample=False)
    # fmt: off
    # Expected codebook tokens captured from the reference implementation (see gist above).
    EXPECTED_OUTPUT_TOKENS = torch.tensor([[
        [1656, 629, 723, 1785, 206, 1873, 1059, 1190, 1833, 240, 618, 350, 156, 109, 2010, 452, 435, 1764, 77, 654, 1133, 908, 1095, 74, 804, 494, 1760, 1343, 1312, 1464, 1657, 324],
        [366, 1532, 1945, 21, 145, 1428, 1417, 1987, 1793, 1444, 356, 1491, 849, 333, 788, 426, 1423, 1004, 414, 1823, 1169, 257, 1892, 696, 1572, 998, 1098, 523, 390, 1977, 546, 1692],
        [1343, 1382, 1288, 1744, 1685, 1154, 1837, 1156, 1680, 1641, 1479, 1548, 632, 824, 694, 2010, 671, 1251, 1822, 343, 638, 1372, 696, 1272, 144, 125, 1332, 579, 936, 77, 159, 357],
        [456, 1534, 349, 274, 1956, 1502, 1268, 1038, 1911, 523, 1360, 1159, 761, 293, 718, 1143, 63, 705, 168, 550, 413, 1372, 1771, 787, 631, 693, 784, 1789, 2039, 1131, 1601, 918],
        [456, 829, 2026, 1108, 1649, 207, 1308, 1440, 1192, 1394, 426, 546, 590, 36, 1682, 1827, 1387, 1425, 1909, 1500, 1438, 1297, 5, 888, 948, 1745, 1304, 1364, 1692, 131, 300, 1908],
        [2027, 1431, 1037, 1789, 1296, 1264, 1331, 1787, 1235, 1902, 1161, 1591, 590, 561, 1633, 1218, 510, 148, 1962, 118, 212, 608, 565, 1869, 583, 598, 532, 658, 1416, 9, 1172, 493],
        [1215, 460, 1722, 317, 1423, 716, 1589, 1177, 1927, 1860, 1756, 1552, 1674, 643, 74, 1256, 587, 1742, 771, 2028, 469, 1070, 1683, 1614, 699, 494, 2020, 139, 1365, 1171, 171, 904],
        [1615, 339, 323, 317, 469, 714, 104, 2015, 1407, 278, 468, 77, 2007, 650, 1630, 269, 168, 934, 1544, 58, 1487, 1373, 705, 874, 1252, 2031, 1995, 254, 1334, 1171, 1911, 1607],
        [1259, 693, 666, 1700, 1115, 607, 982, 769, 1106, 1500, 101, 88, 1698, 1864, 1358, 1594, 192, 153, 1868, 1654, 604, 1948, 526, 778, 172, 1664, 1966, 99, 1334, 1030, 1349, 1209],
        [1211, 579, 1369, 492, 1725, 203, 1125, 778, 701, 1982, 1420, 155, 736, 1145, 2018, 609, 658, 561, 1147, 923, 1794, 1753, 116, 1374, 612, 956, 1587, 392, 1062, 2047, 901, 1931],
        [460, 1093, 1346, 1917, 1223, 470, 271, 390, 547, 112, 143, 1633, 1030, 643, 96, 1759, 920, 1959, 75, 1280, 1630, 999, 333, 853, 1110, 1291, 1911, 57, 171, 1658, 1704, 1508],
        [908, 500, 393, 184, 1437, 482, 2008, 1834, 356, 1435, 1550, 1407, 1236, 109, 1167, 452, 1141, 934, 207, 957, 660, 670, 28, 1066, 1252, 1932, 669, 906, 1904, 1820, 2043, 881],
        [1599, 1031, 1474, 336, 1540, 571, 437, 1440, 1616, 1365, 1412, 1246, 400, 405, 1776, 96, 296, 38, 1597, 466, 1630, 1256, 1940, 887, 1769, 294, 285, 842, 1756, 1619, 451, 1529],
        [1615, 339, 1722, 525, 942, 105, 1365, 670, 785, 1316, 465, 1860, 438, 968, 547, 1938, 1816, 1429, 1065, 1942, 660, 1446, 1093, 1066, 931, 121, 688, 1033, 1178, 754, 1783, 94],
        [912, 1354, 598, 254, 341, 1980, 1166, 585, 1302, 473, 554, 242, 174, 2030, 2011, 325, 978, 1690, 258, 396, 1831, 1768, 1291, 1699, 2001, 433, 1414, 2012, 1045, 511, 533, 1104],
        [80, 1791, 1062, 1136, 391, 568, 1651, 101, 959, 2043, 1683, 760, 794, 181, 570, 540, 1599, 20, 1017, 973, 1654, 396, 586, 778, 2044, 1664, 1911, 929, 66, 897, 510, 643],
        [1161, 1093, 161, 1296, 589, 54, 906, 981, 1927, 605, 516, 1731, 1461, 1204, 1902, 920, 1488, 177, 805, 1402, 610, 1446, 1154, 1067, 2025, 645, 762, 1715, 415, 1658, 1713, 1607],
        [374, 1444, 1577, 792, 1450, 628, 604, 1729, 322, 514, 1725, 540, 1070, 575, 653, 800, 250, 187, 569, 349, 354, 1573, 176, 793, 897, 359, 536, 276, 1224, 23, 145, 1287],
        [1184, 415, 1644, 1737, 1788, 385, 784, 1861, 1172, 1118, 367, 1156, 234, 1946, 1742, 981, 828, 1798, 1821, 361, 1148, 670, 518, 1288, 761, 1050, 1642, 1006, 1747, 840, 1599, 720],
        [1141, 1731, 1670, 1542, 1347, 1907, 683, 753, 1347, 68, 2031, 153, 556, 719, 736, 1759, 1131, 1073, 1747, 1730, 1487, 1137, 1869, 1624, 699, 1900, 748, 49, 1312, 735, 726, 1268],
        [1141, 1383, 405, 1033, 490, 488, 1102, 471, 713, 1630, 447, 703, 1495, 1001, 1855, 354, 456, 411, 786, 853, 168, 407, 116, 699, 605, 128, 532, 1076, 208, 447, 1448, 1071],
        [345, 1013, 948, 1728, 1837, 337, 930, 1226, 1643, 1729, 983, 1688, 2009, 435, 1358, 721, 42, 1779, 1332, 1077, 1873, 128, 1327, 125, 1226, 1704, 705, 1459, 1449, 862, 155, 1870],
        [336, 904, 684, 184, 1542, 714, 1752, 1180, 1373, 1816, 504, 1716, 1066, 1086, 1212, 530, 1413, 1278, 75, 1347, 82, 1623, 1307, 1717, 1861, 494, 888, 1589, 670, 1999, 905, 1430],
        [578, 554, 14, 523, 1016, 300, 1589, 1017, 356, 1583, 1654, 414, 449, 376, 1413, 58, 706, 963, 388, 1626, 131, 352, 1024, 1054, 2025, 1561, 77, 1589, 1486, 431, 1249, 1508],
        [184, 2043, 169, 1673, 580, 162, 1752, 397, 1119, 2009, 697, 150, 1475, 157, 1523, 1402, 575, 86, 1373, 1230, 1564, 1308, 626, 1093, 1603, 1446, 1390, 1543, 1778, 1142, 1357, 1831],
        [1484, 1987, 932, 1728, 1504, 1618, 291, 1865, 1151, 460, 1792, 141, 234, 2043, 829, 513, 435, 791, 1037, 1541, 65, 424, 1589, 1711, 312, 1306, 212, 686, 673, 984, 1914, 1549],
        [513, 1536, 1844, 1319, 572, 1069, 121, 735, 1949, 1211, 1362, 1027, 105, 1379, 315, 1782, 706, 1658, 1510, 1989, 1443, 1690, 822, 1614, 1194, 1460, 992, 2040, 1178, 1474, 1110, 1326],
        [1858, 194, 1594, 1935, 1622, 1892, 1577, 137, 1907, 2015, 757, 414, 1823, 836, 496, 530, 1385, 1503, 1065, 1554, 664, 525, 1031, 433, 69, 466, 1016, 1846, 1609, 1658, 911, 94],
        [1134, 1744, 323, 691, 1837, 347, 1871, 172, 811, 91, 1883, 436, 1912, 23, 1336, 1684, 519, 1612, 1219, 1402, 728, 1953, 1658, 641, 27, 1340, 436, 139, 2008, 1030, 159, 324],
        [1270, 1536, 1639, 414, 1387, 1170, 1067, 1701, 1414, 505, 1122, 36, 1731, 350, 1552, 1214, 1444, 30, 107, 172, 480, 1858, 655, 168, 1107, 691, 1272, 797, 1656, 548, 1407, 1375],
        [1270, 286, 1371, 1552, 1622, 1739, 1348, 2018, 345, 1537, 1941, 2024, 1423, 740, 284, 513, 91, 1228, 2015, 385, 992, 39, 813, 803, 2025, 497, 663, 462, 1609, 334, 927, 1470],
        [1718, 994, 265, 1421, 1622, 1098, 845, 1868, 832, 459, 447, 619, 1970, 929, 513, 63, 1448, 1509, 1219, 1942, 285, 1373, 1259, 1004, 11, 1040, 1984, 57, 188, 1687, 1475, 805],
        [1157, 832, 480, 1225, 1019, 347, 326, 999, 125, 1542, 118, 1383, 1343, 1077, 1821, 1602, 1978, 1642, 618, 808, 692, 1953, 1353, 963, 619, 1291, 1016, 1458, 1995, 1688, 1872, 1718],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    ]])
    # fmt: on
    torch.testing.assert_close(output_tokens.cpu(), EXPECTED_OUTPUT_TOKENS)
@slow
@require_torch_accelerator
def test_1b_model_integration_generate_multiple_audio(self):
    """
    Test the generated tokens match the ones from the original model implementation.
    Such tokens are to be retrieved using https://gist.github.com/eustlb/0c94de002e1325abb61d32217f74c0f8, which is a script that infers the original model.
    """
    processor = AutoProcessor.from_pretrained(self.model_checkpoint)
    ds = load_dataset("hf-internal-testing/dailytalk-dummy", split="train")
    conversation = []
    # context: four turns, each with both text and its reference audio
    for text, audio, speaker_id in zip(ds[:4]["text"], ds[:4]["audio"], ds[:4]["speaker_id"]):
        conversation.append(
            {
                "role": f"{speaker_id}",
                "content": [{"type": "text", "text": text}, {"type": "audio", "path": audio["array"]}],
            }
        )
    # text prompt: the fifth turn carries text only — the model must generate its audio
    conversation.append({"role": f"{ds[4]['speaker_id']}", "content": [{"type": "text", "text": ds[4]["text"]}]})
    inputs = processor.apply_chat_template(
        conversation,
        tokenize=True,
        return_dict=True,
    ).to(torch_device)
    model = CsmForConditionalGeneration.from_pretrained(self.model_checkpoint, device_map=torch_device)
    # Greedy decoding on both the backbone and the depth decoder for reproducibility.
    output_tokens = model.generate(**inputs, do_sample=False, depth_decoder_do_sample=False)
    # fmt: off
    # Expected codebook tokens captured from the reference implementation (see gist above).
    EXPECTED_OUTPUT_TOKENS = torch.tensor([[
        [420, 1189, 1311, 318, 359, 694, 1550, 1044, 1614, 1437, 1978, 537, 554, 1681, 147, 1225, 422, 1357, 1681, 1619, 165, 641, 1132, 1975, 1568, 406, 756, 503, 1673, 1428, 762, 781],
        [1848, 1412, 957, 1656, 871, 540, 1999, 175, 711, 1383, 1814, 104, 742, 1285, 733, 1251, 1165, 1915, 1392, 645, 1804, 913, 1772, 632, 376, 1507, 1132, 725, 716, 1121, 1769, 1509],
        [429, 1138, 895, 1018, 1099, 257, 1395, 1015, 576, 1599, 497, 19, 1858, 1437, 282, 357, 1143, 828, 1481, 70, 985, 551, 935, 278, 1102, 1453, 1902, 755, 526, 498, 1441, 1733],
        [546, 343, 1547, 879, 2039, 692, 1999, 1150, 1969, 1866, 1178, 199, 1913, 1738, 1530, 1728, 1193, 74, 695, 612, 1095, 1597, 1381, 683, 1385, 2045, 1069, 865, 438, 70, 1437, 318],
        [1741, 1621, 733, 1580, 1006, 482, 1508, 1722, 1529, 1822, 745, 552, 142, 1568, 704, 480, 214, 552, 321, 1858, 1902, 1042, 1249, 1328, 1730, 1218, 1755, 597, 670, 738, 1056, 762],
        [1264, 1561, 1307, 730, 1403, 688, 212, 949, 1871, 994, 1174, 674, 858, 293, 1577, 1221, 1024, 1535, 1224, 872, 509, 1971, 46, 440, 1531, 1100, 1466, 732, 964, 381, 1933, 1612],
        [1407, 982, 1665, 1247, 1636, 1546, 939, 882, 1999, 618, 484, 1632, 66, 430, 290, 327, 351, 1236, 687, 504, 1973, 1073, 1233, 1972, 82, 1655, 361, 1612, 861, 1085, 880, 1407],
        [584, 637, 304, 1805, 1683, 1381, 404, 862, 1278, 916, 1695, 370, 316, 1049, 237, 1187, 1389, 300, 680, 135, 1068, 1368, 810, 1392, 103, 1459, 1051, 644, 38, 1517, 790, 646],
        [471, 1984, 1333, 553, 193, 319, 1604, 1546, 153, 513, 990, 839, 1714, 1998, 984, 1882, 1055, 476, 1821, 1476, 1522, 1817, 949, 1923, 1416, 1885, 1832, 1368, 1782, 1229, 436, 918],
        [28, 1238, 489, 1580, 596, 1232, 840, 835, 297, 762, 474, 1106, 1761, 483, 1165, 923, 1184, 1181, 1724, 398, 1484, 860, 1945, 665, 1925, 14, 67, 1693, 1853, 1283, 1822, 1973],
        [20, 637, 253, 1254, 738, 188, 593, 1239, 1768, 1047, 1703, 1512, 1398, 464, 13, 161, 651, 1844, 666, 210, 1510, 1798, 614, 1649, 1751, 341, 808, 915, 1965, 840, 778, 950],
        [1879, 2028, 1405, 694, 432, 2036, 612, 387, 1843, 1204, 1044, 8, 1538, 542, 1198, 598, 1131, 760, 1217, 901, 800, 1046, 136, 639, 1320, 618, 606, 707, 574, 1288, 1254, 198],
        [1874, 937, 1063, 1341, 254, 13, 359, 888, 1837, 1246, 980, 818, 2046, 1258, 1290, 1470, 2028, 1701, 228, 1766, 51, 93, 296, 991, 1094, 1694, 156, 1207, 401, 967, 867, 211],
        [1762, 426, 1749, 2004, 314, 903, 1254, 220, 1330, 1813, 534, 102, 658, 1460, 603, 1046, 402, 2005, 783, 973, 1764, 210, 1458, 803, 605, 369, 669, 352, 1964, 1549, 632, 1375],
        [1577, 386, 503, 1492, 604, 405, 1329, 349, 180, 875, 329, 196, 514, 1854, 925, 159, 1428, 1300, 1510, 329, 76, 1682, 1036, 854, 695, 1097, 816, 382, 1417, 697, 1693, 194],
        [1109, 848, 1385, 126, 1136, 979, 687, 130, 2045, 140, 562, 361, 921, 1706, 1060, 1723, 165, 1304, 203, 1067, 158, 692, 980, 313, 1896, 1812, 839, 837, 985, 116, 866, 1049],
        [1810, 1092, 1534, 1730, 773, 2044, 1098, 1326, 85, 249, 455, 1728, 860, 443, 1841, 1885, 1698, 864, 1747, 1083, 1591, 1785, 1577, 1001, 1025, 1837, 1504, 1839, 1900, 1932, 230, 968],
        [1547, 1465, 896, 794, 613, 1383, 1806, 1984, 526, 671, 100, 519, 2037, 1631, 1724, 633, 824, 994, 893, 1448, 1793, 1237, 1855, 699, 349, 143, 270, 535, 1550, 101, 22, 1311],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    ]])
    # fmt: on
    torch.testing.assert_close(output_tokens.cpu(), EXPECTED_OUTPUT_TOKENS)
@slow
@require_torch_accelerator
def test_1b_model_integration_generate_batched(self):
    """
    Test the generated tokens match the ones from the original model implementation.
    Such tokens are to be retrieved using https://gist.github.com/eustlb/bcc532b53161bc31da3d66cb07ae193f, which is a script that infers the original model.
    """
    processor = AutoProcessor.from_pretrained(self.model_checkpoint)
    ds = load_dataset("hf-internal-testing/dailytalk-dummy", split="train")
    # Batch of two conversations of different lengths (one with audio context, one text-only)
    # to exercise padded batched generation.
    conversation = [
        [
            {
                "role": f"{ds[0]['speaker_id']}",
                "content": [
                    {"type": "text", "text": ds[0]["text"]},
                    {"type": "audio", "path": ds[0]["audio"]["array"]},
                ],
            },
            {
                "role": f"{ds[1]['speaker_id']}",
                "content": [
                    {"type": "text", "text": ds[1]["text"]},
                ],
            },
        ],
        [
            {
                "role": f"{ds[0]['speaker_id']}",
                "content": [
                    {"type": "text", "text": ds[0]["text"]},
                ],
            }
        ],
    ]
    inputs = processor.apply_chat_template(
        conversation,
        tokenize=True,
        return_dict=True,
    ).to(torch_device)
    model = CsmForConditionalGeneration.from_pretrained(self.model_checkpoint, device_map=torch_device)
    # Greedy decoding on both the backbone and the depth decoder for reproducibility.
    output_tokens = model.generate(**inputs, do_sample=False, depth_decoder_do_sample=False)
    # fmt: off
    # Expected codebook tokens captured from the reference implementation (see gist above).
    # The second batch element finishes earlier; trailing all-zero rows are padding.
    EXPECTED_OUTPUT_TOKENS = torch.tensor([
        [
            [1140, 1818, 1713, 1072, 1029, 1185, 697, 358, 220, 481, 1127, 1779, 817, 891, 958, 1058, 672, 495, 426, 1135, 236, 1440, 829, 2023, 1097, 94, 926, 1830, 114, 307, 235, 1190],
            [955, 968, 696, 676, 52, 618, 0, 1818, 1285, 143, 1733, 1268, 1317, 1510, 1027, 2033, 1276, 1744, 790, 638, 1179, 1125, 650, 266, 1180, 364, 1015, 1604, 1152, 154, 178, 284],
            [1925, 274, 433, 273, 1391, 1528, 1683, 1120, 976, 944, 357, 1681, 847, 1783, 546, 857, 1662, 1695, 40, 152, 2039, 1076, 994, 1743, 265, 1751, 602, 981, 483, 981, 538, 1381],
            [1908, 1625, 1975, 729, 1067, 1844, 837, 1849, 224, 1223, 1037, 1188, 1428, 1977, 317, 530, 990, 1670, 766, 1411, 811, 154, 433, 1645, 1565, 1291, 1390, 49, 1160, 1464, 1911, 1961],
            [1908, 566, 175, 1387, 1437, 1873, 1785, 1536, 961, 414, 406, 1753, 835, 284, 764, 1522, 1889, 1816, 840, 440, 756, 860, 1753, 516, 601, 1498, 280, 1425, 1904, 1540, 1074, 314],
            [1860, 296, 1766, 361, 1155, 1675, 528, 1975, 1286, 113, 1656, 237, 372, 580, 1571, 1958, 502, 893, 1300, 261, 313, 455, 693, 1658, 654, 1585, 1723, 721, 178, 679, 908, 1077],
            [1165, 1787, 1877, 1904, 85, 609, 1007, 1724, 1959, 245, 645, 463, 1321, 1695, 192, 711, 1892, 1193, 302, 1835, 69, 940, 148, 913, 110, 108, 1244, 1510, 165, 726, 745, 1746],
            [1405, 1410, 186, 1569, 1214, 1920, 1946, 1907, 990, 1152, 1401, 1713, 541, 115, 423, 616, 1191, 1149, 1122, 9, 303, 195, 906, 566, 1718, 668, 1637, 1975, 51, 2005, 1260, 1672],
            [1932, 780, 143, 110, 286, 1460, 1136, 1366, 1788, 446, 645, 587, 1708, 189, 1295, 526, 1667, 735, 707, 1215, 27, 834, 1865, 182, 1776, 1130, 528, 1523, 1156, 316, 492, 1666],
            [1437, 364, 314, 432, 575, 1640, 529, 1128, 973, 789, 1820, 808, 1317, 1681, 347, 471, 737, 1626, 1386, 75, 433, 517, 365, 1982, 1434, 1378, 1059, 56, 1475, 653, 1507, 861],
            [724, 538, 1140, 1853, 76, 402, 0, 397, 330, 1787, 1382, 682, 1134, 296, 377, 997, 705, 627, 1700, 17, 1791, 1000, 1271, 1019, 1552, 1521, 668, 534, 433, 344, 1007, 1046],
            [925, 1297, 1017, 1785, 1403, 520, 1603, 1908, 665, 1827, 951, 1588, 1526, 414, 1945, 1153, 1933, 1571, 1821, 104, 179, 769, 619, 117, 56, 790, 721, 992, 1284, 1495, 1459, 823],
            [629, 1208, 689, 924, 1617, 1100, 1028, 1231, 1708, 1582, 200, 2011, 1611, 1966, 1153, 1326, 2036, 1515, 884, 1790, 581, 549, 1491, 701, 973, 836, 2031, 1249, 1411, 365, 1946, 1552],
            [1281, 1305, 610, 1666, 676, 544, 1788, 315, 159, 809, 1333, 1785, 1159, 1084, 1356, 318, 1933, 854, 475, 638, 1616, 1801, 1816, 1921, 283, 1745, 814, 974, 1056, 1316, 1509, 2031],
            [9, 212, 1590, 163, 1289, 923, 2046, 1620, 632, 127, 963, 405, 850, 471, 1430, 108, 1845, 1196, 1928, 143, 1717, 1054, 1288, 1351, 1340, 1294, 831, 480, 1562, 2004, 483, 1776],
            [221, 142, 1555, 1434, 1481, 1371, 1873, 1607, 207, 631, 1042, 1084, 472, 465, 1772, 1002, 1761, 1912, 1298, 1918, 685, 1053, 1635, 1536, 497, 55, 1432, 1394, 1512, 365, 2026, 1210],
            [1741, 1923, 930, 1423, 1258, 1227, 879, 1217, 1999, 422, 420, 1832, 1660, 1542, 92, 2000, 1790, 1909, 56, 695, 704, 1752, 371, 792, 625, 328, 567, 1397, 1557, 390, 1424, 14],
            [1178, 812, 577, 895, 1386, 339, 1467, 844, 235, 703, 551, 2021, 1592, 1042, 353, 621, 1672, 653, 2029, 103, 766, 182, 2016, 1921, 556, 1092, 1579, 626, 1950, 70, 1467, 850],
            [1352, 472, 577, 351, 1126, 1943, 52, 2028, 430, 1017, 1136, 645, 820, 2028, 723, 1385, 1922, 323, 106, 267, 438, 1064, 202, 1249, 244, 1962, 625, 1380, 476, 924, 1221, 1854],
            [905, 811, 374, 2021, 1067, 675, 927, 427, 416, 1521, 663, 77, 457, 1849, 1362, 262, 1669, 1238, 286, 102, 555, 1809, 1585, 1918, 972, 1446, 688, 523, 1904, 943, 17, 904],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        ],
        [
            [1375, 203, 265, 164, 200, 1867, 976, 924, 1972, 1637, 1048, 271, 1912, 1430, 853, 1942, 260, 1642, 400, 57, 1376, 1626, 1821, 1163, 619, 777, 1076, 951, 389, 1820, 84, 1417],
            [914, 527, 286, 968, 305, 1314, 805, 1703, 87, 559, 1980, 1124, 1726, 36, 1139, 618, 1628, 519, 1943, 781, 400, 1265, 438, 113, 87, 856, 465, 162, 1099, 352, 1141, 274],
            [1408, 6, 126, 2009, 90, 996, 934, 134, 1857, 126, 602, 876, 1092, 1962, 1205, 828, 707, 1063, 393, 1533, 123, 1086, 1749, 1324, 1, 1763, 1707, 1191, 34, 1323, 1017, 1787],
            [1000, 683, 1630, 703, 1574, 587, 25, 1049, 213, 1270, 1641, 1072, 1892, 1634, 1603, 90, 867, 2037, 1021, 715, 206, 507, 1138, 959, 1822, 1785, 280, 1100, 1660, 251, 1903, 988],
            [1657, 1981, 246, 1048, 1952, 451, 305, 423, 2000, 416, 756, 1748, 7, 748, 1866, 1795, 1682, 1832, 338, 212, 1685, 518, 154, 1407, 416, 765, 776, 25, 55, 458, 612, 262],
            [1034, 564, 667, 1474, 1212, 350, 712, 941, 1151, 1182, 1280, 640, 924, 1722, 1816, 458, 226, 359, 1518, 102, 1203, 459, 676, 1788, 1110, 393, 1974, 1721, 795, 1459, 798, 1723],
            [742, 1616, 119, 653, 441, 679, 246, 1432, 486, 1615, 1191, 500, 650, 223, 687, 1765, 1875, 963, 1385, 863, 151, 1771, 458, 1170, 737, 1932, 785, 1954, 1067, 16, 1986, 2029],
            [1437, 1078, 1767, 1452, 1392, 45, 2010, 1664, 245, 2015, 1416, 1055, 457, 985, 740, 1594, 1562, 1838, 258, 1431, 701, 604, 1813, 352, 792, 632, 21, 895, 70, 609, 850, 1599],
            [983, 1961, 54, 135, 846, 711, 473, 1630, 1373, 1094, 251, 525, 632, 1014, 1594, 1594, 1752, 398, 1266, 1357, 942, 1680, 191, 874, 483, 1291, 381, 1873, 1964, 1278, 1477, 122],
            [1663, 1969, 1887, 113, 145, 251, 1133, 156, 245, 1641, 209, 1322, 2037, 836, 539, 667, 940, 797, 1758, 1357, 191, 1137, 587, 1699, 27, 701, 395, 99, 1682, 876, 762, 839],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
    ])
    # fmt: on
    torch.testing.assert_close(output_tokens.cpu(), EXPECTED_OUTPUT_TOKENS)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/csm/test_modeling_csm.py",
"license": "Apache License 2.0",
"lines": 523,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/models/beit/image_processing_beit_fast.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for Beit."""
from typing import Optional, Union
import torch
import torchvision.transforms.v2.functional as tvF
from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
is_torch_tensor,
)
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
)
from .image_processing_beit import BeitImageProcessorKwargs
@auto_docstring
class BeitImageProcessorFast(BaseImageProcessorFast):
    # Default preprocessing configuration matching BEiT checkpoints.
    resample = PILImageResampling.BICUBIC  # bicubic interpolation for resizing
    image_mean = IMAGENET_STANDARD_MEAN
    image_std = IMAGENET_STANDARD_STD
    size = {"height": 224, "width": 224}
    default_to_square = True
    crop_size = {"height": 224, "width": 224}
    do_resize = True
    do_center_crop = False
    do_rescale = True
    do_normalize = True
    do_reduce_labels = False  # when True, segmentation class 0 is remapped to the 255 ignore index
    valid_kwargs = BeitImageProcessorKwargs
def __init__(self, **kwargs: Unpack[BeitImageProcessorKwargs]):
    # All defaults live on the class; kwargs simply override them via the base initializer.
    super().__init__(**kwargs)
def reduce_label(self, labels: list["torch.Tensor"]):
    """
    Shift segmentation class ids down by one, in place, mapping the background
    class 0 (and any id that would wrap to 254, i.e. an original 255) to the
    ignore index 255. Returns the same (mutated) list.
    """
    for position, label in enumerate(labels):
        ignore_index = torch.tensor(255, dtype=label.dtype)
        # Background (0) becomes the ignore index before the shift so 0 - 1 never occurs.
        shifted = torch.where(label == 0, ignore_index, label) - 1
        # 254 after the shift means the value was the ignore index already; restore it.
        labels[position] = torch.where(shifted == 254, ignore_index, shifted)
    return labels
@auto_docstring
def preprocess(
    self,
    images: ImageInput,
    segmentation_maps: ImageInput | None = None,
    **kwargs: Unpack[BeitImageProcessorKwargs],
) -> BatchFeature:
    r"""
    segmentation_maps (`ImageInput`, *optional*):
        The segmentation maps to preprocess.
    """
    # Thin override so segmentation maps can be forwarded positionally to the base preprocess.
    return super().preprocess(images, segmentation_maps, **kwargs)
def _preprocess_image_like_inputs(
    self,
    images: ImageInput,
    segmentation_maps: ImageInput | None,
    do_convert_rgb: bool,
    input_data_format: ChannelDimension,
    device: Union[str, "torch.device"] | None = None,
    **kwargs: Unpack[BeitImageProcessorKwargs],
) -> BatchFeature:
    """
    Preprocess image-like inputs.

    Images and optional segmentation maps share the same geometric transforms, but label
    reduction applies only to the maps, and pixel-value transforms only to the images.
    """
    images = self._prepare_image_like_inputs(
        images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
    )
    # Label reduction (0 -> 255 ignore index) must never be applied to the images themselves.
    images_kwargs = kwargs.copy()
    images_kwargs["do_reduce_labels"] = False
    batch_feature = self._preprocess(images, **images_kwargs)
    if segmentation_maps is not None:
        # Maps are single-channel: expected_ndims=2 lets a bare (H, W) map get a channel dim.
        processed_segmentation_maps = self._prepare_image_like_inputs(
            images=segmentation_maps,
            expected_ndims=2,
            do_convert_rgb=False,
            input_data_format=ChannelDimension.FIRST,
        )
        # Rescaling/normalizing would corrupt integer class ids, so both are disabled here.
        segmentation_maps_kwargs = kwargs.copy()
        segmentation_maps_kwargs.update({"do_normalize": False, "do_rescale": False})
        processed_segmentation_maps = self._preprocess(
            images=processed_segmentation_maps, **segmentation_maps_kwargs
        ).pixel_values
        # Drop the channel dim and cast to int64, the dtype expected for loss targets.
        batch_feature["labels"] = processed_segmentation_maps.squeeze(1).to(torch.int64)
    return batch_feature
def _preprocess(
    self,
    images: list["torch.Tensor"],
    do_reduce_labels: bool,
    do_resize: bool,
    size: SizeDict,
    interpolation: Optional["tvF.InterpolationMode"],
    do_center_crop: bool,
    crop_size: SizeDict,
    do_rescale: bool,
    rescale_factor: float,
    do_normalize: bool,
    image_mean: float | list[float] | None,
    image_std: float | list[float] | None,
    disable_grouping: bool | None,
    return_tensors: str | TensorType | None,
    **kwargs,
) -> BatchFeature:
    """
    Apply the configured pipeline (optional label reduction, resize, center crop, fused
    rescale+normalize) to a list of image tensors and return them as a `BatchFeature`.
    """
    if do_reduce_labels:
        # Only set for segmentation maps: remap class 0 to the 255 ignore index.
        images = self.reduce_label(images)
    # Group images by size for batched resizing
    grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
    resized_images_grouped = {}
    for shape, stacked_images in grouped_images.items():
        if do_resize:
            stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation)
        resized_images_grouped[shape] = stacked_images
    resized_images = reorder_images(resized_images_grouped, grouped_images_index)
    # Group images by size for further processing
    # Needed in case do_resize is False, or resize returns images with different sizes
    grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
    processed_images_grouped = {}
    for shape, stacked_images in grouped_images.items():
        if do_center_crop:
            stacked_images = self.center_crop(stacked_images, crop_size)
        # Fused rescale and normalize
        stacked_images = self.rescale_and_normalize(
            stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
        )
        processed_images_grouped[shape] = stacked_images
    processed_images = reorder_images(processed_images_grouped, grouped_images_index)
    return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
def post_process_semantic_segmentation(self, outputs, target_sizes: list[tuple] | None = None):
"""
Converts the output of [`BeitForSemanticSegmentation`] into semantic segmentation maps.
Args:
outputs ([`BeitForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`list[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
specified). Each entry of each `torch.Tensor` correspond to a semantic class id.
"""
logits = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
if is_torch_tensor(target_sizes):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits)):
resized_logits = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = logits.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
__all__ = ["BeitImageProcessorFast"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/beit/image_processing_beit_fast.py",
"license": "Apache License 2.0",
"lines": 183,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/granitemoehybrid/configuration_granitemoehybrid.py | # Copyright 2025 IBM and the HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GraniteMoeHybrid model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters
from ...utils import logging
logger = logging.get_logger(__name__)
class GraniteMoeHybridConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GraniteMoeHybridModel`]. It is used to
    instantiate a GraniteMoeHybrid model according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the GraniteMoeHybrid model. Defines the number of different tokens that
            can be represented by the `inputs_ids` passed when calling [`GraniteMoeHybridModel`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
            Only relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        embedding_multiplier (`float`, *optional*, defaults to 1.0): embedding multiplier.
        logits_scaling (`float`, *optional*, defaults to 1.0): divisor for output logits.
        residual_multiplier (`float`, *optional*, defaults to 1.0): residual multiplier.
        attention_multiplier (`float`, *optional*, defaults to 1.0): attention multiplier.
        num_local_experts (`int`, *optional*, defaults to 8): total number of experts.
        num_experts_per_tok (`int`, *optional*, defaults to 2): number of experts per token.
        output_router_logits (`bool`, *optional*, defaults to `False`):
            Whether or not the router logits should be returned by the model. Enabling this will also
            allow the model to output the auxiliary loss.
        router_aux_loss_coef (`float`, *optional*, defaults to 0.001): router auxiliary loss coefficient
        shared_intermediate_size (`int`, *optional*, defaults to 1024): intermediate size for shared experts.
        position_embedding_type (`str`, *optional*):
            Positional embedding type to be used; defaults to None. Allowed options: `[None, "rope"]`
        layer_types (`List`, *optional*): list of strings to be used as layer types.
            Allowed choices: "mamba", "attention".
        mamba_n_heads (`int`, *optional*, defaults to 128):
            The number of mamba heads used.
        mamba_n_groups (`int`, *optional*, defaults to 1):
            The number of the mamba groups used.
        mamba_d_state (`int`, *optional*, defaults to 256):
            The dimension the mamba latent state space.
        mamba_d_head (`int`, *optional*, defaults to `"auto"`):
            Head embedding dimension size.
        mamba_d_conv (`int`, *optional*, defaults to 4):
            The size of the mamba convolution kernel.
        mamba_expand (`int`, *optional*, defaults to 2):
            Expanding factor (relative to hidden_size) used to determine the mamba intermediate size.
        mamba_chunk_size (`int`, *optional*, defaults to 256):
            The chunks in which to break the sequence when doing prefill/training.
        mamba_conv_bias (`bool`, *optional*, defaults to `True`):
            Flag indicating whether or not to use bias in the convolution layer of the mamba mixer block.
        mamba_proj_bias (`bool`, *optional*, defaults to `False`):
            Flag indicating whether or not to use bias in the input and output projections (["in_proj", "out_proj"])
            of the mamba mixer block.
        time_step_min (`float`, *optional*, defaults to 0.001):
            Minimum `time_step` used to bound `dt_proj.bias`.
        time_step_max (`float`, *optional*, defaults to 0.1):
            Maximum `time_step` used to bound `dt_proj.bias`.
        time_step_limit (`tuple`, *optional*, defaults to `(0.0, inf)`):
            Accepted range of time step values for clamping.

    ```python
    >>> from transformers import GraniteMoeHybridModel, GraniteMoeHybridConfig

    >>> # Initializing a GraniteMoeHybrid config
    >>> configuration = GraniteMoeHybridConfig()

    >>> # Initializing a model from the config
    >>> model = GraniteMoeHybridModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "granitemoehybrid"
    # `layers_block_type` is the legacy name some shared code uses for `layer_types`.
    attribute_map = {
        "layers_block_type": "layer_types",
    }
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size: int | None = 32000,
        hidden_size: int | None = 4096,
        intermediate_size: int | None = 11008,
        num_hidden_layers: int | None = 32,
        num_attention_heads: int | None = 32,
        num_key_value_heads: int | None = None,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 2048,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-6,
        use_cache: bool | None = True,
        pad_token_id: int | None = None,
        bos_token_id: int | None = 1,
        eos_token_id: int | None = 2,
        tie_word_embeddings: bool | None = False,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        embedding_multiplier: float | None = 1.0,
        logits_scaling: float | None = 1.0,
        residual_multiplier: float | None = 1.0,
        attention_multiplier: float | None = 1.0,
        num_local_experts: int | None = 8,
        num_experts_per_tok: int | None = 2,
        output_router_logits: bool | None = False,
        router_aux_loss_coef: float | None = 0.001,
        shared_intermediate_size: int | None = 1024,
        position_embedding_type: str | None = None,
        layer_types: list[str] | None = None,
        mamba_n_heads: int | None = 128,
        mamba_n_groups: int | None = 1,
        mamba_d_state: int | None = 256,
        mamba_d_head: str | None = "auto",
        mamba_d_conv: int | None = 4,
        mamba_expand: int | None = 2,
        mamba_chunk_size: int | None = 256,
        mamba_conv_bias: bool | None = True,
        mamba_proj_bias: bool | None = False,
        time_step_min: float | None = 0.001,
        time_step_max: float | None = 0.1,
        time_step_limit: tuple[float, float] | None = (0.0, float("inf")),
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.attention_bias = attention_bias
        self.embedding_multiplier = embedding_multiplier
        self.logits_scaling = logits_scaling
        self.residual_multiplier = residual_multiplier
        self.attention_multiplier = attention_multiplier
        self.attention_dropout = attention_dropout
        self.num_local_experts = num_local_experts
        self.num_experts_per_tok = num_experts_per_tok
        self.output_router_logits = output_router_logits
        self.router_aux_loss_coef = router_aux_loss_coef
        self.shared_intermediate_size = shared_intermediate_size
        self.position_embedding_type = position_embedding_type
        self.rope_parameters = rope_parameters

        # Validate and derive the mamba mixer dimensions before storing them.
        mamba_intermediate = mamba_expand * hidden_size

        if layer_types is not None and any(layer_type not in ["mamba", "attention"] for layer_type in layer_types):
            raise ValueError("layer_types must be a list of strings in [`mamba`, `attention`]")

        if mamba_intermediate % mamba_n_heads != 0:
            raise ValueError("mamba_n_heads must divide mamba_expand * hidden_size")

        # for the mamba_v2, must satisfy the following
        if mamba_d_head == "auto":
            mamba_d_head = mamba_intermediate // mamba_n_heads

        if mamba_d_head * mamba_n_heads != mamba_intermediate:
            raise ValueError("The dimensions for the Mamba head state do not match the model intermediate_size")

        self.mamba_n_heads = mamba_n_heads
        self.mamba_d_head = mamba_d_head
        self.mamba_n_groups = mamba_n_groups
        self.mamba_d_state = mamba_d_state
        self.mamba_d_conv = mamba_d_conv
        self.mamba_chunk_size = mamba_chunk_size
        self.mamba_conv_bias = mamba_conv_bias
        self.mamba_proj_bias = mamba_proj_bias
        self.time_step_min = time_step_min
        self.time_step_max = time_step_max
        self.time_step_limit = tuple(time_step_limit) if time_step_limit is not None else None
        self.mamba_expand = mamba_expand
        self.layer_types = layer_types
        self.tie_word_embeddings = tie_word_embeddings
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(**kwargs)

    # overwrite the function to use in `HybridMambaAttentionDynamicCache`
    @property
    def layers_block_type(self):
        # Default to an all-mamba stack when no explicit layer_types were given.
        return self.layer_types if self.layer_types else ["mamba"] * self.num_hidden_layers
__all__ = ["GraniteMoeHybridConfig"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/granitemoehybrid/configuration_granitemoehybrid.py",
"license": "Apache License 2.0",
"lines": 229,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/granitemoehybrid/modular_granitemoehybrid.py | # Copyright 2025 IBM and the HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
import torch
from torch import nn
from ... import initialization as init
from ...cache_utils import Cache
from ...masking_utils import create_causal_mask
from ...modeling_outputs import BaseModelOutputWithPast, MoeModelOutputWithPast
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, logging
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..bamba.configuration_bamba import BambaConfig
from ..bamba.modeling_bamba import BambaMixer, BambaRMSNormGated, HybridMambaAttentionDynamicCache
from ..gemma2.modeling_gemma2 import Gemma2RotaryEmbedding
from ..granitemoeshared.modeling_granitemoeshared import (
GraniteFlashAttentionKwargs,
GraniteMoeSharedAttention,
GraniteMoeSharedDecoderLayer,
GraniteMoeSharedForCausalLM,
GraniteMoeSharedMLP,
GraniteMoeSharedModel,
GraniteMoeSharedMoE,
GraniteMoeSharedPreTrainedModel,
apply_rotary_pos_emb,
eager_attention_forward,
)
from .configuration_granitemoehybrid import GraniteMoeHybridConfig
logger = logging.get_logger(__name__)
class GraniteMoeHybridAttention(GraniteMoeSharedAttention):
    """Multi-head attention used for the "attention" layers of the hybrid stack.

    Same projections/caching as `GraniteMoeSharedAttention`, except rotary position
    embeddings are optional: when `position_embeddings` is None, no rotation is applied.
    """

    def __init__(self, config: GraniteMoeHybridConfig, layer_idx: int):
        super().__init__(config, layer_idx)

    # FIXME: @ARTHUR this forward is also classic attention; only the optional RoPE (NoPE) differs.
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,  # None or rope embeddings
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        # Split the last dimension into (num_heads, head_dim) and move heads before seq.
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        # Rotary embeddings are only applied when the model is configured with RoPE.
        if position_embeddings is not None:
            cos, sin = position_embeddings
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            cache_kwargs = {"cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Dispatch to the configured attention backend, falling back to the eager impl.
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )
        # Merge heads back and project to the model dimension.
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class GraniteMoeHybridMambaLayer(BambaMixer):
    """Mamba mixer for the "mamba" layers; reuses `BambaMixer` with the hybrid
    config translated into a `BambaConfig`."""

    def __init__(self, config: GraniteMoeHybridConfig, layer_idx: int):
        super().__init__(BambaConfig(config), layer_idx)
class GraniteMoeHybridRMSNormGated(BambaRMSNormGated):
    """Gated RMSNorm used inside the mamba mixer; identical to Bamba's implementation."""

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__(hidden_size, eps)
class GraniteMoeHybridMLP(GraniteMoeSharedMLP):
    """Shared-expert MLP; identical to `GraniteMoeSharedMLP`."""

    def __init__(self, config: GraniteMoeHybridConfig):
        super().__init__(config)
class GraniteMoeHybridRotaryEmbedding(Gemma2RotaryEmbedding):
    """Rotary embedding module; identical to Gemma2's implementation."""

    pass
class GraniteMoeHybridMoE(GraniteMoeSharedMoE):
    """Sparse mixture-of-experts block; identical to `GraniteMoeSharedMoE`."""

    pass
class GraniteMoeHybridDecoderLayer(GraniteMoeSharedDecoderLayer):
    """Decoder block that is either an attention block or a mamba block, chosen per
    layer via `config.layers_block_type`.

    Per block: norm -> (attention | mamba) -> scaled residual add, then
    norm -> (MoE + shared MLP, or shared MLP only) -> scaled residual add.
    """

    def __init__(self, config: GraniteMoeHybridConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        self.shared_mlp = GraniteMoeHybridMLP(config)
        # Either attention or mamba will be initialized, depending on the layer type.
        self.self_attn = None
        self.mamba = None
        if config.layers_block_type[layer_idx] == "mamba":
            self.mamba = GraniteMoeHybridMambaLayer(config, layer_idx)
        else:
            self.self_attn = GraniteMoeHybridAttention(config, layer_idx)
        self.layer_type = config.layers_block_type[layer_idx]

        # Allow non-MoE (dense)
        self.block_sparse_moe = GraniteMoeHybridMoE(config) if config.num_local_experts > 0 else None
        # Accept 0 experts: skip MoE if num_local_experts == 0
        self.has_experts = getattr(config, "num_local_experts", 0) > 0

    @auto_docstring
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        use_cache: bool | None = False,
        cache_position: torch.LongTensor | None = None,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs: Unpack[GraniteFlashAttentionKwargs],
    ) -> tuple[torch.FloatTensor, tuple[torch.FloatTensor, torch.FloatTensor] | None]:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        if self.mamba is not None:
            # Mamba takes the cache as `cache_params` (its conv/ssm state container).
            hidden_states = self.mamba(
                hidden_states=hidden_states,
                cache_position=cache_position,
                cache_params=past_key_values,
                attention_mask=attention_mask,
                **kwargs,
            )
        else:
            hidden_states, _ = self.self_attn(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )
        # Granite scales the residual branch by a constant multiplier.
        hidden_states = residual + hidden_states * self.residual_multiplier

        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        if self.has_experts:
            # MoE output and the always-on shared MLP are summed.
            moe_hidden_states = self.block_sparse_moe(hidden_states)
            hidden_states = moe_hidden_states + self.shared_mlp(hidden_states)
        else:
            hidden_states = self.shared_mlp(hidden_states)
        hidden_states = residual + hidden_states * self.residual_multiplier

        return hidden_states
class GraniteMoeHybridPreTrainedModel(GraniteMoeSharedPreTrainedModel):
    """Base class adding hybrid-specific weight initialization on top of the shared one."""

    config: GraniteMoeHybridConfig
    _no_split_modules = ["GraniteMoeHybridDecoderLayer"]
    # Mamba layers carry recurrent (conv/ssm) state across forward calls.
    _is_stateful = True

    @torch.no_grad()
    def _init_weights(self, module):
        """Run the generic init, then set the mamba-specific parameters it does not cover."""
        super()._init_weights(module)
        if isinstance(module, GraniteMoeHybridMambaLayer):
            init.ones_(module.dt_bias)
            # A_log gets log(1..num_heads); D and dt_bias start at one.
            init.copy_(module.A_log, torch.log(torch.arange(1, module.num_heads + 1)))
            init.ones_(module.D)
        elif isinstance(module, GraniteMoeHybridRMSNormGated):
            init.ones_(module.weight)
class GraniteMoeHybridModel(GraniteMoeSharedModel):
    """Hybrid decoder stack: a mix of mamba and attention layers, with optional RoPE."""

    def __init__(self, config: GraniteMoeHybridConfig):
        super().__init__(config)
        self.layers = nn.ModuleList(
            [GraniteMoeHybridDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.embedding_multiplier = config.embedding_multiplier
        # RoPE only when explicitly configured; `None` position_embedding_type means no rotary embeddings.
        self.rotary_emb = GraniteMoeHybridRotaryEmbedding(config) if config.position_embedding_type == "rope" else None

    @auto_docstring
    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[GraniteFlashAttentionKwargs],
    ) -> tuple | BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        # Granite scales the token embeddings by a constant multiplier.
        inputs_embeds = inputs_embeds * self.embedding_multiplier

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            self.config,
            inputs_embeds,
            attention_mask,
            cache_position,
            past_key_values,
        )
        mamba_mask = self._update_mamba_mask(attention_mask, cache_position)

        # embed positions
        hidden_states = inputs_embeds

        position_embeddings = None
        if self.rotary_emb is not None:
            position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers:
            # Depending on the layer type we opt for 2D base attention mask (Mamba) or 4D causal mask (Attention)
            layer_mask = mamba_mask if decoder_layer.layer_type == "mamba" else causal_mask
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=layer_mask,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)

        # Mark the hybrid cache as primed after the first forward pass.
        if past_key_values and not past_key_values.has_previous_state:
            past_key_values.has_previous_state = True

        return MoeModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )

    def _update_mamba_mask(self, attention_mask, cache_position):
        """
        No need for zeroing states when
        1. Cached forward
        2. Attending to all inputs
        """
        mamba_mask = attention_mask
        if cache_position[0] > 0 or (attention_mask is not None and torch.all(attention_mask == 1)):
            mamba_mask = None
        return mamba_mask
class GraniteMoeHybridForCausalLM(GraniteMoeSharedForCausalLM):
    """Causal LM head on top of `GraniteMoeHybridModel`, with the hybrid cache wired in."""

    # The LM head shares weights with the input embeddings when tie_word_embeddings is set.
    _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}

    def __init__(self, config: GraniteMoeHybridConfig):
        super().__init__(config)
        self.model = GraniteMoeHybridModel(config)
        # Initialize weights and apply final processing
        self.post_init()

    def forward(self, **super_kwargs):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, GraniteMoeHybridForCausalLM

        >>> model = GraniteMoeHybridForCausalLM.from_pretrained("ibm-granite/granite-4.0-h-tiny")
        >>> tokenizer = AutoTokenizer.from_pretrained("ibm-granite/granite-4.0-h-tiny")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        return super().forward(**super_kwargs)

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        inputs_embeds=None,
        cache_position=None,
        position_ids=None,
        use_cache=True,
        is_first_iteration=False,
        **kwargs,
    ):
        # Overwritten -- has a unique cache type, `HybridMambaAttentionDynamicCache`
        if past_key_values is None and use_cache:
            # The hybrid cache must exist from the first step so mamba conv/ssm states are allocated.
            past_key_values = HybridMambaAttentionDynamicCache(
                self.config, input_ids.shape[0], self.dtype, device=self.device
            )
        model_inputs = super().prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            cache_position=cache_position,
            position_ids=position_ids,
            use_cache=use_cache,
            is_first_iteration=is_first_iteration,
            **kwargs,
        )
        return model_inputs
__all__ = ["GraniteMoeHybridForCausalLM", "GraniteMoeHybridModel", "GraniteMoeHybridPreTrainedModel"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/granitemoehybrid/modular_granitemoehybrid.py",
"license": "Apache License 2.0",
"lines": 297,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/granitemoehybrid/test_modeling_granitemoehybrid.py | # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch GraniteMoeHybrid model."""
import inspect
import tempfile
import unittest
import pytest
from parameterized import parameterized
from pytest import mark
from transformers import (
AutoTokenizer,
DataCollatorWithFlattening,
GraniteMoeHybridConfig,
is_torch_available,
)
from transformers.testing_utils import (
require_flash_attn,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...models.bamba.test_modeling_bamba import BambaModelTester
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GraniteMoeHybridForCausalLM,
GraniteMoeHybridModel,
)
from transformers.models.granitemoehybrid.modeling_granitemoehybrid import HybridMambaAttentionDynamicCache
class GraniteMoeHybridModelTester(BambaModelTester):
    """Builds tiny GraniteMoeHybrid configs/models for the common test suite, reusing the Bamba tester."""

    config_class = GraniteMoeHybridConfig
    if is_torch_available():
        model_class = GraniteMoeHybridModel
        for_causal_lm_class = GraniteMoeHybridForCausalLM

    def __init__(
        self,
        parent,
        use_cache=False,
        shared_intermediate_size=174,
        layer_types=None,
    ):
        super().__init__(parent)
        self.shared_mlp_intermediate = shared_intermediate_size  # NOTE: stored under its own name below
        self.shared_intermediate_size = shared_intermediate_size
        self.layer_types = layer_types
        self.use_cache = use_cache

    def _update_layer_configs(self):
        """Translate the Bamba tester's attention layer indices into `layer_types`."""
        super()._update_layer_configs()
        # GraniteMoeHybrid uses layer_types instead of attn_layer_indices
        self.layer_types = ["mamba"] * self.num_hidden_layers
        for idx in self.attn_layer_indices:
            self.layer_types[idx] = "attention"

    def get_config(self):
        # Forward the hybrid-specific knobs on top of the shared Bamba config defaults.
        return super().get_config(
            shared_intermediate_size=self.shared_intermediate_size,
            layer_types=self.layer_types,
        )
@require_torch
class GraniteMoeHybridModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for GraniteMoeHybrid, driven by `GraniteMoeHybridModelTester`."""

    model_tester_class = GraniteMoeHybridModelTester
    all_model_classes = (
        (
            GraniteMoeHybridModel,
            GraniteMoeHybridForCausalLM,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": GraniteMoeHybridModel,
            "text-generation": GraniteMoeHybridForCausalLM,
        }
        if is_torch_available()
        else {}
    )

    # Need to use `0.8` instead of `0.9` for `test_cpu_offload`
    # This is because we are hitting edge cases with the causal_mask buffer
    model_split_percents = [0.5, 0.7, 0.8]
def _check_caches_are_equal(
self, cache1: HybridMambaAttentionDynamicCache, cache2: HybridMambaAttentionDynamicCache
):
if not isinstance(cache1, HybridMambaAttentionDynamicCache) or not isinstance(
cache2, HybridMambaAttentionDynamicCache
):
raise ValueError("The wrong cache is being used!")
if not len(cache1) == len(cache2):
raise ValueError("Both caches do not have the same number of layers.")
num_layers = len(cache1)
for idx in range(num_layers):
torch.testing.assert_close(cache1.key_cache[idx], cache2.key_cache[idx])
torch.testing.assert_close(cache1.value_cache[idx], cache2.value_cache[idx])
torch.testing.assert_close(cache1.conv_states[idx], cache2.conv_states[idx])
torch.testing.assert_close(cache1.ssm_states[idx], cache2.ssm_states[idx])
    def setUp(self):
        # Small hidden size keeps the config round-trip tests fast.
        self.model_tester = self.model_tester_class(self)
        self.config_tester = ConfigTester(self, config_class=self.model_tester.config_class, hidden_size=64)
    def test_config(self):
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_causal_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        r"""
        Overriding the test_attention_outputs test as the Bamba model outputs attention only for its attention layers
        """
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        # Only attention layers emit attention weights; mamba layers do not.
        expected_num_attentions = self.model_tester.num_hidden_layers - len(self.model_tester.attn_layer_indices)

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            # Eager attention is required so attention weights are actually materialized.
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            # Enabling hidden states adds exactly one extra entry to the output tuple.
            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
            )
def test_batching_equivalence(self):
    # The tril-style input mask interferes with the batch-equivalence check, so it is
    # disabled for the duration of the parent test.
    orig = self.model_tester.use_input_mask
    self.model_tester.use_input_mask = False
    try:
        super().test_batching_equivalence()
    finally:
        # Restore the flag even when the parent test fails, so a failure here does not
        # leak state into other tests that share this model_tester instance.
        self.model_tester.use_input_mask = orig
@pytest.mark.generate
def test_left_padding_compatibility(self):
    # TODO: document why a random attention mask causes this test to fail, but a full mask doesn't
    # Run the parent check with the attention mask removed from the unpadded inputs.
    super().test_left_padding_compatibility(unpadded_custom_inputs={"attention_mask": None})
@unittest.skip(
"Bamba requires additionally specifying position_ids, seq_idx, and FlashAttentionKwargs for padding-free training."
)
def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
pass
@unittest.skip(
"Bamba requires additionally specifying position_ids, seq_idx, and FlashAttentionKwargs for padding-free training."
)
def test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs(self):
pass
@require_flash_attn
@require_torch_accelerator
@mark.flash_attn_test
@slow
@unittest.skip(
    "NotImplementedError: seq_idx support requires fast path support. Please install mamba_ssm and causal_conv1d"
)
def test_flash_attention_2_padding_matches_padding_free_with_position_ids_seq_idx_and_fa_kwargs(self):
    # Checks that, under flash-attention-2, a padded batch and the equivalent flattened
    # "padding-free" batch (position_ids + seq_idx + FA kwargs) produce matching logits
    # and loss. Currently skipped: the mamba fast path (mamba_ssm / causal_conv1d) is
    # required for seq_idx support.
    if not self.has_attentions:
        self.skipTest(reason="Model architecture does not support attentions")
    max_new_tokens = 30
    for model_class in self.all_generative_model_classes:
        if not model_class._supports_flash_attn:
            self.skipTest(f"{model_class.__name__} does not support Flash Attention 2")
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # The comparison is only meaningful if the dummy batch actually contains padding.
        if 0 not in inputs_dict.get("attention_mask", []) or "attention_mask" not in inputs_dict:
            self.skipTest("Model dummy inputs should contain padding in their attention mask")
        dummy_input = inputs_dict[model_class.main_input_name]
        # FA2 requires half precision; downcast float inputs to fp16.
        if dummy_input.dtype in [torch.float32, torch.bfloat16]:
            dummy_input = dummy_input.to(torch.float16)
        # make sure that all models have enough positions for generation
        if hasattr(config, "max_position_embeddings"):
            config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1
        model = model_class(config)
        if "position_ids" not in inspect.signature(model.forward).parameters:
            self.skipTest("Model does not support position_ids")
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            # ensure left padding, to adapt for some models
            if 0 in inputs_dict["attention_mask"][:, -1]:
                inputs_dict["attention_mask"] = inputs_dict["attention_mask"].flip(1)
            dummy_attention_mask = inputs_dict["attention_mask"]
            # Overwrite padded positions with the pad token so both representations see
            # identical token content.
            inputs_dict["input_ids"][~dummy_attention_mask.bool()] = config.get_text_config().pad_token_id
            # Ensure inputs_dict also has labels in it, as their presence/absence can induce
            # dtype conversions. This also lets us compare losses.
            labels = inputs_dict["input_ids"].clone()
            # Mask padding tokens
            labels[~dummy_attention_mask.bool()] = -100
            # Also need to mask the first non-trivial token to match the padding-free batch.
            first_nonneg_idx = (labels >= 0).int().argmax(dim=1)
            labels[torch.arange(labels.size(0), device=labels.device), first_nonneg_idx] = -100
            inputs_dict["labels"] = labels
            # Reload in fp16 with FA2 enabled; the comparison below runs both input styles
            # through this same model.
            model = (
                model_class.from_pretrained(
                    tmpdirname,
                    dtype=torch.float16,
                    attn_implementation="flash_attention_2",
                )
                .to(torch_device)
                .eval()
            )
            # flatten
            features = [
                {"input_ids": i[a.bool()].tolist()}
                for i, a in zip(inputs_dict["input_ids"], inputs_dict["attention_mask"])
            ]
            # add position_ids + fa_kwargs + seq_idx
            # NOTE(review): relies on DataCollatorWithFlattening to emit the padding-free
            # layout (single row + per-sequence indices) — see its docs for details.
            data_collator = DataCollatorWithFlattening(
                return_tensors="pt", return_seq_idx=True, return_flash_attn_kwargs=True
            )
            batch = data_collator(features)
            batch_accelerator = {k: t.to(torch_device) if torch.is_tensor(t) else t for k, t in batch.items()}
            res_padded = model(**inputs_dict)
            res_padfree = model(**batch_accelerator)
            # Compare only the non-padded positions of the padded run against the single
            # flattened row of the padding-free run.
            logits_padded = res_padded.logits[inputs_dict["attention_mask"].bool()]
            logits_padfree = res_padfree.logits[0]
            # Argmax must match exactly; raw logits only up to fp16 epsilon.
            torch.testing.assert_close(logits_padded.argmax(-1), logits_padfree.argmax(-1), rtol=0, atol=0)
            # acceptable numerical instability
            tol = torch.finfo(torch.float16).eps
            torch.testing.assert_close(logits_padded, logits_padfree, rtol=tol, atol=tol)
            loss_padded = res_padded.loss
            loss_padfree = res_padfree.loss
            torch.testing.assert_close(loss_padded, loss_padfree)
def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
    """Validate the type and per-layer shapes of the hybrid mamba/attention cache built by generate."""
    self.assertIsInstance(past_key_values, HybridMambaAttentionDynamicCache)
    # (batch, kv heads, seq_length, head_dim)
    num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads)
    head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
    attention_shape = (batch_size, num_heads, seq_length, head_dim)
    # Conv state carries the expanded hidden dim plus both B/C group projections.
    conv_shape = (
        batch_size,
        config.mamba_expand * config.hidden_size + 2 * config.mamba_n_groups * config.mamba_d_state,
        config.mamba_d_conv,
    )
    ssm_shape = (batch_size, config.mamba_n_heads, config.mamba_d_head, config.mamba_d_state)
    # Bug fix: the original used `assertTrue(a, b)`, which treats `b` as the failure
    # message and never compares the values; `assertEqual` performs the intended check.
    self.assertEqual(config.num_hidden_layers, len(past_key_values))
    for idx in range(len(past_key_values)):
        # Mamba layers store conv/ssm states; attention layers store key/value tensors.
        if config.layers_block_type[idx] == "mamba":
            self.assertEqual(past_key_values.conv_states[idx].shape, conv_shape)
            self.assertEqual(past_key_values.ssm_states[idx].shape, ssm_shape)
        else:
            self.assertEqual(past_key_values.key_cache[idx].shape, attention_shape)
            self.assertEqual(past_key_values.value_cache[idx].shape, attention_shape)
def test_config_requires_mamba_or_attention_layers(self):
    """Ensure we can't create a config with disallowed layers."""
    # unittest's own context manager is equivalent to pytest.raises here.
    with self.assertRaises(ValueError):
        GraniteMoeHybridConfig(layer_types=["not allowed!"])
@require_torch_accelerator
class GraniteMoeHybridIntegrationTest(unittest.TestCase):
    # Slow integration tests against the released ibm-granite/granite-4.0-h-tiny checkpoint.

    @slow
    @parameterized.expand([("cpu",)])  # runners crash with `cuda`, prob they have mamba kernels installed
    def test_model_logits(self, device):
        # Pins the forward-pass logits of the pretrained checkpoint on a fixed prompt.
        input_ids = [31390, 631, 4162, 30, 322, 25342, 432, 1875, 43826, 10066, 688, 225]
        model = GraniteMoeHybridForCausalLM.from_pretrained("ibm-granite/granite-4.0-h-tiny", device_map=device)
        with torch.no_grad():
            out = model(torch.tensor([input_ids]).to(device))
        # fmt: off
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([
            [-0.3543, -1.0066, -0.5338, -0.8816, -0.7438, 0.0500, -1.3644, -0.0742, -1.7746, -1.6326, -1.4802, -0.4961]
        ], device=device)

        torch.testing.assert_close(EXPECTED_MEAN, out.logits.float().mean(-1), rtol=1e-2, atol=1e-2)

        # slicing logits[0, 0, 0:15]
        EXPECTED_SLICE = torch.tensor([
            [6.5938, 7.2500, 1.6484, 5.2188, 3.5781, 2.5469, 6.1250, 5.1875, 9.5000, 4.6875, 4.7188, 10.7500, 10.3125, 7.8438, 5.5312]
        ], device=device)
        # fmt: on
        self.assertTrue(
            torch.allclose(
                EXPECTED_SLICE,
                out.logits[0, 0, :15].float(),
                atol=1e-3,
                rtol=1e-3,
            )
        )

    @slow
    @parameterized.expand([("cpu",)])
    def test_model_generation(self, device):
        # Greedy decoding from a fixed prompt must reproduce the pinned completion exactly.
        EXPECTED_TEXT_COMPLETION = "Simply put, the theory of relativity states that 1) the laws of physics are the same for all observers in uniform motion relative"
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = AutoTokenizer.from_pretrained("ibm-granite/granite-4.0-h-tiny")
        model = GraniteMoeHybridForCausalLM.from_pretrained("ibm-granite/granite-4.0-h-tiny", device_map=device)
        model_inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

        # greedy generation outputs
        generated_ids = model.generate(**model_inputs, max_new_tokens=16, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/granitemoehybrid/test_modeling_granitemoehybrid.py",
"license": "Apache License 2.0",
"lines": 334,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:src/transformers/loss/loss_d_fine.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..utils import is_vision_available
from .loss_for_object_detection import box_iou
from .loss_rt_detr import RTDetrHungarianMatcher, RTDetrLoss
if is_vision_available():
from transformers.image_transforms import center_to_corners_format
def _set_aux_loss(outputs_class, outputs_coord):
return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class, outputs_coord)]
def _set_aux_loss2(
outputs_class, outputs_coord, outputs_corners, outputs_ref, teacher_corners=None, teacher_logits=None
):
return [
{
"logits": a,
"pred_boxes": b,
"pred_corners": c,
"ref_points": d,
"teacher_corners": teacher_corners,
"teacher_logits": teacher_logits,
}
for a, b, c, d in zip(outputs_class, outputs_coord, outputs_corners, outputs_ref)
]
def weighting_function(max_num_bins: int, up: torch.Tensor, reg_scale: int) -> torch.Tensor:
    """
    Generates the non-uniform Weighting Function W(n) for bounding box regression.

    Args:
        max_num_bins (int): Max number of the discrete bins.
        up (Tensor): Controls upper bounds of the sequence,
                     where maximum offset is ±up * H / W.
        reg_scale (float): Controls the curvature of the Weighting Function.
                           Larger values result in flatter weights near the central axis W(max_num_bins/2)=0
                           and steeper weights at both ends.

    Returns:
        Tensor: Sequence of Weighting Function.
    """
    half = max_num_bins // 2
    bound = abs(up[0]) * abs(reg_scale)
    outer = bound * 2
    # Geometric growth factor so the sequence reaches `bound` at the extremes.
    growth = (bound + 1) ** (2 / (max_num_bins - 2))
    # Symmetric branches around a central zero: negative on the left, positive on the right.
    neg_branch = [1 - growth**k for k in range(half - 1, 0, -1)]
    pos_branch = [growth**k - 1 for k in range(1, half)]
    pieces = [-outer, *neg_branch, torch.zeros_like(up[0][None]), *pos_branch, outer]
    # Promote any 0-dim scalars to 1-dim so they can be concatenated.
    pieces = [p.unsqueeze(0) if p.dim() == 0 else p for p in pieces]
    return torch.cat(pieces, 0)
def translate_gt(gt: torch.Tensor, max_num_bins: int, reg_scale: int, up: torch.Tensor):
    """
    Decodes bounding box ground truth (GT) values into distribution-based GT representations.

    This function maps continuous GT values into discrete distribution bins, which can be used
    for regression tasks in object detection models. It calculates the indices of the closest
    bins to each GT value and assigns interpolation weights to these bins based on their proximity
    to the GT value.

    Args:
        gt (Tensor): Ground truth bounding box values, shape (N, ).
        max_num_bins (int): Maximum number of discrete bins for the distribution.
        reg_scale (float): Controls the curvature of the Weighting Function.
        up (Tensor): Controls the upper bounds of the Weighting Function.

    Returns:
        tuple[Tensor, Tensor, Tensor]:
            - indices (Tensor): Index of the left bin closest to each GT value, shape (N, ).
            - weight_right (Tensor): Weight assigned to the right bin, shape (N, ).
            - weight_left (Tensor): Weight assigned to the left bin, shape (N, ).
    """
    gt = gt.reshape(-1)
    # Bin edge values W(n); length is max_num_bins + 1 (see weighting_function).
    function_values = weighting_function(max_num_bins, up, reg_scale)

    # Find the closest left-side indices for each value
    diffs = function_values.unsqueeze(0) - gt.unsqueeze(1)
    mask = diffs <= 0
    # Counting edges <= gt gives the left neighbor; -1 means gt is below all edges.
    closest_left_indices = torch.sum(mask, dim=1) - 1

    # Calculate the weights for the interpolation
    indices = closest_left_indices.float()

    weight_right = torch.zeros_like(indices)
    weight_left = torch.zeros_like(indices)

    # In-range entries get linear interpolation weights between the two neighboring edges.
    valid_idx_mask = (indices >= 0) & (indices < max_num_bins)
    valid_indices = indices[valid_idx_mask].long()

    # Obtain distances
    left_values = function_values[valid_indices]
    right_values = function_values[valid_indices + 1]

    left_diffs = torch.abs(gt[valid_idx_mask] - left_values)
    right_diffs = torch.abs(right_values - gt[valid_idx_mask])

    # Valid weights
    weight_right[valid_idx_mask] = left_diffs / (left_diffs + right_diffs)
    weight_left[valid_idx_mask] = 1.0 - weight_right[valid_idx_mask]

    # Invalid weights (out of range)
    # NOTE: order matters below — the negative-overflow clamp rewrites `indices` before
    # the positive-overflow mask is computed from it.
    invalid_idx_mask_neg = indices < 0
    weight_right[invalid_idx_mask_neg] = 0.0
    weight_left[invalid_idx_mask_neg] = 1.0
    # Clamp underflowed values onto the first bin with full left weight.
    indices[invalid_idx_mask_neg] = 0.0

    invalid_idx_mask_pos = indices >= max_num_bins
    weight_right[invalid_idx_mask_pos] = 1.0
    weight_left[invalid_idx_mask_pos] = 0.0
    # -0.1 keeps the float index strictly below max_num_bins so `label.long() + 1` stays valid.
    indices[invalid_idx_mask_pos] = max_num_bins - 0.1

    return indices, weight_right, weight_left
def bbox2distance(points, bbox, max_num_bins, reg_scale, up, eps=0.1):
    """
    Converts bounding box coordinates to distances from a reference point.

    Args:
        points (Tensor): (n, 4) [x, y, w, h], where (x, y) is the center.
        bbox (Tensor): (n, 4) bounding boxes in "xyxy" format.
        max_num_bins (float): Maximum bin value.
        reg_scale (float): Controlling curvarture of W(n).
        up (Tensor): Controlling upper bounds of W(n).
        eps (float): Small value to ensure target < max_num_bins.

    Returns:
        Tensor: Decoded distances.
    """
    reg_scale = abs(reg_scale)
    half_scale = 0.5 * reg_scale
    # Normalized anchor sizes; the epsilon keeps the divisions finite for degenerate boxes.
    norm_w = points[..., 2] / reg_scale + 1e-16
    norm_h = points[..., 3] / reg_scale + 1e-16
    # Signed distances from the anchor center to each side, in scaled units.
    left = (points[:, 0] - bbox[:, 0]) / norm_w - half_scale
    top = (points[:, 1] - bbox[:, 1]) / norm_h - half_scale
    right = (bbox[:, 2] - points[:, 0]) / norm_w - half_scale
    bottom = (bbox[:, 3] - points[:, 1]) / norm_h - half_scale
    four_lens = torch.stack([left, top, right, bottom], -1)
    # Map continuous distances onto the discrete bin distribution.
    four_lens, weight_right, weight_left = translate_gt(four_lens, max_num_bins, reg_scale, up)
    if max_num_bins is not None:
        four_lens = four_lens.clamp(min=0, max=max_num_bins - eps)
    # Targets are detached: they act as fixed supervision for the focal loss.
    return four_lens.reshape(-1).detach(), weight_right.detach(), weight_left.detach()
class DFineLoss(RTDetrLoss):
    """
    This class computes the losses for D-FINE. The process happens in two steps: 1) we compute hungarian assignment
    between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth /
    prediction (supervise class and box).

    Args:
        matcher (`DetrHungarianMatcher`):
            Module able to compute a matching between targets and proposals.
        weight_dict (`Dict`):
            Dictionary relating each loss with its weights. These losses are configured in DFineConfig as
            `weight_loss_vfl`, `weight_loss_bbox`, `weight_loss_giou`, `weight_loss_fgl`, `weight_loss_ddf`
        losses (`list[str]`):
            List of all the losses to be applied. See `get_loss` for a list of all available losses.
        alpha (`float`):
            Parameter alpha used to compute the focal loss.
        gamma (`float`):
            Parameter gamma used to compute the focal loss.
        eos_coef (`float`):
            Relative classification weight applied to the no-object category.
        num_classes (`int`):
            Number of object categories, omitting the special no-object category.
    """

    def __init__(self, config):
        super().__init__(config)
        self.matcher = RTDetrHungarianMatcher(config)
        # Number of discrete bins used by the corner-distribution (FGL/DDF) losses.
        self.max_num_bins = config.max_num_bins
        self.weight_dict = {
            "loss_vfl": config.weight_loss_vfl,
            "loss_bbox": config.weight_loss_bbox,
            "loss_giou": config.weight_loss_giou,
            "loss_fgl": config.weight_loss_fgl,
            "loss_ddf": config.weight_loss_ddf,
        }
        self.losses = ["vfl", "boxes", "local"]
        self.reg_scale = config.reg_scale
        # Registered as a frozen parameter so it moves with the module across devices.
        self.up = nn.Parameter(torch.tensor([config.up]), requires_grad=False)

    def unimodal_distribution_focal_loss(
        self, pred, label, weight_right, weight_left, weight=None, reduction="sum", avg_factor=None
    ):
        # Distribution focal loss: supervise the two bins adjacent to the (fractional)
        # target `label`, weighted by its distance to each bin.
        dis_left = label.long()
        dis_right = dis_left + 1

        loss = F.cross_entropy(pred, dis_left, reduction="none") * weight_left.reshape(-1) + F.cross_entropy(
            pred, dis_right, reduction="none"
        ) * weight_right.reshape(-1)

        if weight is not None:
            weight = weight.float()
            loss = loss * weight

        # avg_factor takes precedence over the reduction mode when provided.
        if avg_factor is not None:
            loss = loss.sum() / avg_factor
        elif reduction == "mean":
            loss = loss.mean()
        elif reduction == "sum":
            loss = loss.sum()

        return loss

    def loss_local(self, outputs, targets, indices, num_boxes, T=5):
        """Compute Fine-Grained Localization (FGL) Loss
        and Decoupled Distillation Focal (DDF) Loss."""

        losses = {}
        if "pred_corners" in outputs:
            # Matched query indices from the Hungarian assignment.
            idx = self._get_source_permutation_idx(indices)
            target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0)

            # One (max_num_bins + 1)-way distribution per box side (4 rows per box).
            pred_corners = outputs["pred_corners"][idx].reshape(-1, (self.max_num_bins + 1))
            ref_points = outputs["ref_points"][idx].detach()
            with torch.no_grad():
                # Targets are cached on self; presumably reused elsewhere — TODO confirm.
                self.fgl_targets = bbox2distance(
                    ref_points,
                    center_to_corners_format(target_boxes),
                    self.max_num_bins,
                    self.reg_scale,
                    self.up,
                )
            target_corners, weight_right, weight_left = self.fgl_targets

            # Per-box IoU between matched predictions and targets, used as loss weights.
            ious = torch.diag(
                box_iou(center_to_corners_format(outputs["pred_boxes"][idx]), center_to_corners_format(target_boxes))[
                    0
                ]
            )
            # Broadcast one IoU to all 4 sides of its box, flattened to match pred_corners rows.
            weight_targets = ious.unsqueeze(-1).repeat(1, 1, 4).reshape(-1).detach()

            losses["loss_fgl"] = self.unimodal_distribution_focal_loss(
                pred_corners,
                target_corners,
                weight_right,
                weight_left,
                weight_targets,
                avg_factor=num_boxes,
            )

            # DDF: distill the full corner distributions (all queries) from the teacher.
            pred_corners = outputs["pred_corners"].reshape(-1, (self.max_num_bins + 1))
            target_corners = outputs["teacher_corners"].reshape(-1, (self.max_num_bins + 1))
            if torch.equal(pred_corners, target_corners):
                # Teacher equals student (e.g. self-distillation warm-up): zero loss that
                # still participates in the autograd graph.
                losses["loss_ddf"] = pred_corners.sum() * 0
            else:
                # Teacher confidence as per-query weight; matched queries get IoU instead.
                weight_targets_local = outputs["teacher_logits"].sigmoid().max(dim=-1)[0]

                mask = torch.zeros_like(weight_targets_local, dtype=torch.bool)
                mask[idx] = True
                mask = mask.unsqueeze(-1).repeat(1, 1, 4).reshape(-1)

                weight_targets_local[idx] = ious.reshape_as(weight_targets_local[idx]).to(weight_targets_local.dtype)
                weight_targets_local = weight_targets_local.unsqueeze(-1).repeat(1, 1, 4).reshape(-1).detach()

                # Temperature-scaled KL divergence between student and (detached) teacher.
                loss_match_local = (
                    weight_targets_local
                    * (T**2)
                    * (
                        nn.KLDivLoss(reduction="none")(
                            F.log_softmax(pred_corners / T, dim=1),
                            F.softmax(target_corners.detach() / T, dim=1),
                        )
                    ).sum(-1)
                )
                batch_scale = 1 / outputs["pred_boxes"].shape[0]  # it should be refined
                # Square-root balancing between matched (pos) and unmatched (neg) queries.
                self.num_pos, self.num_neg = (
                    (mask.sum() * batch_scale) ** 0.5,
                    ((~mask).sum() * batch_scale) ** 0.5,
                )
                loss_match_local1 = loss_match_local[mask].mean() if mask.any() else 0
                loss_match_local2 = loss_match_local[~mask].mean() if (~mask).any() else 0
                losses["loss_ddf"] = (loss_match_local1 * self.num_pos + loss_match_local2 * self.num_neg) / (
                    self.num_pos + self.num_neg
                )

        return losses

    def get_loss(self, loss, outputs, targets, indices, num_boxes):
        # Dispatch table from loss name to its implementation.
        loss_map = {
            "cardinality": self.loss_cardinality,
            "local": self.loss_local,
            "boxes": self.loss_boxes,
            "focal": self.loss_labels_focal,
            "vfl": self.loss_labels_vfl,
        }
        if loss not in loss_map:
            raise ValueError(f"Loss {loss} not supported")
        return loss_map[loss](outputs, targets, indices, num_boxes)
def DFineForObjectDetectionLoss(
    logits,
    labels,
    device,
    pred_boxes,
    config,
    outputs_class=None,
    outputs_coord=None,
    enc_topk_logits=None,
    enc_topk_bboxes=None,
    denoising_meta_values=None,
    predicted_corners=None,
    initial_reference_points=None,
    **kwargs,
):
    # Entry point used by the modeling code: builds the criterion, assembles the output
    # dict (main, auxiliary, encoder and denoising branches) and returns the total loss.
    criterion = DFineLoss(config)
    criterion.to(device)
    # Second: compute the losses, based on outputs and labels
    outputs_loss = {}
    outputs_loss["logits"] = logits
    # Box coordinates are normalized; clamp defends against small numeric overshoot.
    outputs_loss["pred_boxes"] = pred_boxes.clamp(min=0, max=1)
    auxiliary_outputs = None
    if config.auxiliary_loss:
        if denoising_meta_values is not None:
            # Split the query axis into denoising (dn_*) and regular queries.
            dn_out_coord, outputs_coord = torch.split(
                outputs_coord.clamp(min=0, max=1), denoising_meta_values["dn_num_split"], dim=2
            )
            dn_out_class, outputs_class = torch.split(outputs_class, denoising_meta_values["dn_num_split"], dim=2)
            dn_out_corners, out_corners = torch.split(predicted_corners, denoising_meta_values["dn_num_split"], dim=2)
            dn_out_refs, out_refs = torch.split(initial_reference_points, denoising_meta_values["dn_num_split"], dim=2)
            # Auxiliary outputs per decoder layer (all but the last), with the last
            # layer's corners/logits acting as the distillation teacher.
            auxiliary_outputs = _set_aux_loss2(
                outputs_class[:, :-1].transpose(0, 1),
                outputs_coord[:, :-1].transpose(0, 1),
                out_corners[:, :-1].transpose(0, 1),
                out_refs[:, :-1].transpose(0, 1),
                out_corners[:, -1],
                outputs_class[:, -1],
            )
            outputs_loss["auxiliary_outputs"] = auxiliary_outputs
            # Encoder top-k proposals are supervised as one extra auxiliary entry.
            outputs_loss["auxiliary_outputs"].extend(
                _set_aux_loss([enc_topk_logits], [enc_topk_bboxes.clamp(min=0, max=1)])
            )
            # Denoising branch keeps all layers (no teacher split on the layer axis).
            dn_auxiliary_outputs = _set_aux_loss2(
                dn_out_class.transpose(0, 1),
                dn_out_coord.transpose(0, 1),
                dn_out_corners.transpose(0, 1),
                dn_out_refs.transpose(0, 1),
                dn_out_corners[:, -1],
                dn_out_class[:, -1],
            )
            outputs_loss["dn_auxiliary_outputs"] = dn_auxiliary_outputs
            outputs_loss["denoising_meta_values"] = denoising_meta_values

    loss_dict = criterion(outputs_loss, labels)

    # Total loss is the plain sum; per-term weighting is applied inside the criterion.
    loss = sum(loss_dict.values())
    return loss, loss_dict, auxiliary_outputs
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/loss/loss_d_fine.py",
"license": "Apache License 2.0",
"lines": 314,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/d_fine/convert_d_fine_original_pytorch_checkpoint_to_hf.py | # Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import re
from io import BytesIO
from pathlib import Path
import httpx
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import DFineConfig, DFineForObjectDetection, RTDetrImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_d_fine_config(model_name: str) -> DFineConfig:
    """Build the HF `DFineConfig` matching one of the original D-FINE checkpoint variants.

    The variant (nano/small/medium/large/x-large) is inferred from `model_name`;
    `*_obj365` variants use the Objects365 label set (366 labels incl. background slot),
    everything else uses COCO (80 labels).
    """
    config = DFineConfig()

    config.num_labels = 80
    repo_id = "huggingface/label-files"
    filename = "object365-id2label.json" if "obj365" in model_name else "coco-detection-mmdet-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # Defaults shared by all variants; overridden per-variant below.
    config.backbone_config.hidden_sizes = [64, 128, 256, 512]
    config.backbone_config.layer_type = "basic"
    config.backbone_config.embedding_size = 32
    config.hidden_expansion = 1.0
    config.decoder_layers = 6

    # X-large variant.
    if model_name in ["dfine_x_coco", "dfine_x_obj2coco", "dfine_x_obj365"]:
        config.backbone_config.hidden_sizes = [256, 512, 1024, 2048]
        config.backbone_config.stage_in_channels = [64, 128, 512, 1024]
        config.backbone_config.stage_mid_channels = [64, 128, 256, 512]
        config.backbone_config.stage_out_channels = [128, 512, 1024, 2048]
        config.backbone_config.stage_num_blocks = [1, 2, 5, 2]
        config.backbone_config.stage_downsample = [False, True, True, True]
        config.backbone_config.stage_light_block = [False, False, True, True]
        config.backbone_config.stage_kernel_size = [3, 3, 5, 5]
        config.backbone_config.stage_numb_of_layers = [6, 6, 6, 6]
        config.backbone_config.stem_channels = [3, 32, 64]
        config.encoder_in_channels = [512, 1024, 2048]
        config.encoder_hidden_dim = 384
        config.encoder_ffn_dim = 2048
        config.decoder_n_points = [3, 6, 3]
        config.decoder_in_channels = [384, 384, 384]
        if model_name == "dfine_x_obj365":
            config.num_labels = 366
    # Medium variant.
    elif model_name in ["dfine_m_coco", "dfine_m_obj2coco", "dfine_m_obj365"]:
        config.backbone_config.hidden_sizes = [192, 384, 768, 1536]
        config.backbone_config.stem_channels = [3, 24, 32]
        config.backbone_config.stage_in_channels = [32, 96, 384, 768]
        config.backbone_config.stage_mid_channels = [32, 64, 128, 256]
        config.backbone_config.stage_out_channels = [96, 384, 768, 1536]
        config.backbone_config.stage_num_blocks = [1, 1, 3, 1]
        config.backbone_config.stage_downsample = [False, True, True, True]
        config.backbone_config.stage_light_block = [False, False, True, True]
        config.backbone_config.stage_kernel_size = [3, 3, 5, 5]
        config.backbone_config.stage_numb_of_layers = [4, 4, 4, 4]
        config.decoder_layers = 4
        config.decoder_n_points = [3, 6, 3]
        config.encoder_in_channels = [384, 768, 1536]
        config.backbone_config.use_learnable_affine_block = True
        config.depth_mult = 0.67
        if model_name == "dfine_m_obj365":
            config.num_labels = 366
    # Large variant.
    elif model_name in ["dfine_l_coco", "dfine_l_obj2coco_e25", "dfine_l_obj365"]:
        config.backbone_config.hidden_sizes = [256, 512, 1024, 2048]
        config.backbone_config.stem_channels = [3, 32, 48]
        config.backbone_config.stage_in_channels = [48, 128, 512, 1024]
        config.backbone_config.stage_mid_channels = [48, 96, 192, 384]
        config.backbone_config.stage_out_channels = [128, 512, 1024, 2048]
        config.backbone_config.stage_num_blocks = [1, 1, 3, 1]
        config.backbone_config.stage_downsample = [False, True, True, True]
        config.backbone_config.stage_light_block = [False, False, True, True]
        config.backbone_config.stage_kernel_size = [3, 3, 5, 5]
        config.backbone_config.stage_numb_of_layers = [6, 6, 6, 6]
        config.encoder_ffn_dim = 1024
        config.encoder_in_channels = [512, 1024, 2048]
        config.decoder_n_points = [3, 6, 3]
        if model_name == "dfine_l_obj365":
            config.num_labels = 366
    # Nano variant: only two feature levels and a much smaller decoder.
    elif model_name in ["dfine_n_coco", "dfine_n_obj2coco_e25", "dfine_n_obj365"]:
        config.backbone_config.hidden_sizes = [128, 256, 512, 1024]
        config.backbone_config.stem_channels = [3, 16, 16]
        config.backbone_config.stage_in_channels = [16, 64, 256, 512]
        config.backbone_config.stage_mid_channels = [16, 32, 64, 128]
        config.backbone_config.stage_out_channels = [64, 256, 512, 1024]
        config.backbone_config.stage_num_blocks = [1, 1, 2, 1]
        config.backbone_config.stage_downsample = [False, True, True, True]
        config.backbone_config.stage_light_block = [False, False, True, True]
        config.backbone_config.stage_kernel_size = [3, 3, 5, 5]
        config.backbone_config.stage_numb_of_layers = [3, 3, 3, 3]
        config.backbone_config.out_indices = [3, 4]
        config.backbone_config.use_learnable_affine_block = True
        config.num_feature_levels = 2
        config.encoder_ffn_dim = 512
        config.encode_proj_layers = [1]
        config.d_model = 128
        config.encoder_hidden_dim = 128
        config.decoder_ffn_dim = 512
        config.encoder_in_channels = [512, 1024]
        config.decoder_n_points = [6, 6]
        config.decoder_in_channels = [128, 128]
        config.feat_strides = [16, 32]
        config.depth_mult = 0.5
        config.decoder_layers = 3
        config.hidden_expansion = 0.34
        if model_name == "dfine_n_obj365":
            config.num_labels = 366
    # Fallback: small (dfine_s_*) variant.
    else:
        config.backbone_config.hidden_sizes = [128, 256, 512, 1024]
        config.backbone_config.stem_channels = [3, 16, 16]
        config.backbone_config.stage_in_channels = [16, 64, 256, 512]
        config.backbone_config.stage_mid_channels = [16, 32, 64, 128]
        config.backbone_config.stage_out_channels = [64, 256, 512, 1024]
        config.backbone_config.stage_num_blocks = [1, 1, 2, 1]
        config.backbone_config.stage_downsample = [False, True, True, True]
        config.backbone_config.stage_light_block = [False, False, True, True]
        config.backbone_config.stage_kernel_size = [3, 3, 5, 5]
        config.backbone_config.stage_numb_of_layers = [3, 3, 3, 3]
        config.decoder_layers = 3
        config.hidden_expansion = 0.5
        config.depth_mult = 0.34
        config.decoder_n_points = [3, 6, 3]
        config.encoder_in_channels = [256, 512, 1024]
        config.backbone_config.use_learnable_affine_block = True
        if model_name == "dfine_s_obj365":
            config.num_labels = 366

    return config
def load_original_state_dict(repo_id, model_name):
    """Download the original D-FINE checkpoint from the Hub and return its state dict.

    Args:
        repo_id: Hub repository id that hosts the original `.pth` checkpoints.
        model_name: Checkpoint basename (without the `.pth` extension) to fetch.

    Returns:
        dict: Flat mapping of parameter names to tensors (the "model" entry of
        the serialized checkpoint).
    """
    directory_path = hf_hub_download(repo_id=repo_id, filename=f"{model_name}.pth")
    # The original training framework stores the weights under the "model" key.
    checkpoint = torch.load(directory_path, map_location="cpu")["model"]
    # Shallow-copy into a fresh dict so later pops/renames don't mutate the
    # object returned by torch.load.
    return dict(checkpoint)
# Regex-based renaming table applied by `convert_old_keys_to_new_keys`: each key is a
# pattern matched against original checkpoint keys via `re.sub`, each value is the
# replacement (with \1, \2, ... backreferences to the captured groups).
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
    # Decoder base mappings
    r"decoder.valid_mask": r"model.decoder.valid_mask",
    r"decoder.anchors": r"model.decoder.anchors",
    r"decoder.up": r"model.decoder.up",
    r"decoder.reg_scale": r"model.decoder.reg_scale",
    # Backbone stem mappings - including stem2a and stem2b
    r"backbone.stem.stem1.conv.weight": r"model.backbone.model.embedder.stem1.convolution.weight",
    r"backbone.stem.stem2a.conv.weight": r"model.backbone.model.embedder.stem2a.convolution.weight",
    r"backbone.stem.stem2b.conv.weight": r"model.backbone.model.embedder.stem2b.convolution.weight",
    r"backbone.stem.stem3.conv.weight": r"model.backbone.model.embedder.stem3.convolution.weight",
    r"backbone.stem.stem4.conv.weight": r"model.backbone.model.embedder.stem4.convolution.weight",
    # Stem normalization (batch-norm weight/bias and running statistics)
    r"backbone.stem.stem1.bn.(weight|bias|running_mean|running_var)": r"model.backbone.model.embedder.stem1.normalization.\1",
    r"backbone.stem.stem2a.bn.(weight|bias|running_mean|running_var)": r"model.backbone.model.embedder.stem2a.normalization.\1",
    r"backbone.stem.stem2b.bn.(weight|bias|running_mean|running_var)": r"model.backbone.model.embedder.stem2b.normalization.\1",
    r"backbone.stem.stem3.bn.(weight|bias|running_mean|running_var)": r"model.backbone.model.embedder.stem3.normalization.\1",
    r"backbone.stem.stem4.bn.(weight|bias|running_mean|running_var)": r"model.backbone.model.embedder.stem4.normalization.\1",
    # Stem lab parameters - fixed with .lab in the path (learnable affine blocks)
    r"backbone.stem.stem1.lab.(scale|bias)": r"model.backbone.model.embedder.stem1.lab.\1",
    r"backbone.stem.stem2a.lab.(scale|bias)": r"model.backbone.model.embedder.stem2a.lab.\1",
    r"backbone.stem.stem2b.lab.(scale|bias)": r"model.backbone.model.embedder.stem2b.lab.\1",
    r"backbone.stem.stem3.lab.(scale|bias)": r"model.backbone.model.embedder.stem3.lab.\1",
    r"backbone.stem.stem4.lab.(scale|bias)": r"model.backbone.model.embedder.stem4.lab.\1",
    # Backbone stages mappings
    r"backbone.stages.(\d+).blocks.(\d+).layers.(\d+).conv.weight": r"model.backbone.model.encoder.stages.\1.blocks.\2.layers.\3.convolution.weight",
    r"backbone.stages.(\d+).blocks.(\d+).layers.(\d+).bn.(weight|bias|running_mean|running_var)": r"model.backbone.model.encoder.stages.\1.blocks.\2.layers.\3.normalization.\4",
    r"backbone.stages.(\d+).blocks.(\d+).layers.(\d+).conv1.conv.weight": r"model.backbone.model.encoder.stages.\1.blocks.\2.layers.\3.conv1.convolution.weight",
    r"backbone.stages.(\d+).blocks.(\d+).layers.(\d+).conv2.conv.weight": r"model.backbone.model.encoder.stages.\1.blocks.\2.layers.\3.conv2.convolution.weight",
    r"backbone.stages.(\d+).blocks.(\d+).layers.(\d+).conv1.bn.(weight|bias|running_mean|running_var)": r"model.backbone.model.encoder.stages.\1.blocks.\2.layers.\3.conv1.normalization.\4",
    r"backbone.stages.(\d+).blocks.(\d+).layers.(\d+).conv2.bn.(weight|bias|running_mean|running_var)": r"model.backbone.model.encoder.stages.\1.blocks.\2.layers.\3.conv2.normalization.\4",
    # Backbone stages aggregation
    r"backbone.stages.(\d+).blocks.(\d+).aggregation.0.conv.weight": r"model.backbone.model.encoder.stages.\1.blocks.\2.aggregation.0.convolution.weight",
    r"backbone.stages.(\d+).blocks.(\d+).aggregation.1.conv.weight": r"model.backbone.model.encoder.stages.\1.blocks.\2.aggregation.1.convolution.weight",
    r"backbone.stages.(\d+).blocks.(\d+).aggregation.0.bn.(weight|bias|running_mean|running_var)": r"model.backbone.model.encoder.stages.\1.blocks.\2.aggregation.0.normalization.\3",
    r"backbone.stages.(\d+).blocks.(\d+).aggregation.1.bn.(weight|bias|running_mean|running_var)": r"model.backbone.model.encoder.stages.\1.blocks.\2.aggregation.1.normalization.\3",
    # Backbone stages lab parameters for aggregation
    r"backbone.stages.(\d+).blocks.(\d+).aggregation.0.lab.(scale|bias)": r"model.backbone.model.encoder.stages.\1.blocks.\2.aggregation.0.lab.\3",
    r"backbone.stages.(\d+).blocks.(\d+).aggregation.1.lab.(scale|bias)": r"model.backbone.model.encoder.stages.\1.blocks.\2.aggregation.1.lab.\3",
    r"backbone.stages.(\d+).blocks.(\d+).layers.(\d+).lab.(scale|bias)": r"model.backbone.model.encoder.stages.\1.blocks.\2.layers.\3.lab.\4",
    # Conv1/Conv2 layers with lab
    r"backbone.stages.(\d+).blocks.(\d+).layers.(\d+).conv1.lab.(scale|bias)": r"model.backbone.model.encoder.stages.\1.blocks.\2.layers.\3.conv1.lab.\4",
    r"backbone.stages.(\d+).blocks.(\d+).layers.(\d+).conv2.lab.(scale|bias)": r"model.backbone.model.encoder.stages.\1.blocks.\2.layers.\3.conv2.lab.\4",
    # Downsample with lab
    r"backbone.stages.(\d+).downsample.lab.(scale|bias)": r"model.backbone.model.encoder.stages.\1.downsample.lab.\2",
    # Backbone downsample
    r"backbone.stages.(\d+).downsample.conv.weight": r"model.backbone.model.encoder.stages.\1.downsample.convolution.weight",
    r"backbone.stages.(\d+).downsample.bn.(weight|bias|running_mean|running_var)": r"model.backbone.model.encoder.stages.\1.downsample.normalization.\2",
    # Encoder mappings (linear1/linear2 become fc1/fc2; norm1/norm2 become named layer norms)
    r"encoder.encoder.(\d+).layers.0.self_attn.out_proj.(weight|bias)": r"model.encoder.encoder.\1.layers.0.self_attn.out_proj.\2",
    r"encoder.encoder.(\d+).layers.0.linear1.(weight|bias)": r"model.encoder.encoder.\1.layers.0.fc1.\2",
    r"encoder.encoder.(\d+).layers.0.linear2.(weight|bias)": r"model.encoder.encoder.\1.layers.0.fc2.\2",
    r"encoder.encoder.(\d+).layers.0.norm1.(weight|bias)": r"model.encoder.encoder.\1.layers.0.self_attn_layer_norm.\2",
    r"encoder.encoder.(\d+).layers.0.norm2.(weight|bias)": r"model.encoder.encoder.\1.layers.0.final_layer_norm.\2",
    # Encoder projections and convolutions (conv/norm pairs map to Sequential indices 0/1)
    r"encoder.input_proj.(\d+).conv.weight": r"model.encoder_input_proj.\1.0.weight",
    r"encoder.input_proj.(\d+).norm.(weight|bias|running_mean|running_var)": r"model.encoder_input_proj.\1.1.\2",
    r"encoder.lateral_convs.(\d+).conv.weight": r"model.encoder.lateral_convs.\1.conv.weight",
    r"encoder.lateral_convs.(\d+).norm.(weight|bias|running_mean|running_var)": r"model.encoder.lateral_convs.\1.norm.\2",
    # FPN blocks - complete structure
    # Basic convolutions
    r"encoder.fpn_blocks.(\d+).cv1.conv.weight": r"model.encoder.fpn_blocks.\1.conv1.conv.weight",
    r"encoder.fpn_blocks.(\d+).cv1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.conv1.norm.\2",
    # CSP Rep1 path
    r"encoder.fpn_blocks.(\d+).cv2.0.conv1.conv.weight": r"model.encoder.fpn_blocks.\1.csp_rep1.conv1.conv.weight",
    r"encoder.fpn_blocks.(\d+).cv2.0.conv1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.csp_rep1.conv1.norm.\2",
    r"encoder.fpn_blocks.(\d+).cv2.0.conv2.conv.weight": r"model.encoder.fpn_blocks.\1.csp_rep1.conv2.conv.weight",
    r"encoder.fpn_blocks.(\d+).cv2.0.conv2.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.csp_rep1.conv2.norm.\2",
    r"encoder.fpn_blocks.(\d+).cv2.1.conv.weight": r"model.encoder.fpn_blocks.\1.conv2.conv.weight",
    r"encoder.fpn_blocks.(\d+).cv2.1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.conv2.norm.\2",
    # CSP Rep2 path
    r"encoder.fpn_blocks.(\d+).cv3.0.conv1.conv.weight": r"model.encoder.fpn_blocks.\1.csp_rep2.conv1.conv.weight",
    r"encoder.fpn_blocks.(\d+).cv3.0.conv1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.csp_rep2.conv1.norm.\2",
    r"encoder.fpn_blocks.(\d+).cv3.0.conv2.conv.weight": r"model.encoder.fpn_blocks.\1.csp_rep2.conv2.conv.weight",
    r"encoder.fpn_blocks.(\d+).cv3.0.conv2.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.csp_rep2.conv2.norm.\2",
    r"encoder.fpn_blocks.(\d+).cv3.1.conv.weight": r"model.encoder.fpn_blocks.\1.conv3.conv.weight",
    r"encoder.fpn_blocks.(\d+).cv3.1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.conv3.norm.\2",
    # Final conv
    r"encoder.fpn_blocks.(\d+).cv4.conv.weight": r"model.encoder.fpn_blocks.\1.conv4.conv.weight",
    r"encoder.fpn_blocks.(\d+).cv4.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.conv4.norm.\2",
    # Bottlenecks for CSP Rep1
    r"encoder.fpn_blocks.(\d+).cv2.0.bottlenecks.(\d+).conv1.conv.weight": r"model.encoder.fpn_blocks.\1.csp_rep1.bottlenecks.\2.conv1.conv.weight",
    r"encoder.fpn_blocks.(\d+).cv2.0.bottlenecks.(\d+).conv1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.csp_rep1.bottlenecks.\2.conv1.norm.\3",
    r"encoder.fpn_blocks.(\d+).cv2.0.bottlenecks.(\d+).conv2.conv.weight": r"model.encoder.fpn_blocks.\1.csp_rep1.bottlenecks.\2.conv2.conv.weight",
    r"encoder.fpn_blocks.(\d+).cv2.0.bottlenecks.(\d+).conv2.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.csp_rep1.bottlenecks.\2.conv2.norm.\3",
    # Bottlenecks for CSP Rep2
    r"encoder.fpn_blocks.(\d+).cv3.0.bottlenecks.(\d+).conv1.conv.weight": r"model.encoder.fpn_blocks.\1.csp_rep2.bottlenecks.\2.conv1.conv.weight",
    r"encoder.fpn_blocks.(\d+).cv3.0.bottlenecks.(\d+).conv1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.csp_rep2.bottlenecks.\2.conv1.norm.\3",
    r"encoder.fpn_blocks.(\d+).cv3.0.bottlenecks.(\d+).conv2.conv.weight": r"model.encoder.fpn_blocks.\1.csp_rep2.bottlenecks.\2.conv2.conv.weight",
    r"encoder.fpn_blocks.(\d+).cv3.0.bottlenecks.(\d+).conv2.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.csp_rep2.bottlenecks.\2.conv2.norm.\3",
    # PAN blocks - complete structure (same cv1..cv4 layout as the FPN blocks above)
    # Basic convolutions
    r"encoder.pan_blocks.(\d+).cv1.conv.weight": r"model.encoder.pan_blocks.\1.conv1.conv.weight",
    r"encoder.pan_blocks.(\d+).cv1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.conv1.norm.\2",
    # CSP Rep1 path
    r"encoder.pan_blocks.(\d+).cv2.0.conv1.conv.weight": r"model.encoder.pan_blocks.\1.csp_rep1.conv1.conv.weight",
    r"encoder.pan_blocks.(\d+).cv2.0.conv1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.csp_rep1.conv1.norm.\2",
    r"encoder.pan_blocks.(\d+).cv2.0.conv2.conv.weight": r"model.encoder.pan_blocks.\1.csp_rep1.conv2.conv.weight",
    r"encoder.pan_blocks.(\d+).cv2.0.conv2.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.csp_rep1.conv2.norm.\2",
    r"encoder.pan_blocks.(\d+).cv2.1.conv.weight": r"model.encoder.pan_blocks.\1.conv2.conv.weight",
    r"encoder.pan_blocks.(\d+).cv2.1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.conv2.norm.\2",
    # CSP Rep2 path
    r"encoder.pan_blocks.(\d+).cv3.0.conv1.conv.weight": r"model.encoder.pan_blocks.\1.csp_rep2.conv1.conv.weight",
    r"encoder.pan_blocks.(\d+).cv3.0.conv1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.csp_rep2.conv1.norm.\2",
    r"encoder.pan_blocks.(\d+).cv3.0.conv2.conv.weight": r"model.encoder.pan_blocks.\1.csp_rep2.conv2.conv.weight",
    r"encoder.pan_blocks.(\d+).cv3.0.conv2.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.csp_rep2.conv2.norm.\2",
    r"encoder.pan_blocks.(\d+).cv3.1.conv.weight": r"model.encoder.pan_blocks.\1.conv3.conv.weight",
    r"encoder.pan_blocks.(\d+).cv3.1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.conv3.norm.\2",
    # Final conv
    r"encoder.pan_blocks.(\d+).cv4.conv.weight": r"model.encoder.pan_blocks.\1.conv4.conv.weight",
    r"encoder.pan_blocks.(\d+).cv4.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.conv4.norm.\2",
    # Bottlenecks for CSP Rep1
    r"encoder.pan_blocks.(\d+).cv2.0.bottlenecks.(\d+).conv1.conv.weight": r"model.encoder.pan_blocks.\1.csp_rep1.bottlenecks.\2.conv1.conv.weight",
    r"encoder.pan_blocks.(\d+).cv2.0.bottlenecks.(\d+).conv1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.csp_rep1.bottlenecks.\2.conv1.norm.\3",
    r"encoder.pan_blocks.(\d+).cv2.0.bottlenecks.(\d+).conv2.conv.weight": r"model.encoder.pan_blocks.\1.csp_rep1.bottlenecks.\2.conv2.conv.weight",
    r"encoder.pan_blocks.(\d+).cv2.0.bottlenecks.(\d+).conv2.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.csp_rep1.bottlenecks.\2.conv2.norm.\3",
    # Bottlenecks for CSP Rep2
    r"encoder.pan_blocks.(\d+).cv3.0.bottlenecks.(\d+).conv1.conv.weight": r"model.encoder.pan_blocks.\1.csp_rep2.bottlenecks.\2.conv1.conv.weight",
    r"encoder.pan_blocks.(\d+).cv3.0.bottlenecks.(\d+).conv1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.csp_rep2.bottlenecks.\2.conv1.norm.\3",
    r"encoder.pan_blocks.(\d+).cv3.0.bottlenecks.(\d+).conv2.conv.weight": r"model.encoder.pan_blocks.\1.csp_rep2.bottlenecks.\2.conv2.conv.weight",
    r"encoder.pan_blocks.(\d+).cv3.0.bottlenecks.(\d+).conv2.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.csp_rep2.bottlenecks.\2.conv2.norm.\3",
    # Downsample convolutions
    r"encoder.downsample_convs.(\d+).0.cv(\d+).conv.weight": r"model.encoder.downsample_convs.\1.conv\2.conv.weight",
    r"encoder.downsample_convs.(\d+).0.cv(\d+).norm.(weight|bias|running_mean|running_var)": r"model.encoder.downsample_convs.\1.conv\2.norm.\3",
    # Decoder layers (cross_attn becomes encoder_attn in the HF naming scheme)
    r"decoder.decoder.layers.(\d+).self_attn.out_proj.(weight|bias)": r"model.decoder.layers.\1.self_attn.out_proj.\2",
    r"decoder.decoder.layers.(\d+).cross_attn.sampling_offsets.(weight|bias)": r"model.decoder.layers.\1.encoder_attn.sampling_offsets.\2",
    r"decoder.decoder.layers.(\d+).cross_attn.attention_weights.(weight|bias)": r"model.decoder.layers.\1.encoder_attn.attention_weights.\2",
    r"decoder.decoder.layers.(\d+).cross_attn.value_proj.(weight|bias)": r"model.decoder.layers.\1.encoder_attn.value_proj.\2",
    r"decoder.decoder.layers.(\d+).cross_attn.output_proj.(weight|bias)": r"model.decoder.layers.\1.encoder_attn.output_proj.\2",
    r"decoder.decoder.layers.(\d+).cross_attn.num_points_scale": r"model.decoder.layers.\1.encoder_attn.num_points_scale",
    r"decoder.decoder.layers.(\d+).gateway.gate.(weight|bias)": r"model.decoder.layers.\1.gateway.gate.\2",
    r"decoder.decoder.layers.(\d+).gateway.norm.(weight|bias)": r"model.decoder.layers.\1.gateway.norm.\2",
    r"decoder.decoder.layers.(\d+).norm1.(weight|bias)": r"model.decoder.layers.\1.self_attn_layer_norm.\2",
    r"decoder.decoder.layers.(\d+).norm2.(weight|bias)": r"model.decoder.layers.\1.encoder_attn_layer_norm.\2",
    r"decoder.decoder.layers.(\d+).norm3.(weight|bias)": r"model.decoder.layers.\1.final_layer_norm.\2",
    r"decoder.decoder.layers.(\d+).linear1.(weight|bias)": r"model.decoder.layers.\1.fc1.\2",
    r"decoder.decoder.layers.(\d+).linear2.(weight|bias)": r"model.decoder.layers.\1.fc2.\2",
    # LQE layers
    r"decoder.decoder.lqe_layers.(\d+).reg_conf.layers.(\d+).(weight|bias)": r"model.decoder.lqe_layers.\1.reg_conf.layers.\2.\3",
    # Decoder heads and projections
    r"decoder.dec_score_head.(\d+).(weight|bias)": r"model.decoder.class_embed.\1.\2",
    r"decoder.dec_bbox_head.(\d+).layers.(\d+).(weight|bias)": r"model.decoder.bbox_embed.\1.layers.\2.\3",
    r"decoder.pre_bbox_head.layers.(\d+).(weight|bias)": r"model.decoder.pre_bbox_head.layers.\1.\2",
    r"decoder.input_proj.(\d+).conv.weight": r"model.decoder_input_proj.\1.0.weight",
    r"decoder.input_proj.(\d+).norm.(weight|bias|running_mean|running_var)": r"model.decoder_input_proj.\1.1.\2",
    # Other decoder components
    r"decoder.denoising_class_embed.weight": r"model.denoising_class_embed.weight",
    r"decoder.query_pos_head.layers.(\d+).(weight|bias)": r"model.decoder.query_pos_head.layers.\1.\2",
    r"decoder.enc_output.proj.(weight|bias)": r"model.enc_output.0.\1",
    r"decoder.enc_output.norm.(weight|bias)": r"model.enc_output.1.\1",
    r"decoder.enc_score_head.(weight|bias)": r"model.enc_score_head.\1",
    r"decoder.enc_bbox_head.layers.(\d+).(weight|bias)": r"model.enc_bbox_head.layers.\1.\2",
}
def convert_old_keys_to_new_keys(state_dict_keys: dict | None = None):
    """Rename checkpoint keys in place using `ORIGINAL_TO_CONVERTED_KEY_MAPPING`.

    Every regex pattern in the mapping is applied (via `re.sub`) to every key;
    keys whose converted form differs are popped and re-inserted under the new
    name. Keys that match no pattern are left untouched.

    Args:
        state_dict_keys: State dict whose keys should be renamed. If `None`
            (the declared default), `None` is returned unchanged instead of
            raising `AttributeError`.

    Returns:
        The same dict with renamed keys (mutated in place), or `None`.
    """
    if state_dict_keys is None:
        # Guard: the original default would crash on .keys(); pass None through.
        return None
    # Use the mapping to rename keys
    for original_key, converted_key in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():
        # Snapshot the keys since we mutate the dict while iterating.
        for key in list(state_dict_keys.keys()):
            new_key = re.sub(original_key, converted_key, key)
            if new_key != key:
                state_dict_keys[new_key] = state_dict_keys.pop(key)
    return state_dict_keys
def read_in_q_k_v(state_dict, config, model_name):
    """Split fused in-projection weights into separate q/k/v projection entries.

    PyTorch's `MultiHeadAttention` stores query/key/value projections as a single
    `in_proj_weight`/`in_proj_bias`; the HF model uses distinct `q_proj`,
    `k_proj` and `v_proj` modules, so the fused tensors are sliced accordingly.
    Mutates `state_dict` in place.
    """
    prefix = ""
    hidden = config.encoder_hidden_dim

    # Hybrid encoder self-attention: one fused projection per encoder layer.
    for layer_idx in range(config.encoder_layers):
        fused_weight = state_dict.pop(f"{prefix}encoder.encoder.{layer_idx}.layers.0.self_attn.in_proj_weight")
        fused_bias = state_dict.pop(f"{prefix}encoder.encoder.{layer_idx}.layers.0.self_attn.in_proj_bias")
        target = f"model.encoder.encoder.{layer_idx}.layers.0.self_attn"
        # Fused layout is [query; key; value] along dim 0.
        state_dict[f"{target}.q_proj.weight"] = fused_weight[:hidden, :]
        state_dict[f"{target}.q_proj.bias"] = fused_bias[:hidden]
        state_dict[f"{target}.k_proj.weight"] = fused_weight[hidden : 2 * hidden, :]
        state_dict[f"{target}.k_proj.bias"] = fused_bias[hidden : 2 * hidden]
        state_dict[f"{target}.v_proj.weight"] = fused_weight[-hidden:, :]
        state_dict[f"{target}.v_proj.bias"] = fused_bias[-hidden:]

    # Decoder self-attention: the "nano" checkpoints use a 128-dim model, all others 256.
    decoder_dim = 128 if model_name in ("dfine_n_coco", "dfine_n_obj2coco_e25", "dfine_n_obj365") else 256
    for layer_idx in range(config.decoder_layers):
        fused_weight = state_dict.pop(f"{prefix}decoder.decoder.layers.{layer_idx}.self_attn.in_proj_weight", None)
        fused_bias = state_dict.pop(f"{prefix}decoder.decoder.layers.{layer_idx}.self_attn.in_proj_bias", None)
        target = f"model.decoder.layers.{layer_idx}.self_attn"
        state_dict[f"{target}.q_proj.weight"] = fused_weight[:decoder_dim, :]
        state_dict[f"{target}.q_proj.bias"] = fused_bias[:decoder_dim]
        state_dict[f"{target}.k_proj.weight"] = fused_weight[decoder_dim : 2 * decoder_dim, :]
        state_dict[f"{target}.k_proj.bias"] = fused_bias[decoder_dim : 2 * decoder_dim]
        state_dict[f"{target}.v_proj.weight"] = fused_weight[-decoder_dim:, :]
        state_dict[f"{target}.v_proj.bias"] = fused_bias[-decoder_dim:]
# We will verify our results on an image of cute cats
def prepare_img():
    """Download and return the standard COCO test image (two cats on a couch).

    Returns:
        PIL.Image.Image: The decoded image, fully read into memory (the bytes
        are buffered in a BytesIO so the image survives the closed stream).
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    with httpx.stream("GET", url) as response:
        image = Image.open(BytesIO(response.read()))
    return image
@torch.no_grad()
def convert_d_fine_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub, repo_id):
    """
    Copy/paste/tweak model's weights to our D-FINE structure.

    Downloads the original checkpoint, renames/reshapes its weights to the HF
    layout, verifies the converted model against reference logits/boxes, and
    optionally saves the result locally and/or pushes it to the Hub.

    Args:
        model_name: Original checkpoint name, e.g. "dfine_s_coco".
        pytorch_dump_folder_path: Directory for the converted model, or None to skip saving.
        push_to_hub: Whether to upload config, model and image processor to the Hub.
        repo_id: Hub repository hosting the original weights (also the push target).

    Raises:
        ValueError: If `model_name` has no reference expected values.
    """
    # load default config
    config = get_d_fine_config(model_name)
    state_dict = load_original_state_dict(repo_id, model_name)
    # These buffers are recomputed by the HF implementation; drop them up front.
    state_dict.pop("decoder.valid_mask", None)
    state_dict.pop("decoder.anchors", None)
    model = DFineForObjectDetection(config)
    logger.info(f"Converting model {model_name}...")

    # Rename keys via the regex mapping, then drop converted leftovers that
    # the HF model recomputes on the fly.
    state_dict = convert_old_keys_to_new_keys(state_dict)
    state_dict.pop("decoder.model.decoder.up", None)
    state_dict.pop("decoder.model.decoder.reg_scale", None)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, config, model_name)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    for key in state_dict.copy():
        if key.endswith("num_batches_tracked"):
            del state_dict[key]
        # for two_stage: duplicate the decoder head weights under the top-level names
        if "bbox_embed" in key or ("class_embed" in key and "denoising_" not in key):
            state_dict[key.split("model.decoder.")[-1]] = state_dict[key]

    # finally, create HuggingFace model and load state dict
    model.load_state_dict(state_dict)
    model.eval()

    # load image processor
    image_processor = RTDetrImageProcessor()

    # prepare image
    img = prepare_img()

    # preprocess image with the reference (torchvision) pipeline
    transformations = transforms.Compose(
        [
            transforms.Resize([640, 640], interpolation=transforms.InterpolationMode.BILINEAR),
            transforms.ToTensor(),
        ]
    )
    original_pixel_values = transformations(img).unsqueeze(0)  # insert batch dimension
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    # sanity check: the HF image processor must reproduce the reference preprocessing
    assert torch.allclose(original_pixel_values, pixel_values)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    pixel_values = pixel_values.to(device)

    outputs = model(pixel_values)

    # Reference slices (first 3 queries x first 3 dims) recorded from the
    # original implementation for each released checkpoint.
    if model_name == "dfine_x_coco":
        expected_slice_logits = torch.tensor(
            [
                [-4.844723, -4.7293096, -4.5971327],
                [-4.554266, -4.61723, -4.627926],
                [-4.3934402, -4.6064143, -4.139952],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.2565248, 0.5477609, 0.47644863],
                [0.7690029, 0.41423926, 0.46148556],
                [0.1688096, 0.19923759, 0.21118002],
            ]
        )
    elif model_name == "dfine_x_obj2coco":
        expected_slice_logits = torch.tensor(
            [
                [-4.230433, -6.6295037, -4.8339615],
                [-4.085411, -6.3280816, -4.695468],
                [-3.8968022, -6.336813, -4.67051],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.25707328, 0.54842496, 0.47624254],
                [0.76967394, 0.41272867, 0.45970756],
                [0.16882066, 0.19918433, 0.2112098],
            ]
        )
    elif model_name == "dfine_x_obj365":
        expected_slice_logits = torch.tensor(
            [
                [-6.3844957, -3.7549126, -4.6873264],
                [-5.8433194, -3.4490552, -3.3228905],
                [-6.5314736, -3.7856622, -4.895984],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.7703046, 0.41329497, 0.45932162],
                [0.16898105, 0.19876392, 0.21050783],
                [0.25134972, 0.5517619, 0.4864124],
            ]
        )
    elif model_name == "dfine_m_coco":
        expected_slice_logits = torch.tensor(
            [
                [-4.5187078, -4.71708, -4.117749],
                [-4.513984, -4.937715, -3.829125],
                [-4.830042, -6.931682, -3.1740026],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.25851426, 0.5489963, 0.4757598],
                [0.769683, 0.41411665, 0.45988125],
                [0.16866133, 0.19921188, 0.21207744],
            ]
        )
    elif model_name == "dfine_m_obj2coco":
        expected_slice_logits = torch.tensor(
            [
                [-4.520666, -7.6678333, -5.739887],
                [-4.5053635, -7.510611, -5.452532],
                [-4.70348, -5.6098466, -5.0199957],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.2567608, 0.5485795, 0.4767465],
                [0.77035284, 0.41236404, 0.4580645],
                [0.5498525, 0.27548885, 0.05886984],
            ]
        )
    elif model_name == "dfine_m_obj365":
        expected_slice_logits = torch.tensor(
            [
                [-5.770525, -3.1610885, -5.2807794],
                [-5.7809954, -3.768266, -5.1146393],
                [-6.180705, -3.7357295, -3.1651964],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.2529114, 0.5526663, 0.48270613],
                [0.7712474, 0.41294736, 0.457174],
                [0.5497157, 0.27588123, 0.05813372],
            ]
        )
    elif model_name == "dfine_l_coco":
        expected_slice_logits = torch.tensor(
            [
                [-4.068779, -5.169955, -4.339212],
                [-3.9461594, -5.0279613, -4.0161457],
                [-4.218292, -6.196324, -5.175245],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.2564867, 0.5489948, 0.4748876],
                [0.7693534, 0.4138953, 0.4598034],
                [0.16875696, 0.19875404, 0.21196914],
            ]
        )
    elif model_name == "dfine_l_obj365":
        expected_slice_logits = torch.tensor(
            [
                [-5.7953215, -3.4901116, -5.4394145],
                [-5.7032104, -3.671125, -5.76121],
                [-6.09466, -3.1512096, -4.285499],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.7693825, 0.41265628, 0.4606362],
                [0.25306237, 0.55187637, 0.4832178],
                [0.16892478, 0.19880727, 0.21115331],
            ]
        )
    elif model_name == "dfine_l_obj2coco_e25":
        expected_slice_logits = torch.tensor(
            [
                [-3.6098495, -6.633563, -5.1227236],
                [-3.682696, -6.9178205, -5.414557],
                [-4.491674, -6.0823426, -4.5718226],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.7697078, 0.41368833, 0.45879585],
                [0.2573691, 0.54856044, 0.47715297],
                [0.16895264, 0.19871138, 0.2115552],
            ]
        )
    elif model_name == "dfine_n_coco":
        expected_slice_logits = torch.tensor(
            [
                [-3.7827945, -5.0889463, -4.8341026],
                [-5.3046904, -6.2801714, -2.9276395],
                [-4.497901, -5.2670407, -6.2380104],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.73334837, 0.4270624, 0.39424777],
                [0.1680235, 0.1988639, 0.21031213],
                [0.25370035, 0.5534435, 0.48496848],
            ]
        )
    elif model_name == "dfine_s_coco":
        expected_slice_logits = torch.tensor(
            [
                [-3.8097816, -4.7724586, -5.994499],
                [-5.2974715, -9.499067, -6.1653666],
                [-5.3502765, -3.9530406, -6.3630295],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.7677696, 0.41479152, 0.46441072],
                [0.16912134, 0.19869131, 0.2123824],
                [0.2581653, 0.54818195, 0.47512347],
            ]
        )
    elif model_name == "dfine_s_obj2coco":
        expected_slice_logits = torch.tensor(
            [
                [-6.0208125, -7.532673, -5.0572147],
                [-3.3595953, -9.057545, -6.376975],
                [-4.3203554, -9.546032, -6.075504],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.16901012, 0.19883151, 0.21121952],
                [0.76784194, 0.41266578, 0.46402973],
                # fixed confusing leading-zero literal (was 00.2563128; same value)
                [0.2563128, 0.54797643, 0.47937632],
            ]
        )
    elif model_name == "dfine_s_obj365":
        expected_slice_logits = torch.tensor(
            [
                [-6.3807316, -4.320986, -6.4775343],
                [-6.5818424, -3.5009093, -5.75824],
                [-5.748005, -4.3228016, -4.003726],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.2532072, 0.5491191, 0.48222217],
                [0.76586807, 0.41175705, 0.46789962],
                [0.169111, 0.19844547, 0.21069047],
            ]
        )
    else:
        raise ValueError(f"Unknown d_fine_name: {model_name}")

    # Verify the converted model reproduces the reference outputs.
    assert torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits.to(outputs.logits.device), atol=1e-3)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes.to(outputs.pred_boxes.device), atol=1e-4)

    if pytorch_dump_folder_path is not None:
        # parents=True so that a nested dump path works out of the box
        Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model, image processor and config to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        config.push_to_hub(
            repo_id=repo_id,
            commit_message="Add config from convert_d_fine_original_pytorch_checkpoint_to_hf.py",
        )
        model.push_to_hub(
            repo_id=repo_id,
            commit_message="Add model from convert_d_fine_original_pytorch_checkpoint_to_hf.py",
        )
        image_processor.push_to_hub(
            repo_id=repo_id,
            commit_message="Add image processor from convert_d_fine_original_pytorch_checkpoint_to_hf.py",
        )
if __name__ == "__main__":
    # CLI entry point: parse conversion options and run the converter.
    parser = argparse.ArgumentParser()
    # Must be one of the checkpoint names handled by the conversion logic
    # (e.g. dfine_s_coco, dfine_x_obj365, ...).
    parser.add_argument(
        "--model_name",
        default="dfine_s_coco",
        type=str,
        help="model_name of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    # Hub repo hosting the original `.pth` weights; also the push destination
    # when --push_to_hub is set.
    parser.add_argument(
        "--repo_id",
        type=str,
        help="repo_id where the model will be pushed to.",
    )
    args = parser.parse_args()
    convert_d_fine_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.repo_id)
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/d_fine/convert_d_fine_original_pytorch_checkpoint_to_hf.py",
"license": "Apache License 2.0",
"lines": 644,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/d_fine/modular_d_fine.py | # Copyright 2025 Baidu Inc and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from ... import initialization as init
from ...activations import ACT2CLS
from ...backbone_utils import consolidate_backbone_kwargs_to_config
from ...configuration_utils import PreTrainedConfig
from ...image_transforms import corners_to_center_format
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, logging, torch_compilable_check
from ..auto import AutoConfig
from ..rt_detr.modeling_rt_detr import (
RTDetrAIFILayer,
RTDetrConvNormLayer,
RTDetrDecoder,
RTDetrDecoderLayer,
RTDetrDecoderOutput,
RTDetrEncoderLayer,
RTDetrForObjectDetection,
RTDetrFrozenBatchNorm2d,
RTDetrHybridEncoder,
RTDetrMLPPredictionHead,
RTDetrModel,
RTDetrPreTrainedModel,
RTDetrRepVggBlock,
inverse_sigmoid,
)
from ..rt_detr_v2.modeling_rt_detr_v2 import multi_scale_deformable_attention_v2
logger = logging.get_logger(__name__)
# TODO: Attribute map assignment logic should be fixed in modular
# as well as super() call parsing because otherwise we cannot re-write args after initialization
class DFineConfig(PreTrainedConfig):
    """
    This is the configuration class to store the configuration of a [`DFineModel`]. It is used to instantiate a D-FINE
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of D-FINE-X-COCO [ustc-community/dfine-xlarge-coco](https://huggingface.co/ustc-community/dfine-xlarge-coco).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        initializer_range (`float`, *optional*, defaults to 0.01):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_bias_prior_prob (`float`, *optional*):
            The prior probability used by the bias initializer to initialize biases for `enc_score_head` and `class_embed`.
            If `None`, `prior_prob` computed as `prior_prob = 1 / (num_labels + 1)` while initializing model weights.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        batch_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the batch normalization layers.
        backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `HGNetV2Config()`):
            The configuration of the backbone model.
        freeze_backbone_batch_norms (`bool`, *optional*, defaults to `True`):
            Whether to freeze the batch normalization layers in the backbone.
        encoder_hidden_dim (`int`, *optional*, defaults to 256):
            Dimension of the layers in hybrid encoder.
        encoder_in_channels (`list`, *optional*, defaults to `[512, 1024, 2048]`):
            Multi level features input for encoder.
        feat_strides (`list[int]`, *optional*, defaults to `[8, 16, 32]`):
            Strides used in each feature map.
        encoder_layers (`int`, *optional*, defaults to 1):
            Total of layers to be used by the encoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 1024):
            Dimension of the "intermediate" (often named feed-forward) layer in encoder.
        encoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        dropout (`float`, *optional*, defaults to 0.0):
            The ratio for all dropout layers.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        encode_proj_layers (`list[int]`, *optional*, defaults to `[2]`):
            Indexes of the projected layers to be used in the encoder.
        positional_encoding_temperature (`int`, *optional*, defaults to 10000):
            The temperature parameter used to create the positional encodings.
        encoder_activation_function (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        activation_function (`str`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the general layer. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        eval_size (`tuple[int, int]`, *optional*):
            Height and width used to computes the effective height and width of the position embeddings after taking
            into account the stride.
        normalize_before (`bool`, *optional*, defaults to `False`):
            Determine whether to apply layer normalization in the transformer encoder layer before self-attention and
            feed-forward modules.
        hidden_expansion (`float`, *optional*, defaults to 1.0):
            Expansion ratio to enlarge the dimension size of RepVGGBlock and CSPRepLayer.
        d_model (`int`, *optional*, defaults to 256):
            Dimension of the layers excluding the hybrid encoder.
        num_queries (`int`, *optional*, defaults to 300):
            Number of object queries.
        decoder_in_channels (`list`, *optional*, defaults to `[256, 256, 256]`):
            Multi level features dimension for decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 1024):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        num_feature_levels (`int`, *optional*, defaults to 3):
            The number of input feature levels.
        decoder_n_points (`int`, *optional*, defaults to 4):
            The number of sampled keys in each feature level for each attention head in the decoder.
        decoder_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        decoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_activation_function (`str`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the decoder. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        num_denoising (`int`, *optional*, defaults to 100):
            The total number of denoising tasks or queries to be used for contrastive denoising.
        label_noise_ratio (`float`, *optional*, defaults to 0.5):
            The fraction of denoising labels to which random noise should be added.
        box_noise_scale (`float`, *optional*, defaults to 1.0):
            Scale or magnitude of noise to be added to the bounding boxes.
        learn_initial_query (`bool`, *optional*, defaults to `False`):
            Indicates whether the initial query embeddings for the decoder should be learned during training
        anchor_image_size (`tuple[int, int]`, *optional*):
            Height and width of the input image used during evaluation to generate the bounding box anchors. If
            `None`, anchors are generated automatically.
        with_box_refine (`bool`, *optional*, defaults to `True`):
            Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes
            based on the predictions from the previous layer.
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Whether the architecture has an encoder decoder structure.
        matcher_alpha (`float`, *optional*, defaults to 0.25):
            Parameter alpha used by the Hungarian Matcher.
        matcher_gamma (`float`, *optional*, defaults to 2.0):
            Parameter gamma used by the Hungarian Matcher.
        matcher_class_cost (`float`, *optional*, defaults to 2.0):
            The relative weight of the class loss used by the Hungarian Matcher.
        matcher_bbox_cost (`float`, *optional*, defaults to 5.0):
            The relative weight of the bounding box loss used by the Hungarian Matcher.
        matcher_giou_cost (`float`, *optional*, defaults to 2.0):
            The relative weight of the giou loss used by the Hungarian Matcher.
        use_focal_loss (`bool`, *optional*, defaults to `True`):
            Parameter informing if the focal loss should be used.
        auxiliary_loss (`bool`, *optional*, defaults to `True`):
            Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
        focal_loss_alpha (`float`, *optional*, defaults to 0.75):
            Parameter alpha used to compute the focal loss.
        focal_loss_gamma (`float`, *optional*, defaults to 2.0):
            Parameter gamma used to compute the focal loss.
        weight_loss_vfl (`float`, *optional*, defaults to 1.0):
            Relative weight of the varifocal loss in the object detection loss.
        weight_loss_bbox (`float`, *optional*, defaults to 5.0):
            Relative weight of the L1 bounding box loss in the object detection loss.
        weight_loss_giou (`float`, *optional*, defaults to 2.0):
            Relative weight of the generalized IoU loss in the object detection loss.
        weight_loss_fgl (`float`, *optional*, defaults to 0.15):
            Relative weight of the fine-grained localization loss in the object detection loss.
        weight_loss_ddf (`float`, *optional*, defaults to 1.5):
            Relative weight of the decoupled distillation focal loss in the object detection loss.
        eos_coefficient (`float`, *optional*, defaults to 0.0001):
            Relative classification weight of the 'no-object' class in the object detection loss.
        eval_idx (`int`, *optional*, defaults to -1):
            Index of the decoder layer to use for evaluation. If negative, counts from the end
            (e.g., -1 means use the last layer). This allows for early prediction in the decoder
            stack while still training later layers.
        layer_scale (`float`, *optional*, defaults to `1.0`):
            Scaling factor for the hidden dimension in later decoder layers. Used to adjust the
            model capacity after the evaluation layer.
        max_num_bins (`int`, *optional*, defaults to 32):
            Maximum number of bins for the distribution-guided bounding box refinement.
            Higher values allow for more fine-grained localization but increase computation.
        reg_scale (`float`, *optional*, defaults to 4.0):
            Scale factor for the regression distribution. Controls the range and granularity
            of the bounding box refinement process.
        depth_mult (`float`, *optional*, defaults to 1.0):
            Multiplier for the number of blocks in RepNCSPELAN4 layers. Used to scale the model's
            depth while maintaining its architecture.
        top_prob_values (`int`, *optional*, defaults to 4):
            Number of top probability values to consider from each corner's distribution.
        lqe_hidden_dim (`int`, *optional*, defaults to 64):
            Hidden dimension size for the Location Quality Estimator (LQE) network.
        lqe_layers (`int`, *optional*, defaults to 2):
            Number of layers in the Location Quality Estimator MLP.
        decoder_offset_scale (`float`, *optional*, defaults to 0.5):
            Offset scale used in deformable attention.
        decoder_method (`str`, *optional*, defaults to `"default"`):
            The method to use for the decoder: `"default"` or `"discrete"`.
        up (`float`, *optional*, defaults to 0.5):
            Controls the upper bounds of the Weighting Function.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings.
    """

    model_type = "d_fine"
    sub_configs = {"backbone_config": AutoConfig}
    # NOTE(review): "basic"/"bottleneck" look like ResNet-style layer types — confirm they are used for D-FINE.
    layer_types = ["basic", "bottleneck"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        initializer_range=0.01,
        initializer_bias_prior_prob=None,
        layer_norm_eps=1e-5,
        batch_norm_eps=1e-5,
        # backbone
        backbone_config=None,
        freeze_backbone_batch_norms=True,
        # encoder HybridEncoder
        encoder_hidden_dim=256,
        encoder_in_channels=[512, 1024, 2048],
        feat_strides=[8, 16, 32],
        encoder_layers=1,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        dropout=0.0,
        activation_dropout=0.0,
        encode_proj_layers=[2],
        positional_encoding_temperature=10000,
        encoder_activation_function="gelu",
        activation_function="silu",
        eval_size=None,
        normalize_before=False,
        hidden_expansion=1.0,
        # decoder DFineTransformer
        d_model=256,
        num_queries=300,
        decoder_in_channels=[256, 256, 256],
        decoder_ffn_dim=1024,
        num_feature_levels=3,
        decoder_n_points=4,
        decoder_layers=6,
        decoder_attention_heads=8,
        decoder_activation_function="relu",
        attention_dropout=0.0,
        num_denoising=100,
        label_noise_ratio=0.5,
        box_noise_scale=1.0,
        learn_initial_query=False,
        anchor_image_size=None,
        with_box_refine=True,
        is_encoder_decoder=True,
        # Loss
        matcher_alpha=0.25,
        matcher_gamma=2.0,
        matcher_class_cost=2.0,
        matcher_bbox_cost=5.0,
        matcher_giou_cost=2.0,
        use_focal_loss=True,
        auxiliary_loss=True,
        focal_loss_alpha=0.75,
        focal_loss_gamma=2.0,
        weight_loss_vfl=1.0,
        weight_loss_bbox=5.0,
        weight_loss_giou=2.0,
        weight_loss_fgl=0.15,
        weight_loss_ddf=1.5,
        eos_coefficient=1e-4,
        eval_idx=-1,
        layer_scale=1,
        max_num_bins=32,
        reg_scale=4.0,
        depth_mult=1.0,
        top_prob_values=4,
        lqe_hidden_dim=64,
        lqe_layers=2,
        decoder_offset_scale=0.5,
        decoder_method="default",
        up=0.5,
        tie_word_embeddings=True,
        **kwargs,
    ):
        self.initializer_range = initializer_range
        self.initializer_bias_prior_prob = initializer_bias_prior_prob
        self.layer_norm_eps = layer_norm_eps
        self.batch_norm_eps = batch_norm_eps
        # Resolve the backbone config (dict, config object, or default HGNetV2) from kwargs.
        backbone_config, kwargs = consolidate_backbone_kwargs_to_config(
            backbone_config=backbone_config,
            default_config_type="hgnet_v2",
            default_config_kwargs={"out_indices": [2, 3, 4]},
            **kwargs,
        )
        self.backbone_config = backbone_config
        self.freeze_backbone_batch_norms = freeze_backbone_batch_norms
        # encoder
        self.encoder_hidden_dim = encoder_hidden_dim
        self.encoder_in_channels = encoder_in_channels
        self.feat_strides = feat_strides
        self.encoder_attention_heads = encoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.activation_dropout = activation_dropout
        self.encode_proj_layers = encode_proj_layers
        self.encoder_layers = encoder_layers
        self.positional_encoding_temperature = positional_encoding_temperature
        self.eval_size = eval_size
        self.normalize_before = normalize_before
        self.encoder_activation_function = encoder_activation_function
        self.activation_function = activation_function
        self.hidden_expansion = hidden_expansion
        # decoder
        self.d_model = d_model
        self.num_queries = num_queries
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_in_channels = decoder_in_channels
        self.num_feature_levels = num_feature_levels
        self.decoder_n_points = decoder_n_points
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_activation_function = decoder_activation_function
        self.attention_dropout = attention_dropout
        self.num_denoising = num_denoising
        self.label_noise_ratio = label_noise_ratio
        self.box_noise_scale = box_noise_scale
        self.learn_initial_query = learn_initial_query
        self.anchor_image_size = anchor_image_size
        self.auxiliary_loss = auxiliary_loss
        self.with_box_refine = with_box_refine
        # Loss
        self.matcher_alpha = matcher_alpha
        self.matcher_gamma = matcher_gamma
        self.matcher_class_cost = matcher_class_cost
        self.matcher_bbox_cost = matcher_bbox_cost
        self.matcher_giou_cost = matcher_giou_cost
        self.use_focal_loss = use_focal_loss
        self.focal_loss_alpha = focal_loss_alpha
        self.focal_loss_gamma = focal_loss_gamma
        self.weight_loss_vfl = weight_loss_vfl
        self.weight_loss_bbox = weight_loss_bbox
        self.weight_loss_giou = weight_loss_giou
        self.weight_loss_fgl = weight_loss_fgl
        self.weight_loss_ddf = weight_loss_ddf
        self.eos_coefficient = eos_coefficient
        # add the new attributes with the given values or defaults
        self.eval_idx = eval_idx
        self.layer_scale = layer_scale
        self.max_num_bins = max_num_bins
        self.reg_scale = reg_scale
        self.depth_mult = depth_mult
        self.decoder_offset_scale = decoder_offset_scale
        self.decoder_method = decoder_method
        self.top_prob_values = top_prob_values
        self.lqe_hidden_dim = lqe_hidden_dim
        self.lqe_layers = lqe_layers
        self.up = up
        self.tie_word_embeddings = tie_word_embeddings
        # Validate per-level sampling point counts when given as a list.
        if isinstance(self.decoder_n_points, list):
            if len(self.decoder_n_points) != self.num_feature_levels:
                raise ValueError(
                    f"Length of decoder_n_points list ({len(self.decoder_n_points)}) must match num_feature_levels ({self.num_feature_levels})."
                )
        # The decoder head dimension must divide the model dimension evenly.
        head_dim = self.d_model // self.decoder_attention_heads
        if head_dim * self.decoder_attention_heads != self.d_model:
            raise ValueError(
                f"Embedded dimension {self.d_model} must be divisible by decoder_attention_heads {self.decoder_attention_heads}"
            )
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
class DFineDecoderOutput(RTDetrDecoderOutput):
    # Same fields as the RT-DETR decoder output; re-declared so the generated
    # D-FINE modeling file gets its own output class.
    pass
def weighting_function(max_num_bins: int, up: torch.Tensor, reg_scale: int) -> torch.Tensor:
    """
    Generates the non-uniform Weighting Function W(n) for bounding box regression.

    Args:
        max_num_bins (int): Max number of the discrete bins.
        up (Tensor): Controls upper bounds of the sequence,
            where maximum offset is ±up * H / W.
        reg_scale (float): Controls the curvature of the Weighting Function.
            Larger values result in flatter weights near the central axis W(max_num_bins/2)=0
            and steeper weights at both ends.

    Returns:
        Tensor: Sequence of Weighting Function.
    """
    # Near and far bounds of the weighting sequence; the far bound is twice the near one.
    near_bound = abs(up[0]) * abs(reg_scale)
    far_bound = near_bound * 2
    # Geometric growth factor chosen so that the sequence reaches near_bound at the edges.
    growth = (near_bound + 1) ** (2 / (max_num_bins - 2))
    half = max_num_bins // 2
    # Negative side mirrors the positive side around the central zero bin.
    negative_side = [-(growth**exponent) + 1 for exponent in range(half - 1, 0, -1)]
    positive_side = [growth**exponent - 1 for exponent in range(1, half)]
    pieces = [-far_bound, *negative_side, torch.zeros_like(up[0][None]), *positive_side, far_bound]
    return torch.cat(pieces, 0)
def distance2bbox(points, distance: torch.Tensor, reg_scale: float) -> torch.Tensor:
    """
    Decodes edge-distances into bounding box coordinates.

    Args:
        points (`torch.Tensor`):
            (batch_size, num_boxes, 4) or (num_boxes, 4) format, representing [x_center, y_center, width, height]
        distance (`torch.Tensor`):
            (batch_size, num_boxes, 4) or (num_boxes, 4), representing distances from the point to the left, top, right, and bottom boundaries.
        reg_scale (`float`):
            Controls the curvature of the Weighting Function.

    Returns:
        `torch.Tensor`: Bounding boxes in (batch_size, num_boxes, 4) or (num_boxes, 4) format, representing [x_center, y_center, width, height]
    """
    reg_scale = abs(reg_scale)
    half_scale = 0.5 * reg_scale
    # Box width/height expressed in units of the regression scale.
    unit_w = points[..., 2] / reg_scale
    unit_h = points[..., 3] / reg_scale
    left = points[..., 0] - (half_scale + distance[..., 0]) * unit_w
    top = points[..., 1] - (half_scale + distance[..., 1]) * unit_h
    right = points[..., 0] + (half_scale + distance[..., 2]) * unit_w
    bottom = points[..., 1] + (half_scale + distance[..., 3]) * unit_h
    corners = torch.stack([left, top, right, bottom], -1)
    return corners_to_center_format(corners)
class DFineMLP(nn.Module):
    """Plain multi-layer perceptron with a configurable activation between hidden layers."""

    def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int, act: str = "relu"):
        super().__init__()
        self.num_layers = num_layers
        # Layer widths: input -> hidden * (num_layers - 1) -> output.
        dims = [input_dim] + [hidden_dim] * (num_layers - 1) + [output_dim]
        self.layers = nn.ModuleList(nn.Linear(dims[i], dims[i + 1]) for i in range(num_layers))
        self.act = ACT2CLS[act]()

    def forward(self, stat_features: torch.Tensor) -> torch.Tensor:
        last = self.num_layers - 1
        for idx, layer in enumerate(self.layers):
            # No activation after the final layer.
            stat_features = layer(stat_features) if idx == last else self.act(layer(stat_features))
        return stat_features
class DFineGate(nn.Module):
    """Learned gate that fuses a residual stream with new hidden states, then layer-normalizes."""

    def __init__(self, d_model: int):
        super().__init__()
        self.gate = nn.Linear(2 * d_model, 2 * d_model)
        self.norm = nn.LayerNorm(d_model)

    def forward(self, second_residual: torch.Tensor, hidden_states: torch.Tensor) -> torch.Tensor:
        stacked = torch.cat([second_residual, hidden_states], dim=-1)
        # One sigmoid weight per input stream, per channel.
        gate_residual, gate_hidden = torch.sigmoid(self.gate(stacked)).chunk(2, dim=-1)
        fused = gate_residual * second_residual + gate_hidden * hidden_states
        return self.norm(fused)
class DFineFrozenBatchNorm2d(RTDetrFrozenBatchNorm2d):
    # BatchNorm2d with frozen statistics and affine parameters, identical to the RT-DETR version.
    pass
class DFineMultiscaleDeformableAttention(nn.Module):
    """Multiscale deformable attention that samples a (possibly per-level) number of points
    from the encoder feature maps, using the RT-DETR v2 deformable attention core."""

    def __init__(self, config: DFineConfig):
        """
        D-Fine version of multiscale deformable attention
        """
        super().__init__()
        self.d_model = config.d_model
        self.n_heads = config.decoder_attention_heads
        self.n_levels = config.num_feature_levels
        self.offset_scale = config.decoder_offset_scale
        self.decoder_method = config.decoder_method
        self.n_points = config.decoder_n_points
        # decoder_n_points may be a per-level list; otherwise replicate the scalar per level.
        if isinstance(self.n_points, list):
            num_points_list = self.n_points
        else:
            num_points_list = [self.n_points for _ in range(self.n_levels)]
        self.num_points_list = num_points_list
        # 1/n repeated n times per level, used to scale offsets by the level's point budget.
        num_points_scale = [1 / n for n in self.num_points_list for _ in range(n)]
        self.register_buffer("num_points_scale", torch.tensor(num_points_scale, dtype=torch.float32))
        self.total_points = self.n_heads * sum(self.num_points_list)
        # Predict 2-D offsets and a scalar weight for every (head, point) pair.
        self.sampling_offsets = nn.Linear(self.d_model, self.total_points * 2)
        self.attention_weights = nn.Linear(self.d_model, self.total_points)
        self.ms_deformable_attn_core = multi_scale_deformable_attention_v2

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        reference_points=None,
        encoder_hidden_states=None,
        spatial_shapes=None,
        spatial_shapes_list=None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        batch_size, num_queries, _ = hidden_states.shape
        batch_size, sequence_length, _ = encoder_hidden_states.shape
        # The flattened encoder sequence must cover exactly the sum of all level areas.
        torch_compilable_check(
            (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == sequence_length,
            "Make sure to align the spatial shapes with the sequence length of the encoder hidden states",
        )
        # Reshape for multi-head attention
        value = encoder_hidden_states.reshape(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads)
        if attention_mask is not None:
            # Zero out masked encoder positions before sampling.
            value = value.masked_fill(~attention_mask[..., None], float(0))
        sampling_offsets: torch.Tensor = self.sampling_offsets(hidden_states)
        sampling_offsets = sampling_offsets.reshape(
            batch_size, num_queries, self.n_heads, sum(self.num_points_list), 2
        )
        attention_weights = self.attention_weights(hidden_states).reshape(
            batch_size, num_queries, self.n_heads, sum(self.num_points_list)
        )
        # Softmax over all sampled points (across levels) per head.
        attention_weights = F.softmax(attention_weights, dim=-1)
        if reference_points.shape[-1] == 2:
            # Points-only references: normalize offsets by each level's (width, height).
            offset_normalizer = torch.tensor(spatial_shapes)
            offset_normalizer = offset_normalizer.flip([1]).reshape(1, 1, 1, self.n_levels, 1, 2)
            # NOTE(review): this reshape uses `sequence_length` where the analogous RT-DETR code
            # is per-query; D-FINE decoder references have last dim 4, so this branch may be
            # unexercised — confirm before relying on it.
            sampling_locations = (
                reference_points.reshape(batch_size, sequence_length, 1, self.n_levels, 1, 2)
                + sampling_offsets / offset_normalizer
            )
        elif reference_points.shape[-1] == 4:
            # reference_points [8, 480, None, 1, 4]
            # sampling_offsets [8, 480, 8, 12, 2]
            # Box references: offsets are scaled by the box size and the per-level point budget.
            num_points_scale = self.num_points_scale.to(dtype=hidden_states.dtype).unsqueeze(-1)
            offset = sampling_offsets * num_points_scale * reference_points[:, :, None, :, 2:] * self.offset_scale
            sampling_locations = reference_points[:, :, None, :, :2] + offset
        else:
            raise ValueError(
                f"Last dim of reference_points must be 2 or 4, but get {reference_points.shape[-1]} instead."
            )
        output = self.ms_deformable_attn_core(
            value,
            spatial_shapes_list,
            sampling_locations,
            attention_weights,
            self.num_points_list,
            self.decoder_method,
        )
        return output, attention_weights
class DFineConvNormLayer(RTDetrConvNormLayer):
    """Conv + norm layer that, unlike the RT-DETR parent, supports grouped convolutions."""

    def __init__(
        self,
        config: DFineConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int,
        groups: int = 1,
        padding: int | None = None,
        activation: str | None = None,
    ):
        super().__init__(config, in_channels, out_channels, kernel_size, stride, padding=None, activation=activation)
        # Replace the parent's conv with a grouped, "same"-padded variant.
        if padding is None:
            padding = (kernel_size - 1) // 2
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            groups=groups,
            padding=padding,
            bias=False,
        )
class DFineRepVggBlock(RTDetrRepVggBlock):
    """RepVGG block whose 3x3 and 1x1 branches use the D-FINE conv-norm layer."""

    def __init__(self, config: DFineConfig, in_channels: int, out_channels: int):
        super().__init__(config)
        # Both branches read the same input channels; branch kernels are 3x3 and 1x1.
        self.conv1 = DFineConvNormLayer(config, in_channels, out_channels, 3, 1, padding=1)
        self.conv2 = DFineConvNormLayer(config, in_channels, out_channels, 1, 1, padding=0)
class DFineCSPRepLayer(nn.Module):
    """
    Cross Stage Partial (CSP) network layer with RepVGG blocks.
    """

    def __init__(
        self, config: DFineConfig, in_channels: int, out_channels: int, num_blocks: int, expansion: float = 1.0
    ):
        super().__init__()
        act = config.activation_function
        mid_channels = int(out_channels * expansion)
        # Two parallel 1x1 projections: one feeds the RepVGG stack, one is a shortcut.
        self.conv1 = DFineConvNormLayer(config, in_channels, mid_channels, 1, 1, activation=act)
        self.conv2 = DFineConvNormLayer(config, in_channels, mid_channels, 1, 1, activation=act)
        self.bottlenecks = nn.ModuleList(
            DFineRepVggBlock(config, mid_channels, mid_channels) for _ in range(num_blocks)
        )
        # Final projection only when the expanded width differs from the output width.
        self.conv3 = (
            DFineConvNormLayer(config, mid_channels, out_channels, 1, 1, activation=act)
            if mid_channels != out_channels
            else nn.Identity()
        )

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        main_branch = self.conv1(hidden_state)
        for block in self.bottlenecks:
            main_branch = block(main_branch)
        shortcut_branch = self.conv2(hidden_state)
        return self.conv3(main_branch + shortcut_branch)
class DFineRepNCSPELAN4(nn.Module):
    """ELAN-style aggregation block: splits features, refines one half through two CSP
    stages, and fuses every intermediate along the channel axis."""

    def __init__(self, config: DFineConfig, act: str = "silu", numb_blocks: int = 3):
        super().__init__()
        wide = config.encoder_hidden_dim * 2
        narrow = config.encoder_hidden_dim
        branch_dim = round(config.hidden_expansion * config.encoder_hidden_dim // 2)
        self.conv_dim = wide // 2
        self.conv1 = DFineConvNormLayer(config, wide, wide, 1, 1, activation=act)
        self.csp_rep1 = DFineCSPRepLayer(config, wide // 2, branch_dim, num_blocks=numb_blocks)
        self.conv2 = DFineConvNormLayer(config, branch_dim, branch_dim, 3, 1, activation=act)
        self.csp_rep2 = DFineCSPRepLayer(config, branch_dim, branch_dim, num_blocks=numb_blocks)
        self.conv3 = DFineConvNormLayer(config, branch_dim, branch_dim, 3, 1, activation=act)
        # Fuse the two split halves plus both refined branches back to the base width.
        self.conv4 = DFineConvNormLayer(config, wide + (2 * branch_dim), narrow, 1, 1, activation=act)

    def forward(self, input_features: torch.Tensor) -> torch.Tensor:
        expanded = self.conv1(input_features)
        first_half, second_half = expanded.split((self.conv_dim, self.conv_dim), 1)
        branch1 = self.conv2(self.csp_rep1(second_half))
        branch2 = self.conv3(self.csp_rep2(branch1))
        merged_features = torch.cat([first_half, second_half, branch1, branch2], 1)
        return self.conv4(merged_features)
class DFineSCDown(nn.Module):
    """Separable downsampling: 1x1 pointwise mixing followed by a grouped spatial conv."""

    def __init__(self, config: DFineConfig, kernel_size: int, stride: int):
        super().__init__()
        dim = config.encoder_hidden_dim
        self.conv1 = DFineConvNormLayer(config, dim, dim, 1, 1)
        # groups == channels: each channel is downsampled independently.
        self.conv2 = DFineConvNormLayer(config, dim, dim, kernel_size, stride, dim)

    def forward(self, input_features: torch.Tensor) -> torch.Tensor:
        return self.conv2(self.conv1(input_features))
class DFineEncoderLayer(RTDetrEncoderLayer):
    """RT-DETR encoder layer with the feed-forward network replaced by a 2-layer DFineMLP."""

    def __init__(self, config: DFineConfig):
        super().__init__(config)
        self.mlp = DFineMLP(
            input_dim=self.hidden_size,
            hidden_dim=config.encoder_ffn_dim,
            output_dim=self.hidden_size,
            num_layers=2,
            act=config.encoder_activation_function,
        )
class DFineAIFILayer(RTDetrAIFILayer):
    # Attention-based Intra-scale Feature Interaction layer, unchanged from RT-DETR.
    pass
class DFineIntegral(nn.Module):
    """
    A static layer that calculates integral results from a distribution.

    This layer computes the target location using the formula: `sum{Pr(n) * W(n)}`,
    where Pr(n) is the softmax probability vector representing the discrete
    distribution, and W(n) is the non-uniform Weighting Function.

    Args:
        max_num_bins (int): Max number of the discrete bins. Default is 32.
            It can be adjusted based on the dataset or task requirements.
    """

    def __init__(self, config: DFineConfig):
        super().__init__()
        self.max_num_bins = config.max_num_bins

    def forward(self, pred_corners: torch.Tensor, project: torch.Tensor) -> torch.Tensor:
        batch_size, num_queries, _ = pred_corners.shape
        bin_count = self.max_num_bins + 1
        # Per-corner softmax over the discrete bins.
        probabilities = F.softmax(pred_corners.reshape(-1, bin_count), dim=1)
        # Expected value under the weighting vector W(n).
        expected = F.linear(probabilities, project.to(probabilities.device))
        return expected.reshape(-1, 4).reshape(batch_size, num_queries, -1)
class DFineLQE(nn.Module):
    """Location Quality Estimator: adjusts classification scores using statistics of the
    predicted corner distributions."""

    def __init__(self, config: DFineConfig):
        super().__init__()
        self.top_prob_values = config.top_prob_values
        self.max_num_bins = config.max_num_bins
        # Input per query: for each of the 4 box sides, top-k probabilities plus their mean.
        self.reg_conf = DFineMLP(4 * (self.top_prob_values + 1), config.lqe_hidden_dim, 1, config.lqe_layers)

    def forward(self, scores: torch.Tensor, pred_corners: torch.Tensor) -> torch.Tensor:
        batch_size, length, _ = pred_corners.size()
        corner_probs = F.softmax(pred_corners.reshape(batch_size, length, 4, self.max_num_bins + 1), dim=-1)
        top_probs, _ = corner_probs.topk(self.top_prob_values, dim=-1)
        stats = torch.cat([top_probs, top_probs.mean(dim=-1, keepdim=True)], dim=-1)
        quality_score = self.reg_conf(stats.reshape(batch_size, length, -1))
        return scores + quality_score
class DFineDecoderLayer(RTDetrDecoderLayer):
    """RT-DETR decoder layer with D-FINE deformable cross-attention, a gated residual
    fusion instead of a post-cross-attention layer norm, and an MLP feed-forward."""

    def __init__(self, config: DFineConfig):
        super().__init__(config)
        # override the encoder attention module with d-fine version
        self.encoder_attn = DFineMultiscaleDeformableAttention(config=config)
        # gate
        self.gateway = DFineGate(config.d_model)
        self.mlp = DFineMLP(
            self.hidden_size, config.decoder_ffn_dim, self.hidden_size, 2, config.decoder_activation_function
        )
        # The gateway normalizes after cross-attention, so the parent's layer norm is unused.
        del self.encoder_attn_layer_norm

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: torch.Tensor | None = None,
        reference_points=None,
        spatial_shapes=None,
        spatial_shapes_list=None,
        encoder_hidden_states: torch.Tensor | None = None,
        encoder_attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        residual = hidden_states

        # Self Attention
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=encoder_attention_mask,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        residual = hidden_states

        # Cross-Attention
        hidden_states = hidden_states if position_embeddings is None else hidden_states + position_embeddings
        hidden_states, _ = self.encoder_attn(
            hidden_states=hidden_states,
            encoder_hidden_states=encoder_hidden_states,
            reference_points=reference_points,
            spatial_shapes=spatial_shapes,
            spatial_shapes_list=spatial_shapes_list,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        # Gated fusion of the pre-cross-attention residual with the attended states.
        hidden_states = self.gateway(residual, hidden_states)

        # Fully Connected
        residual = hidden_states
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        # Clamp to the float16 representable range before the final norm to avoid inf/nan.
        hidden_states = self.final_layer_norm(hidden_states.clamp(min=-65504, max=65504))

        return hidden_states
class DFineMLPPredictionHead(RTDetrMLPPredictionHead):
    # MLP prediction head, unchanged from RT-DETR.
    pass
class DFinePreTrainedModel(RTDetrPreTrainedModel):
    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize the weights"""
        # initialize linear layer bias value according to a given probability value.
        if isinstance(module, (DFineForObjectDetection, DFineDecoder)):
            if module.class_embed is not None:
                for layer in module.class_embed:
                    # Focal-loss-style bias so initial class logits match the prior probability.
                    prior_prob = self.config.initializer_bias_prior_prob or 1 / (self.config.num_labels + 1)
                    bias = float(-math.log((1 - prior_prob) / prior_prob))
                    init.xavier_uniform_(layer.weight)
                    init.constant_(layer.bias, bias)
            if module.bbox_embed is not None:
                for layer in module.bbox_embed:
                    # Start box refinement heads at zero so early refinements are no-ops.
                    init.constant_(layer.layers[-1].weight, 0)
                    init.constant_(layer.layers[-1].bias, 0)
            # Non-trainable scalars of the weighting function, filled from the config.
            if hasattr(module, "reg_scale"):
                init.constant_(module.reg_scale, self.config.reg_scale)
            if hasattr(module, "up"):
                init.constant_(module.up, self.config.up)
        if isinstance(module, DFineMultiscaleDeformableAttention):
            # Zero the offset weights; the bias encodes head directions evenly spaced on a
            # circle, scaled by the per-level point index (deformable-attention init).
            init.constant_(module.sampling_offsets.weight, 0.0)
            default_dtype = torch.get_default_dtype()
            thetas = torch.arange(module.n_heads, dtype=torch.int64).to(default_dtype) * (
                2.0 * math.pi / module.n_heads
            )
            grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
            grid_init = grid_init / grid_init.abs().max(-1, keepdim=True).values
            grid_init = grid_init.reshape(module.n_heads, 1, 2).tile([1, sum(module.num_points_list), 1])
            scaling = torch.concat([torch.arange(1, n + 1) for n in module.num_points_list]).reshape(1, -1, 1)
            grid_init *= scaling
            init.copy_(module.sampling_offsets.bias, grid_init.flatten())
            init.constant_(module.attention_weights.weight, 0.0)
            init.constant_(module.attention_weights.bias, 0.0)
            # Restore the 1/n-per-point buffer (it is not a learned parameter).
            num_points_scale = [1 / n for n in module.num_points_list for _ in range(n)]
            init.copy_(module.num_points_scale, torch.tensor(num_points_scale, dtype=torch.float32))
        if isinstance(module, DFineModel):
            # Same prior-probability bias for the encoder's auxiliary score head.
            prior_prob = self.config.initializer_bias_prior_prob or 1 / (self.config.num_labels + 1)
            bias = float(-math.log((1 - prior_prob) / prior_prob))
            init.xavier_uniform_(module.enc_score_head.weight)
            init.constant_(module.enc_score_head.bias, bias)
        if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
            init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                init.zeros_(module.bias)
            # Reset batch-norm running statistics when present.
            if getattr(module, "running_mean", None) is not None:
                init.zeros_(module.running_mean)
                init.ones_(module.running_var)
                init.zeros_(module.num_batches_tracked)
        if isinstance(module, DFineGate):
            # sigmoid(bias)=0.5 with zero weights: the gate starts as an even 50/50 blend.
            bias = float(-math.log((1 - 0.5) / 0.5))
            init.constant_(module.gate.bias, bias)
            init.constant_(module.gate.weight, 0)
        if isinstance(module, DFineLQE):
            # Quality head starts at zero so initial scores are unmodified.
            init.constant_(module.reg_conf.layers[-1].bias, 0)
            init.constant_(module.reg_conf.layers[-1].weight, 0)
        if isinstance(module, nn.LayerNorm):
            init.ones_(module.weight)
            init.zeros_(module.bias)
        if hasattr(module, "weight_embedding") and self.config.learn_initial_query:
            init.xavier_uniform_(module.weight_embedding.weight)
        if hasattr(module, "denoising_class_embed") and self.config.num_denoising > 0:
            init.xavier_uniform_(module.denoising_class_embed.weight)
class DFineHybridEncoder(RTDetrHybridEncoder):
    """
    Hybrid encoder: AIFI (attention-based intra-scale feature interaction) on the
    selected projection levels, followed by a top-down FPN and a bottom-up PAN
    across the backbone feature levels.
    """

    def __init__(self, config: DFineConfig):
        # Fix: the original called `DFinePreTrainedModel.__init__(config)` without `self`,
        # which binds `config` as the instance and omits the required config argument --
        # it cannot run. Initializing through DFinePreTrainedModel (rather than the
        # RTDetrHybridEncoder parent) deliberately skips the parent's module construction
        # and keeps the `self.post_init()` call below valid.
        DFinePreTrainedModel.__init__(self, config)
        self.config = config
        self.in_channels = config.encoder_in_channels
        self.num_fpn_stages = len(self.in_channels) - 1
        self.feat_strides = config.feat_strides
        self.encoder_hidden_dim = config.encoder_hidden_dim
        self.encode_proj_layers = config.encode_proj_layers
        self.positional_encoding_temperature = config.positional_encoding_temperature
        self.eval_size = config.eval_size
        # Every output level is projected to the shared encoder hidden dimension.
        self.out_channels = [self.encoder_hidden_dim for _ in self.in_channels]
        self.out_strides = self.feat_strides
        # AIFI (Attention-based Intra-scale Feature Interaction) layers
        self.aifi = nn.ModuleList([DFineAIFILayer(config) for _ in range(len(self.encode_proj_layers))])
        # top-down fpn
        self.lateral_convs = nn.ModuleList()
        self.fpn_blocks = nn.ModuleList()
        for _ in range(len(self.in_channels) - 1, 0, -1):
            lateral_layer = DFineConvNormLayer(config, self.encoder_hidden_dim, self.encoder_hidden_dim, 1, 1)
            self.lateral_convs.append(lateral_layer)
            num_blocks = round(3 * config.depth_mult)
            fpn_layer = DFineRepNCSPELAN4(config, numb_blocks=num_blocks)
            self.fpn_blocks.append(fpn_layer)
        # bottom-up pan
        self.downsample_convs = nn.ModuleList()
        self.pan_blocks = nn.ModuleList()
        for _ in range(len(self.in_channels) - 1):
            self.downsample_convs.append(DFineSCDown(config, 3, 2))
            num_blocks = round(3 * config.depth_mult)
            self.pan_blocks.append(DFineRepNCSPELAN4(config, numb_blocks=num_blocks))
        self.post_init()
class DFineDecoder(RTDetrDecoder):
    """
    D-FINE Decoder implementing Fine-grained Distribution Refinement (FDR).
    This decoder refines object detection predictions through iterative updates across multiple layers,
    utilizing attention mechanisms, location quality estimators, and distribution refinement techniques
    to improve bounding box accuracy and robustness.
    """
    def __init__(self, config: DFineConfig):
        # Resolve a possibly-negative eval_idx (Python-style index from the end) into an
        # absolute layer index *before* super().__init__() runs.
        self.eval_idx = config.eval_idx if config.eval_idx >= 0 else config.decoder_layers + config.eval_idx
        super().__init__(config=config)
        # Fixed (non-trainable) scalars used by the distribution-refinement head.
        self.reg_scale = nn.Parameter(torch.tensor([config.reg_scale]), requires_grad=False)
        self.max_num_bins = config.max_num_bins
        self.d_model = config.d_model
        self.layer_scale = config.layer_scale
        # Predicts the initial box delta from the first decoder layer's hidden states.
        self.pre_bbox_head = DFineMLP(config.hidden_size, config.hidden_size, 4, 3)
        self.integral = DFineIntegral(config)
        self.num_head = config.decoder_attention_heads
        self.up = nn.Parameter(torch.tensor([config.up]), requires_grad=False)
        # One location-quality estimator (LQE) per decoder layer.
        self.lqe_layers = nn.ModuleList([DFineLQE(config) for _ in range(config.decoder_layers)])
        # NOTE(review): the second comprehension appends extra layers for indices past
        # eval_idx -- presumably for the layer-scale clone variant; confirm against the
        # original D-FINE release before changing.
        self.layers = nn.ModuleList(
            [DFineDecoderLayer(config) for _ in range(config.decoder_layers)]
            + [DFineDecoderLayer(config) for _ in range(config.decoder_layers - self.eval_idx - 1)]
        )
    def forward(
        self,
        encoder_hidden_states: torch.Tensor,
        reference_points: torch.Tensor,
        inputs_embeds: torch.Tensor,
        spatial_shapes,
        level_start_index=None,
        spatial_shapes_list=None,
        encoder_attention_mask=None,
        memory_mask=None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> DFineDecoderOutput:
        if inputs_embeds is not None:
            hidden_states = inputs_embeds
        # decoder layers
        # Per-layer accumulators; stacked into tensors after the loop.
        intermediate = ()
        intermediate_reference_points = ()
        intermediate_logits = ()
        intermediate_predicted_corners = ()
        initial_reference_points = ()
        # Both start at 0 so the first layer's additions are no-ops; afterwards they carry
        # the *detached* previous-layer output and the undetached corner predictions.
        output_detach = pred_corners_undetach = 0
        # Projection vector mapping the discrete distribution bins to distances.
        project = weighting_function(self.max_num_bins, self.up, self.reg_scale)
        ref_points_detach = F.sigmoid(reference_points)
        for i, decoder_layer in enumerate(self.layers):
            ref_points_input = ref_points_detach.unsqueeze(2)
            # Clamp keeps the positional embedding numerically bounded.
            query_pos_embed = self.query_pos_head(ref_points_detach).clamp(min=-10, max=10)
            hidden_states = decoder_layer(
                hidden_states,
                position_embeddings=query_pos_embed,
                reference_points=ref_points_input,
                spatial_shapes=spatial_shapes,
                spatial_shapes_list=spatial_shapes_list,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                **kwargs,
            )
            if i == 0:
                # Initial bounding box predictions with inverse sigmoid refinement
                new_reference_points = F.sigmoid(
                    self.pre_bbox_head(hidden_states) + inverse_sigmoid(ref_points_detach)
                )
                ref_points_initial = new_reference_points.detach()
            # Refine bounding box corners using FDR, integrating previous layer's corrections
            if self.bbox_embed is not None:
                pred_corners = self.bbox_embed[i](hidden_states + output_detach) + pred_corners_undetach
                inter_ref_bbox = distance2bbox(
                    ref_points_initial, self.integral(pred_corners, project), self.reg_scale
                )
                pred_corners_undetach = pred_corners
                # Detach so gradients flow through each layer's refinement only once.
                ref_points_detach = inter_ref_bbox.detach()
                output_detach = hidden_states.detach()
            intermediate += (hidden_states,)
            # During eval, only the designated eval layer emits logits/boxes.
            if self.class_embed is not None and (self.training or i == self.eval_idx):
                scores = self.class_embed[i](hidden_states)
                # Add initial logits and reference points with pre-bbox head
                if i == 0:
                    intermediate_logits += (scores,)
                    intermediate_reference_points += (new_reference_points,)
                # Lqe does not affect the performance here.
                scores = self.lqe_layers[i](scores, pred_corners)
                intermediate_logits += (scores,)
                intermediate_reference_points += (inter_ref_bbox,)
                initial_reference_points += (ref_points_initial,)
                intermediate_predicted_corners += (pred_corners,)
        # Keep batch_size as first dimension
        intermediate = torch.stack(intermediate)
        if self.class_embed is not None and self.bbox_embed is not None:
            intermediate_logits = torch.stack(intermediate_logits, dim=1)
            intermediate_predicted_corners = torch.stack(intermediate_predicted_corners, dim=1)
            initial_reference_points = torch.stack(initial_reference_points, dim=1)
            intermediate_reference_points = torch.stack(intermediate_reference_points, dim=1)
        return DFineDecoderOutput(
            last_hidden_state=hidden_states,
            intermediate_hidden_states=intermediate,
            intermediate_logits=intermediate_logits,
            intermediate_reference_points=intermediate_reference_points,
            intermediate_predicted_corners=intermediate_predicted_corners,
            initial_reference_points=initial_reference_points,
        )
class DFineModel(RTDetrModel):
    """D-FINE base model: RT-DETR skeleton with the D-FINE hybrid encoder and FDR decoder."""

    def __init__(self, config: DFineConfig):
        super().__init__(config)
        # The RT-DETR input projections are rebuilt below with D-FINE's channel layout.
        del self.decoder_input_proj
        self.encoder = DFineHybridEncoder(config=config)
        num_backbone_outs = len(config.decoder_in_channels)
        in_channels = config.decoder_in_channels[-1]

        def build_projection(kernel_size: int, stride: int, padding: int) -> nn.Module:
            # Identity when the channel widths already match; otherwise conv + batch norm.
            if config.hidden_size == config.decoder_in_channels[-1]:
                return nn.Identity()
            conv = nn.Conv2d(
                in_channels, config.d_model, kernel_size=kernel_size, stride=stride, padding=padding, bias=False
            )
            return nn.Sequential(conv, nn.BatchNorm2d(config.d_model, config.batch_norm_eps))

        # One 1x1 projection per backbone output, then stride-2 3x3 projections for any
        # extra feature levels beyond what the backbone provides.
        projections = [build_projection(1, 1, 0) for _ in range(num_backbone_outs)]
        projections += [build_projection(3, 2, 1) for _ in range(config.num_feature_levels - num_backbone_outs)]
        self.decoder_input_proj = nn.ModuleList(projections)
        self.decoder = DFineDecoder(config)
class DFineForObjectDetection(RTDetrForObjectDetection):
    # When using clones, all layers > 0 will be clones, but layer 0 *is* required
    # We can't initialize the model on meta device as some weights are modified during the initialization
    _no_split_modules = None
    # Tie every cloned class/bbox head back to index 0 and share both head lists with the decoder.
    # NOTE(review): only the class_embed pattern is anchored with `^` -- presumably both are
    # matched identically; confirm against the tied-weights regex handling.
    _tied_weights_keys = {
        r"bbox_embed.(?![0])\d+": r"bbox_embed.0",
        r"class_embed.(?![0])\d+": r"^class_embed.0",
        "class_embed": "model.decoder.class_embed",
        "bbox_embed": "model.decoder.bbox_embed",
    }

    def __init__(self, config: DFineConfig):
        DFinePreTrainedModel.__init__(self, config)
        # D-FINE encoder-decoder model
        # Resolve a possibly-negative eval_idx into an absolute decoder-layer index.
        self.eval_idx = config.eval_idx if config.eval_idx >= 0 else config.decoder_layers + config.eval_idx
        self.model = DFineModel(config)
        scaled_dim = round(config.layer_scale * config.hidden_size)
        num_pred = config.decoder_layers
        self.class_embed = nn.ModuleList([nn.Linear(config.d_model, config.num_labels) for _ in range(num_pred)])
        # Box heads up to eval_idx use the full hidden size; the remaining clones use the scaled dim.
        self.bbox_embed = nn.ModuleList(
            [
                DFineMLP(config.hidden_size, config.hidden_size, 4 * (config.max_num_bins + 1), 3)
                for _ in range(self.eval_idx + 1)
            ]
            + [
                DFineMLP(scaled_dim, scaled_dim, 4 * (config.max_num_bins + 1), 3)
                for _ in range(config.decoder_layers - self.eval_idx - 1)
            ]
        )
        self.model.decoder.class_embed = self.class_embed
        self.model.decoder.bbox_embed = self.bbox_embed
        # Initialize weights and apply final processing
        self.post_init()

    def forward(self, **super_kwargs):
        r"""
        Example:
        ```python
        >>> import torch
        >>> from transformers.image_utils import load_image
        >>> from transformers import AutoImageProcessor, DFineForObjectDetection
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = load_image(url)
        >>> image_processor = AutoImageProcessor.from_pretrained("ustc-community/dfine-xlarge-coco")
        >>> model = DFineForObjectDetection.from_pretrained("ustc-community/dfine-xlarge-coco")
        >>> # prepare image for the model
        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> # forward pass
        >>> outputs = model(**inputs)
        >>> logits = outputs.logits
        >>> list(logits.shape)
        [1, 300, 80]
        >>> boxes = outputs.pred_boxes
        >>> list(boxes.shape)
        [1, 300, 4]
        >>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
        >>> target_sizes = torch.tensor([image.size[::-1]])
        >>> results = image_processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)
        >>> result = results[0]  # first image in batch
        >>> for score, label, box in zip(result["scores"], result["labels"], result["boxes"]):
        ...     box = [round(i, 2) for i in box.tolist()]
        ...     print(
        ...         f"Detected {model.config.id2label[label.item()]} with confidence "
        ...         f"{round(score.item(), 3)} at location {box}"
        ...     )
        Detected cat with confidence 0.958 at location [344.49, 23.4, 639.84, 374.27]
        Detected cat with confidence 0.956 at location [11.71, 53.52, 316.64, 472.33]
        Detected remote with confidence 0.947 at location [40.46, 73.7, 175.62, 117.57]
        Detected sofa with confidence 0.918 at location [0.59, 1.88, 640.25, 474.74]
        ```
        """
        # Fix: the original signature was `def forward(**super_kwargs)` with no `self`, so
        # calling the bound method raised a TypeError and zero-argument `super()` could not
        # resolve its frame. Also propagate the parent's output instead of discarding it.
        return super().forward(**super_kwargs)
# Public symbols exported by this modular definition (consumed by the modular
# converter and the package-level __init__).
__all__ = [
    "DFineConfig",
    "DFineModel",
    "DFinePreTrainedModel",
    "DFineForObjectDetection",
]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/d_fine/modular_d_fine.py",
"license": "Apache License 2.0",
"lines": 995,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:src/transformers/models/hgnet_v2/modular_hgnet_v2.py | # Copyright 2025 Baidu Inc and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from ... import initialization as init
from ...backbone_utils import BackboneConfigMixin, BackboneMixin
from ...configuration_utils import PreTrainedConfig
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
auto_docstring,
)
from ..rt_detr.modeling_rt_detr_resnet import RTDetrResNetConvLayer
# TODO: The modular conversion for resnet must be fixed, as it
# generates an incorrect import for the configuration (e.g. `resnet_resnet`).
class HGNetV2Config(BackboneConfigMixin, PreTrainedConfig):
    """
    This is the configuration class to store the configuration of a [`HGNetV2Backbone`]. It is used to instantiate a HGNet-V2
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of D-FINE-X-COCO B4 [ustc-community/dfine_x_coco](https://huggingface.co/ustc-community/dfine_x_coco).
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        embedding_size (`int`, *optional*, defaults to 64):
            Dimensionality (hidden size) for the embedding layer.
        depths (`list[int]`, *optional*, defaults to `[3, 4, 6, 3]`):
            Depth (number of layers) for each stage.
        hidden_sizes (`list[int]`, *optional*, defaults to `[256, 512, 1024, 2048]`):
            Dimensionality (hidden size) at each stage.
        hidden_act (`str`, *optional*, defaults to `"relu"`):
            The non-linear activation function in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"`
            are supported.
        out_features (`list[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        out_indices (`list[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
            If unset and `out_features` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        stem_channels (`list[int]`, *optional*, defaults to `[3, 32, 48]`):
            Channel dimensions for the stem layers:
            - First number (3) is input image channels
            - Second number (32) is intermediate stem channels
            - Third number (48) is output stem channels
        stage_in_channels (`list[int]`, *optional*, defaults to `[48, 128, 512, 1024]`):
            Input channel dimensions for each stage of the backbone.
            This defines how many channels the input to each stage will have.
        stage_mid_channels (`list[int]`, *optional*, defaults to `[48, 96, 192, 384]`):
            Mid-channel dimensions for each stage of the backbone.
            This defines the number of channels used in the intermediate layers of each stage.
        stage_out_channels (`list[int]`, *optional*, defaults to `[128, 512, 1024, 2048]`):
            Output channel dimensions for each stage of the backbone.
            This defines how many channels the output of each stage will have.
        stage_num_blocks (`list[int]`, *optional*, defaults to `[1, 1, 3, 1]`):
            Number of blocks to be used in each stage of the backbone.
            This controls the depth of each stage by specifying how many convolutional blocks to stack.
        stage_downsample (`list[bool]`, *optional*, defaults to `[False, True, True, True]`):
            Indicates whether to downsample the feature maps at each stage.
            If `True`, the spatial dimensions of the feature maps will be reduced.
        stage_light_block (`list[bool]`, *optional*, defaults to `[False, False, True, True]`):
            Indicates whether to use light blocks in each stage.
            Light blocks are a variant of convolutional blocks that may have fewer parameters.
        stage_kernel_size (`list[int]`, *optional*, defaults to `[3, 3, 5, 5]`):
            Kernel sizes for the convolutional layers in each stage.
        stage_numb_of_layers (`list[int]`, *optional*, defaults to `[6, 6, 6, 6]`):
            Number of layers to be used in each block of the stage.
        use_learnable_affine_block (`bool`, *optional*, defaults to `False`):
            Whether to use Learnable Affine Blocks (LAB) in the network.
            LAB adds learnable scale and bias parameters after certain operations.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    """
    model_type = "hgnet_v2"
    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        depths=None,
        hidden_sizes=None,
        hidden_act="relu",
        out_features=None,
        out_indices=None,
        stem_channels=None,
        stage_in_channels=None,
        stage_mid_channels=None,
        stage_out_channels=None,
        stage_num_blocks=None,
        stage_downsample=None,
        stage_light_block=None,
        stage_kernel_size=None,
        stage_numb_of_layers=None,
        use_learnable_affine_block=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Fix: the list defaults used to live in the signature. Mutable default arguments
        # are shared across all calls, so mutating one config's list (e.g. in-place edits
        # of `config.depths`) would silently change the defaults of every later config.
        # Resolving `None` here is behaviorally identical for all callers.
        depths = [3, 4, 6, 3] if depths is None else depths
        hidden_sizes = [256, 512, 1024, 2048] if hidden_sizes is None else hidden_sizes
        stem_channels = [3, 32, 48] if stem_channels is None else stem_channels
        stage_in_channels = [48, 128, 512, 1024] if stage_in_channels is None else stage_in_channels
        stage_mid_channels = [48, 96, 192, 384] if stage_mid_channels is None else stage_mid_channels
        stage_out_channels = [128, 512, 1024, 2048] if stage_out_channels is None else stage_out_channels
        stage_num_blocks = [1, 1, 3, 1] if stage_num_blocks is None else stage_num_blocks
        stage_downsample = [False, True, True, True] if stage_downsample is None else stage_downsample
        stage_light_block = [False, False, True, True] if stage_light_block is None else stage_light_block
        stage_kernel_size = [3, 3, 5, 5] if stage_kernel_size is None else stage_kernel_size
        stage_numb_of_layers = [6, 6, 6, 6] if stage_numb_of_layers is None else stage_numb_of_layers
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.hidden_act = hidden_act
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features)
        self.stem_channels = stem_channels
        self.stage_in_channels = stage_in_channels
        self.stage_mid_channels = stage_mid_channels
        self.stage_out_channels = stage_out_channels
        self.stage_num_blocks = stage_num_blocks
        self.stage_downsample = stage_downsample
        self.stage_light_block = stage_light_block
        self.stage_kernel_size = stage_kernel_size
        self.stage_numb_of_layers = stage_numb_of_layers
        self.use_learnable_affine_block = use_learnable_affine_block
        self.initializer_range = initializer_range
        # All per-stage lists must describe the same number of stages.
        if not (
            len(stage_in_channels)
            == len(stage_mid_channels)
            == len(stage_out_channels)
            == len(stage_num_blocks)
            == len(stage_downsample)
            == len(stage_light_block)
            == len(stage_kernel_size)
            == len(stage_numb_of_layers)
        ):
            raise ValueError("All stage configuration lists must have the same length.")
# General docstring
@auto_docstring
class HGNetV2PreTrainedModel(PreTrainedModel):
    config: HGNetV2Config
    base_model_prefix = "hgnetv2"
    main_input_name = "pixel_values"
    input_modalities = ("image",)
    _no_split_modules = ["HGNetV2BasicLayer"]

    def _init_weights(self, module):
        # Delegate the generic initialization first, then handle norm layers.
        super()._init_weights(module)
        # We need to check it like that as d_fine models replace the BatchNorm2d by their own
        module_type_name = type(module).__name__
        if "BatchNorm" in module_type_name:
            # Standard batch-norm reset: unit scale, zero shift, zeroed running stats.
            init.ones_(module.weight)
            init.zeros_(module.bias)
            init.zeros_(module.running_mean)
            init.ones_(module.running_var)
class HGNetV2LearnableAffineBlock(nn.Module):
    """Learnable elementwise affine transform: `scale * x + bias` with scalar parameters."""

    def __init__(self, scale_value: float = 1.0, bias_value: float = 0.0):
        super().__init__()
        # One-element parameters that broadcast over the whole input tensor.
        self.scale = nn.Parameter(torch.tensor([scale_value]), requires_grad=True)
        self.bias = nn.Parameter(torch.tensor([bias_value]), requires_grad=True)

    def forward(self, hidden_state: Tensor) -> Tensor:
        return hidden_state * self.scale + self.bias
class HGNetV2ConvLayer(RTDetrResNetConvLayer):
    """Conv + norm + activation block with an optional learnable affine block (LAB) on the output."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        activation: str = "relu",
        use_learnable_affine_block: bool = False,
    ):
        super().__init__(in_channels, out_channels, kernel_size, stride, activation)
        # Rebuild the convolution so that `groups` is honoured and "same"-style padding is used.
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            groups=groups,
            padding=(kernel_size - 1) // 2,
            bias=False,
        )
        # LAB is only attached when there is a non-identity activation to rescale.
        self.lab = HGNetV2LearnableAffineBlock() if (activation and use_learnable_affine_block) else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden = input
        # conv -> norm -> activation -> (optional) learnable affine block
        for op in (self.convolution, self.normalization, self.activation, self.lab):
            hidden = op(hidden)
        return hidden
class HGNetV2ConvLayerLight(nn.Module):
    """Light convolution block: a 1x1 pointwise projection followed by a depthwise convolution."""

    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int, use_learnable_affine_block: bool = False
    ):
        super().__init__()
        # 1x1 projection (no activation) cheaply changes the channel count.
        self.conv1 = HGNetV2ConvLayer(
            in_channels,
            out_channels,
            kernel_size=1,
            activation=None,
            use_learnable_affine_block=use_learnable_affine_block,
        )
        # Depthwise conv (groups == out_channels) performs the spatial mixing.
        self.conv2 = HGNetV2ConvLayer(
            out_channels,
            out_channels,
            kernel_size=kernel_size,
            groups=out_channels,
            use_learnable_affine_block=use_learnable_affine_block,
        )

    def forward(self, hidden_state: Tensor) -> Tensor:
        return self.conv2(self.conv1(hidden_state))
class HGNetV2Embeddings(nn.Module):
    """HGNet-V2 stem: two stride-2 reductions with a pooled/bottleneck split-and-concat in between."""

    def __init__(self, config: HGNetV2Config):
        super().__init__()
        stem_in, stem_mid, stem_out = config.stem_channels
        shared = {
            "activation": config.hidden_act,
            "use_learnable_affine_block": config.use_learnable_affine_block,
        }
        # First stride-2 reduction from the raw image channels.
        self.stem1 = HGNetV2ConvLayer(stem_in, stem_mid, kernel_size=3, stride=2, **shared)
        # 2x2 bottleneck branch: channels halved, then restored.
        self.stem2a = HGNetV2ConvLayer(stem_mid, stem_mid // 2, kernel_size=2, stride=1, **shared)
        self.stem2b = HGNetV2ConvLayer(stem_mid // 2, stem_mid, kernel_size=2, stride=1, **shared)
        # Second stride-2 reduction over the concatenated (pooled + branch) features.
        self.stem3 = HGNetV2ConvLayer(stem_mid * 2, stem_mid, kernel_size=3, stride=2, **shared)
        # Final 1x1 projection to the stem output width.
        self.stem4 = HGNetV2ConvLayer(stem_mid, stem_out, kernel_size=1, stride=1, **shared)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=1, ceil_mode=True)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        if pixel_values.shape[1] != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        stem1_out = self.stem1(pixel_values)
        # Asymmetric (right/bottom) padding keeps the 2x2 convs/pool size-consistent.
        stem1_out = F.pad(stem1_out, (0, 1, 0, 1))
        branch = self.stem2a(stem1_out)
        branch = F.pad(branch, (0, 1, 0, 1))
        branch = self.stem2b(branch)
        merged = torch.cat([self.pool(stem1_out), branch], dim=1)
        merged = self.stem3(merged)
        return self.stem4(merged)
class HGNetV2BasicLayer(nn.Module):
    """Stacked conv blocks whose input and all intermediate outputs are concatenated, then aggregated."""

    def __init__(
        self,
        in_channels: int,
        middle_channels: int,
        out_channels: int,
        layer_num: int,
        kernel_size: int = 3,
        residual: bool = False,
        light_block: bool = False,
        drop_path: float = 0.0,
        use_learnable_affine_block: bool = False,
    ):
        super().__init__()
        self.residual = residual
        self.layers = nn.ModuleList()
        for idx in range(layer_num):
            block_kwargs = {
                # Only the first block consumes the layer input width.
                "in_channels": in_channels if idx == 0 else middle_channels,
                "out_channels": middle_channels,
                "kernel_size": kernel_size,
                "use_learnable_affine_block": use_learnable_affine_block,
            }
            if light_block:
                self.layers.append(HGNetV2ConvLayerLight(**block_kwargs))
            else:
                self.layers.append(HGNetV2ConvLayer(stride=1, **block_kwargs))
        # feature aggregation: squeeze then excite over the concatenated features.
        total_channels = in_channels + layer_num * middle_channels
        self.aggregation = nn.Sequential(
            HGNetV2ConvLayer(
                total_channels,
                out_channels // 2,
                kernel_size=1,
                stride=1,
                use_learnable_affine_block=use_learnable_affine_block,
            ),
            HGNetV2ConvLayer(
                out_channels // 2,
                out_channels,
                kernel_size=1,
                stride=1,
                use_learnable_affine_block=use_learnable_affine_block,
            ),
        )
        self.drop_path = nn.Dropout(drop_path) if drop_path else nn.Identity()

    def forward(self, hidden_state: Tensor) -> Tensor:
        residual_input = hidden_state
        collected = [hidden_state]
        current = hidden_state
        for layer in self.layers:
            current = layer(current)
            collected.append(current)
        aggregated = self.aggregation(torch.cat(collected, dim=1))
        if self.residual:
            aggregated = self.drop_path(aggregated) + residual_input
        return aggregated
class HGNetV2Stage(nn.Module):
    """One backbone stage: optional depthwise stride-2 downsampling followed by stacked basic layers."""

    def __init__(self, config: HGNetV2Config, stage_index: int, drop_path: float = 0.0):
        super().__init__()
        idx = stage_index
        in_channels = config.stage_in_channels[idx]
        out_channels = config.stage_out_channels[idx]
        # Depthwise stride-2 conv halves the spatial resolution when requested.
        if config.stage_downsample[idx]:
            self.downsample = HGNetV2ConvLayer(
                in_channels, in_channels, kernel_size=3, stride=2, groups=in_channels, activation=None
            )
        else:
            self.downsample = nn.Identity()
        # Only the first block maps in_channels -> out_channels; the rest are residual.
        self.blocks = nn.ModuleList(
            [
                HGNetV2BasicLayer(
                    in_channels if block_idx == 0 else out_channels,
                    config.stage_mid_channels[idx],
                    out_channels,
                    config.stage_numb_of_layers[idx],
                    residual=(block_idx != 0),
                    kernel_size=config.stage_kernel_size[idx],
                    light_block=config.stage_light_block[idx],
                    drop_path=drop_path,
                    use_learnable_affine_block=config.use_learnable_affine_block,
                )
                for block_idx in range(config.stage_num_blocks[idx])
            ]
        )

    def forward(self, hidden_state: Tensor) -> Tensor:
        hidden_state = self.downsample(hidden_state)
        for block in self.blocks:
            hidden_state = block(hidden_state)
        return hidden_state
class HGNetV2Encoder(nn.Module):
    """Runs the stem output through every HGNetV2Stage, optionally collecting all hidden states."""

    def __init__(self, config: HGNetV2Config):
        super().__init__()
        num_stages = len(config.stage_in_channels)
        self.stages = nn.ModuleList([HGNetV2Stage(config, idx) for idx in range(num_stages)])

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        all_hidden_states = () if output_hidden_states else None
        for stage in self.stages:
            # Record the *input* of each stage, then its output after the loop,
            # so hidden_states[i] is the feature map entering stage i.
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_state,)
            hidden_state = stage(hidden_state)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in (hidden_state, all_hidden_states) if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=all_hidden_states,
        )
class HGNetV2Backbone(BackboneMixin, HGNetV2PreTrainedModel):
    has_attentions = False

    def __init__(self, config: HGNetV2Config):
        super().__init__(config)
        self.depths = config.depths
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = HGNetV2Embeddings(config)
        self.encoder = HGNetV2Encoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        **kwargs,
    ) -> BackboneOutput:
        r"""
        Examples:
        ```python
        >>> from transformers import HGNetV2Config, HGNetV2Backbone
        >>> import torch
        >>> config = HGNetV2Config()
        >>> model = HGNetV2Backbone(config)
        >>> pixel_values = torch.randn(1, 3, 224, 224)
        >>> with torch.no_grad():
        ...     outputs = model(pixel_values)
        >>> feature_maps = outputs.feature_maps
        >>> list(feature_maps[-1].shape)
        [1, 2048, 7, 7]
        ```"""
        if return_dict is None:
            return_dict = self.config.use_return_dict
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        embedding_output = self.embedder(pixel_values)
        # Always request hidden states internally: the feature maps are selected from them.
        encoder_outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = encoder_outputs.hidden_states
        feature_maps = tuple(
            hidden_states[idx] for idx, stage in enumerate(self.stage_names) if stage in self.out_features
        )
        if not return_dict:
            result = (feature_maps,)
            if output_hidden_states:
                result += (hidden_states,)
            return result
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=hidden_states if output_hidden_states else None,
            attentions=None,
        )
@auto_docstring(
    custom_intro="""
    HGNetV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """
)
class HGNetV2ForImageClassification(HGNetV2PreTrainedModel):
    def __init__(self, config: HGNetV2Config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.embedder = HGNetV2Embeddings(config)
        self.encoder = HGNetV2Encoder(config)
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.flatten = nn.Flatten()
        # Identity head when the model is used without labels (num_labels == 0).
        self.fc = nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        # classification head
        self.classifier = nn.ModuleList([self.avg_pool, self.flatten])
        # initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        **kwargs,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        Examples:
        ```python
        >>> import torch
        >>> import httpx
        >>> from io import BytesIO
        >>> from transformers import HGNetV2ForImageClassification, AutoImageProcessor
        >>> from PIL import Image
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))
        >>> model = HGNetV2ForImageClassification.from_pretrained("ustc-community/hgnet-v2")
        >>> processor = AutoImageProcessor.from_pretrained("ustc-community/hgnet-v2")
        >>> inputs = processor(images=image, return_tensors="pt")
        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        >>> outputs.logits.shape
        torch.Size([1, 2])
        ```"""
        if return_dict is None:
            return_dict = self.config.use_return_dict
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
        # Pool and flatten the final feature map, then project to logits.
        pooled = outputs[0]
        for head_layer in self.classifier:
            pooled = head_layer(pooled)
        logits = self.fc(pooled)
        loss = self.loss_function(labels, logits, self.config) if labels is not None else None
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
__all__ = ["HGNetV2Config", "HGNetV2Backbone", "HGNetV2PreTrainedModel", "HGNetV2ForImageClassification"]
| {
"repo_id": "huggingface/transformers",
"file_path": "src/transformers/models/hgnet_v2/modular_hgnet_v2.py",
"license": "Apache License 2.0",
"lines": 535,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/transformers:tests/models/d_fine/test_modeling_d_fine.py | # coding = utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch D-FINE model."""
import copy
import inspect
import math
import tempfile
import unittest
from functools import cached_property
from parameterized import parameterized
from transformers import (
DFineConfig,
HGNetV2Config,
is_torch_available,
is_vision_available,
)
from transformers.testing_utils import (
require_torch,
require_torch_accelerator,
require_vision,
slow,
torch_device,
)
if is_torch_available():
import torch
from transformers import DFineForObjectDetection, DFineModel
if is_vision_available():
from PIL import Image
from transformers import RTDetrImageProcessor
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
CHECKPOINT = "ustc-community/dfine-small-coco"
class DFineModelTester:
    """Builds a tiny `DFineConfig` plus random inputs shared by the D-FINE model tests."""
    def __init__(
        self,
        parent,
        batch_size=3,
        is_training=True,
        use_labels=True,
        n_targets=3,
        num_labels=10,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        batch_norm_eps=1e-5,
        # backbone
        backbone_config=None,
        # encoder HybridEncoder
        encoder_hidden_dim=32,
        encoder_in_channels=[128, 256, 512],
        feat_strides=[8, 16, 32],
        encoder_layers=1,
        encoder_ffn_dim=64,
        encoder_attention_heads=2,
        dropout=0.0,
        activation_dropout=0.0,
        encode_proj_layers=[2],
        positional_encoding_temperature=10000,
        encoder_activation_function="gelu",
        activation_function="silu",
        eval_size=None,
        normalize_before=False,
        # decoder DFineTransformer
        d_model=32,
        num_queries=30,
        decoder_in_channels=[32, 32, 32],
        decoder_ffn_dim=64,
        num_feature_levels=3,
        decoder_n_points=[3, 6, 3],
        decoder_n_levels=3,
        decoder_layers=2,
        decoder_attention_heads=2,
        decoder_activation_function="relu",
        attention_dropout=0.0,
        num_denoising=0,
        label_noise_ratio=0.5,
        box_noise_scale=1.0,
        learn_initial_query=False,
        anchor_image_size=None,
        image_size=64,
        disable_custom_kernels=True,
        with_box_refine=True,
        decoder_offset_scale=0.5,
        eval_idx=-1,
        layer_scale=1,
        reg_max=32,
        reg_scale=4.0,
        depth_mult=0.34,
        hidden_expansion=0.5,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = 3
        self.is_training = is_training
        self.use_labels = use_labels
        self.n_targets = n_targets
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.batch_norm_eps = batch_norm_eps
        self.backbone_config = backbone_config
        self.encoder_hidden_dim = encoder_hidden_dim
        self.encoder_in_channels = encoder_in_channels
        self.feat_strides = feat_strides
        self.encoder_layers = encoder_layers
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_attention_heads = encoder_attention_heads
        self.dropout = dropout
        self.activation_dropout = activation_dropout
        self.encode_proj_layers = encode_proj_layers
        self.positional_encoding_temperature = positional_encoding_temperature
        self.encoder_activation_function = encoder_activation_function
        self.activation_function = activation_function
        self.eval_size = eval_size
        self.normalize_before = normalize_before
        self.d_model = d_model
        self.num_queries = num_queries
        self.decoder_in_channels = decoder_in_channels
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_feature_levels = num_feature_levels
        self.decoder_n_points = decoder_n_points
        self.decoder_n_levels = decoder_n_levels
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_activation_function = decoder_activation_function
        self.attention_dropout = attention_dropout
        self.decoder_offset_scale = decoder_offset_scale
        self.eval_idx = eval_idx
        self.layer_scale = layer_scale
        self.reg_max = reg_max
        self.reg_scale = reg_scale
        self.depth_mult = depth_mult
        self.num_denoising = num_denoising
        self.label_noise_ratio = label_noise_ratio
        self.box_noise_scale = box_noise_scale
        self.learn_initial_query = learn_initial_query
        self.anchor_image_size = anchor_image_size
        self.image_size = image_size
        self.disable_custom_kernels = disable_custom_kernels
        self.with_box_refine = with_box_refine
        self.hidden_expansion = hidden_expansion
        # one token per spatial position of the stride-32 feature map (matches feat_strides[-1])
        self.encoder_seq_length = math.ceil(self.image_size / 32) * math.ceil(self.image_size / 32)
    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, pixel_mask, labels) with random tensors on torch_device."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        pixel_mask = torch.ones([self.batch_size, self.image_size, self.image_size], device=torch_device)
        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)
        config = self.get_config()
        config.num_labels = self.num_labels
        return config, pixel_values, pixel_mask, labels
    def get_config(self):
        """Build a small DFineConfig with an HGNetV2 backbone sized for fast tests."""
        hidden_sizes = [64, 128, 256, 512]
        backbone_config = HGNetV2Config(
            stage_in_channels=[16, 64, 128, 256],
            stage_mid_channels=[16, 32, 64, 128],
            stage_out_channels=[64, 128, 256, 512],
            stage_num_blocks=[1, 1, 2, 1],
            stage_downsample=[False, True, True, True],
            stage_light_block=[False, False, True, True],
            stage_kernel_size=[3, 3, 5, 5],
            stage_numb_of_layers=[3, 3, 3, 3],
            embeddings_size=10,
            hidden_sizes=hidden_sizes,
            depths=[1, 1, 2, 1],
            out_features=["stage2", "stage3", "stage4"],
            out_indices=[2, 3, 4],
            stem_channels=[3, 16, 16],
            use_lab=True,
        )
        return DFineConfig(
            backbone_config=backbone_config,
            encoder_hidden_dim=self.encoder_hidden_dim,
            encoder_in_channels=self.encoder_in_channels,
            feat_strides=self.feat_strides,
            encoder_layers=self.encoder_layers,
            encoder_ffn_dim=self.encoder_ffn_dim,
            encoder_attention_heads=self.encoder_attention_heads,
            dropout=self.dropout,
            activation_dropout=self.activation_dropout,
            encode_proj_layers=self.encode_proj_layers,
            positional_encoding_temperature=self.positional_encoding_temperature,
            encoder_activation_function=self.encoder_activation_function,
            activation_function=self.activation_function,
            eval_size=self.eval_size,
            normalize_before=self.normalize_before,
            d_model=self.d_model,
            num_queries=self.num_queries,
            decoder_in_channels=self.decoder_in_channels,
            decoder_ffn_dim=self.decoder_ffn_dim,
            num_feature_levels=self.num_feature_levels,
            decoder_n_points=self.decoder_n_points,
            decoder_n_levels=self.decoder_n_levels,
            decoder_layers=self.decoder_layers,
            decoder_attention_heads=self.decoder_attention_heads,
            decoder_activation_function=self.decoder_activation_function,
            decoder_offset_scale=self.decoder_offset_scale,
            eval_idx=self.eval_idx,
            layer_scale=self.layer_scale,
            reg_max=self.reg_max,
            reg_scale=self.reg_scale,
            depth_mult=self.depth_mult,
            attention_dropout=self.attention_dropout,
            num_denoising=self.num_denoising,
            label_noise_ratio=self.label_noise_ratio,
            box_noise_scale=self.box_noise_scale,
            learn_initial_query=self.learn_initial_query,
            anchor_image_size=self.anchor_image_size,
            image_size=self.image_size,
            disable_custom_kernels=self.disable_custom_kernels,
            with_box_refine=self.with_box_refine,
        )
    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as expected by the common ModelTesterMixin helpers."""
        config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
    def create_and_check_d_fine_model(self, config, pixel_values, pixel_mask, labels):
        """Forward DFineModel with and without pixel_mask and check last_hidden_state shape."""
        model = DFineModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.num_queries, self.d_model))
    def create_and_check_d_fine_object_detection_head_model(self, config, pixel_values, pixel_mask, labels):
        """Forward DFineForObjectDetection and check logits/boxes shapes and that a scalar loss is produced."""
        model = DFineForObjectDetection(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
@require_torch
class DFineModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for D-FINE, overriding the generic attention/hidden-state checks
    to account for the encoder-decoder detection outputs."""
    all_model_classes = (DFineModel, DFineForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"image-feature-extraction": DFineModel, "object-detection": DFineForObjectDetection}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    test_missing_keys = False
    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Extend the common input preparation with detection-style labels for the head model."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "DFineForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels
        return inputs_dict
    def setUp(self):
        self.model_tester = DFineModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=DFineConfig,
            has_text_modality=False,
            common_properties=["hidden_size", "num_attention_heads"],
        )
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_d_fine_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_d_fine_model(*config_and_inputs)
    def test_d_fine_object_detection_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_d_fine_object_detection_head_model(*config_and_inputs)
    @unittest.skip(reason="DFine doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass
    @unittest.skip(reason="DFine does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="DFine does not use test_inputs_embeds_matches_input_ids")
    def test_inputs_embeds_matches_input_ids(self):
        pass
    @unittest.skip(reason="DFine does not support input and output embeddings")
    def test_model_get_set_embeddings(self):
        pass
    @unittest.skip(reason="DFine does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    @unittest.skip(reason="DFine does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass
    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass
    @unittest.skip(reason="Weight tying is hardcoded (module_x = module_y) and always `True`")
    def test_load_save_without_tied_weights(self):
        pass
    def test_attention_outputs(self):
        """Check shapes/counts of encoder, decoder and cross attentions with eager attention."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.encoder_layers)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.encoder_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [
                    self.model_tester.encoder_attention_heads,
                    self.model_tester.encoder_seq_length,
                    self.model_tester.encoder_seq_length,
                ],
            )
            out_len = len(outputs)
            correct_outlen = 15
            # loss is at first position
            if "labels" in inputs_dict:
                correct_outlen += 1  # loss is added to beginning
            # Object Detection model returns pred_logits and pred_boxes
            if model_class.__name__ == "DFineForObjectDetection":
                correct_outlen += 2
            self.assertEqual(out_len, correct_outlen)
            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.decoder_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [
                    self.model_tester.decoder_attention_heads,
                    self.model_tester.num_queries,
                    self.model_tester.num_queries,
                ],
            )
            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.decoder_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [
                    self.model_tester.num_queries,
                    self.model_tester.decoder_attention_heads,
                    self.model_tester.decoder_n_levels * self.model_tester.decoder_n_points
                    if isinstance(self.model_tester.decoder_n_points, int)
                    else sum(self.model_tester.decoder_n_points),
                ],
            )
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # DFine should maintain encoder_hidden_states output
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.encoder_attentions
            self.assertEqual(len(self_attentions), self.model_tester.encoder_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [
                    self.model_tester.encoder_attention_heads,
                    self.model_tester.encoder_seq_length,
                    self.model_tester.encoder_seq_length,
                ],
            )
    def test_hidden_states_output(self):
        """Check counts and spatial shapes of encoder/decoder hidden states."""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", len(self.model_tester.encoder_in_channels) - 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)
            self.assertListEqual(
                list(hidden_states[1].shape[-2:]),
                [
                    self.model_tester.image_size // self.model_tester.feat_strides[-1],
                    self.model_tester.image_size // self.model_tester.feat_strides[-1],
                ],
            )
            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.decoder_layers + 1
                )
                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(hidden_states[0].shape[-2:]),
                    [self.model_tester.num_queries, self.model_tester.d_model],
                )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_retain_grad_hidden_states_attentions(self):
        """Verify gradients flow back to retained hidden states and attention tensors."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        # we take the first output since last_hidden_state is the first item
        output = outputs[0]
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_attentions = outputs.encoder_attentions[0]
        encoder_hidden_states.retain_grad()
        encoder_attentions.retain_grad()
        decoder_attentions = outputs.decoder_attentions[0]
        decoder_attentions.retain_grad()
        cross_attentions = outputs.cross_attentions[0]
        cross_attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(encoder_attentions.grad)
        self.assertIsNotNone(decoder_attentions.grad)
        self.assertIsNotNone(cross_attentions.grad)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_backbone_selection(self):
        """Check that timm and pretrained HF backbones can be plugged in via config kwargs."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def _validate_backbone_init(config):
            for model_class in self.all_model_classes:
                model = model_class(copy.deepcopy(config))
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                if model_class.__name__ == "DFineForObjectDetection":
                    expected_shape = (
                        self.model_tester.batch_size,
                        self.model_tester.num_queries,
                        self.model_tester.num_labels,
                    )
                    self.assertEqual(outputs.logits.shape, expected_shape)
                    # Confirm out_indices was propagated to backbone
                    self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3)
                else:
                    # Confirm out_indices was propagated to backbone
                    self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3)
                self.assertTrue(outputs)
        # These kwargs are all removed and are supported only for BC
        # In new models we have only `backbone_config`. Let's test that there is no regression
        # let's test a random timm backbone
        config_dict = config.to_dict()
        config_dict["encoder_in_channels"] = [24, 40, 432]
        config_dict["backbone"] = "tf_mobilenetv3_small_075"
        config_dict["backbone_config"] = None
        config_dict["use_timm_backbone"] = True
        config_dict["backbone_kwargs"] = {"out_indices": [2, 3, 4]}
        config = config.__class__(**config_dict)
        _validate_backbone_init(config)
        # Test a pretrained HF checkpoint as backbone
        config_dict = config.to_dict()
        config_dict["backbone"] = "microsoft/resnet-18"
        config_dict["backbone_config"] = None
        config_dict["use_timm_backbone"] = False
        config_dict["use_pretrained_backbone"] = True
        config_dict["backbone_kwargs"] = {"out_indices": [2, 3, 4]}
        config = config.__class__(**config_dict)
        _validate_backbone_init(config)
    @parameterized.expand(["float32", "float16", "bfloat16"])
    @require_torch_accelerator
    @slow
    def test_inference_with_different_dtypes(self, dtype_str):
        """Smoke-test a forward pass in fp32/fp16/bf16 on an accelerator."""
        dtype = {
            "float32": torch.float32,
            "float16": torch.float16,
            "bfloat16": torch.bfloat16,
        }[dtype_str]
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device).to(dtype)
            model.eval()
            for key, tensor in inputs_dict.items():
                if tensor.dtype == torch.float32:
                    inputs_dict[key] = tensor.to(dtype)
            with torch.no_grad():
                _ = model(**self._prepare_for_class(inputs_dict, model_class))
    @parameterized.expand(["float32", "float16", "bfloat16"])
    @require_torch_accelerator
    @slow
    def test_inference_equivalence_for_static_and_dynamic_anchors(self, dtype_str):
        """Static anchors (anchor_image_size set) must match dynamic anchors (None) numerically."""
        dtype = {
            "float32": torch.float32,
            "float16": torch.float16,
            "bfloat16": torch.bfloat16,
        }[dtype_str]
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        h, w = inputs_dict["pixel_values"].shape[-2:]
        # convert inputs to the desired dtype
        for key, tensor in inputs_dict.items():
            if tensor.dtype == torch.float32:
                inputs_dict[key] = tensor.to(dtype)
        for model_class in self.all_model_classes:
            with tempfile.TemporaryDirectory() as tmpdirname:
                model_class(config).save_pretrained(tmpdirname)
                model_static = model_class.from_pretrained(
                    tmpdirname, anchor_image_size=[h, w], device_map=torch_device, dtype=dtype
                ).eval()
                model_dynamic = model_class.from_pretrained(
                    tmpdirname, anchor_image_size=None, device_map=torch_device, dtype=dtype
                ).eval()
                self.assertIsNotNone(model_static.config.anchor_image_size)
                self.assertIsNone(model_dynamic.config.anchor_image_size)
                with torch.no_grad():
                    outputs_static = model_static(**self._prepare_for_class(inputs_dict, model_class))
                    outputs_dynamic = model_dynamic(**self._prepare_for_class(inputs_dict, model_class))
                torch.testing.assert_close(
                    outputs_static.last_hidden_state, outputs_dynamic.last_hidden_state, rtol=1e-4, atol=1e-4
                )
TOLERANCE = 1e-4
# We will verify our results on an image of cute cats
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_torch
@require_vision
@slow
class DFineModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the pretrained ustc-community/dfine-small-coco checkpoint."""
    @cached_property
    def default_image_processor(self):
        # RT-DETR's image processor is reused for D-FINE
        return RTDetrImageProcessor.from_pretrained(CHECKPOINT) if is_vision_available() else None
    def test_inference_object_detection_head(self):
        """Compare raw logits/boxes and post-processed detections against recorded values."""
        model = DFineForObjectDetection.from_pretrained(CHECKPOINT).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape_logits = torch.Size((1, 300, model.config.num_labels))
        self.assertEqual(outputs.logits.shape, expected_shape_logits)
        # golden values recorded from a reference run of the checkpoint
        expected_logits = torch.tensor(
            [
                [-3.8098, -4.7725, -5.9945],
                [-5.2975, -9.4991, -6.1654],
                [-5.3502, -3.9532, -6.3631],
            ]
        ).to(torch_device)
        expected_boxes = torch.tensor(
            [
                [0.7678, 0.4148, 0.4644],
                [0.1691, 0.1987, 0.2124],
                [0.2582, 0.5482, 0.4751],
            ]
        ).to(torch_device)
        torch.testing.assert_close(outputs.logits[0, :3, :3], expected_logits, atol=2e-4, rtol=2e-4)
        expected_shape_boxes = torch.Size((1, 300, 4))
        self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes)
        torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=2e-4, rtol=2e-4)
        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.0, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9642, 0.9542, 0.9536, 0.8548], device=torch_device)
        expected_labels = [15, 65, 15, 57]
        expected_slice_boxes = torch.tensor(
            [
                [1.3186e01, 5.4130e01, 3.1727e02, 4.7212e02],
                [4.0275e01, 7.2975e01, 1.7620e02, 1.1777e02],
                [3.4276e02, 2.3428e01, 6.3998e02, 3.7477e02],
                [5.8418e-01, 1.1794e00, 6.3933e02, 4.7486e02],
            ],
            device=torch_device,
        )
        torch.testing.assert_close(results["scores"][:4], expected_scores, atol=1e-3, rtol=1e-4)
        self.assertSequenceEqual(results["labels"][:4].tolist(), expected_labels)
        torch.testing.assert_close(results["boxes"][:4], expected_slice_boxes[:4], atol=1e-3, rtol=1e-4)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/d_fine/test_modeling_d_fine.py",
"license": "Apache License 2.0",
"lines": 634,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/models/hgnet_v2/test_modeling_hgnet_v2.py | # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from transformers import HGNetV2Config
from transformers.testing_utils import require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
from transformers import HGNetV2Backbone, HGNetV2ForImageClassification
class HGNetV2ModelTester:
    """Builds a tiny `HGNetV2Config` plus random inputs shared by the HGNetV2 tests."""
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[64, 128, 256, 512],
        stage_in_channels=[16, 64, 128, 256],
        stage_mid_channels=[16, 32, 64, 128],
        stage_out_channels=[64, 128, 256, 512],
        stage_num_blocks=[1, 1, 2, 1],
        stage_downsample=[False, True, True, True],
        stage_light_block=[False, False, True, True],
        stage_kernel_size=[3, 3, 5, 5],
        stage_numb_of_layers=[3, 3, 3, 3],
        stem_channels=[3, 16, 16],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.stage_in_channels = stage_in_channels
        self.stage_mid_channels = stage_mid_channels
        self.stage_out_channels = stage_out_channels
        self.stage_num_blocks = stage_num_blocks
        self.stage_downsample = stage_downsample
        self.stage_light_block = stage_light_block
        self.stage_kernel_size = stage_kernel_size
        self.stage_numb_of_layers = stage_numb_of_layers
        self.stem_channels = stem_channels
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # one stage per entry in hidden_sizes
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """Build a small HGNetV2Config from the tester's attributes."""
        return HGNetV2Config(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            stage_in_channels=self.stage_in_channels,
            stage_mid_channels=self.stage_mid_channels,
            stage_out_channels=self.stage_out_channels,
            stage_num_blocks=self.stage_num_blocks,
            stage_downsample=self.stage_downsample,
            stage_light_block=self.stage_light_block,
            stage_kernel_size=self.stage_kernel_size,
            stage_numb_of_layers=self.stage_numb_of_layers,
            stem_channels=self.stem_channels,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_backbone(self, config, pixel_values, labels):
        """Check feature-map shapes/channels with explicit out_features and with out_features=None."""
        model = HGNetV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = HGNetV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Forward the classification head and check the logits shape."""
        config.num_labels = self.num_labels
        model = HGNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as expected by the common ModelTesterMixin helpers."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RTDetrResNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    """Common backbone tests for HGNetV2Backbone."""
    # NOTE(review): the class name looks copy-pasted from the RT-DETR ResNet tests —
    # presumably it should be named after HGNetV2; confirm before renaming.
    all_model_classes = (HGNetV2Backbone,) if is_torch_available() else ()
    has_attentions = False
    config_class = HGNetV2Config
    def setUp(self):
        self.model_tester = HGNetV2ModelTester(self)
@require_torch
class HGNetV2ForImageClassificationTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some tests of test_modeling_common.py, as HGNetV2 does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """
    all_model_classes = (HGNetV2ForImageClassification, HGNetV2Backbone) if is_torch_available() else ()
    pipeline_model_mapping = {"image-classification": HGNetV2ForImageClassification} if is_torch_available() else {}
    test_resize_embeddings = False
    has_attentions = False
    def setUp(self):
        self.model_tester = HGNetV2ModelTester(self)
    @unittest.skip(reason="HGNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass
    @unittest.skip(reason="HGNetV2 does not have input/output embeddings")
    def test_model_get_set_embeddings(self):
        pass
    @unittest.skip(reason="HGNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="HGNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_hidden_states_output(self):
        """Check the number of hidden states (stages + stem) and their spatial shape."""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            self.assertEqual(len(hidden_states), self.model_tester.num_stages + 1)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip(reason="Retain_grad is not supposed to be tested")
    def test_retain_grad_hidden_states_attentions(self):
        pass
    @unittest.skip(reason="HGNetV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="HGNetV2 does not use model")
    def test_model_from_pretrained(self):
        pass
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/models/hgnet_v2/test_modeling_hgnet_v2.py",
"license": "Apache License 2.0",
"lines": 206,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:examples/modular-transformers/modular_test_detr.py | from transformers.models.deformable_detr.modeling_deformable_detr import DeformableDetrModel
# Here, the old and new model have by essence a common "detr" suffix. Make sure everything is correctly named
# in this case (i.e., we do not wrongly detect `Detr` as part of a suffix to remove)
class TestDetrModel(DeformableDetrModel):
    """Modular-converter fixture: inherits DeformableDetrModel unchanged to
    exercise suffix handling when old and new model names share "Detr"."""

    pass
| {
"repo_id": "huggingface/transformers",
"file_path": "examples/modular-transformers/modular_test_detr.py",
"license": "Apache License 2.0",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
infiniflow/ragflow:api/apps/services/canvas_replica_service.py | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import random
import time
from api.db import CanvasCategory
from rag.utils.redis_conn import REDIS_CONN, RedisDistributedLock
class CanvasReplicaService:
    """
    Manage per-user canvas runtime replicas stored in Redis.
    Lifecycle:
    - bootstrap: initialize/refresh replica from DB DSL
    - load_for_run: read replica before run
    - commit_after_run: atomically persist run result back to replica
    """

    # Replica entries expire 3 hours after the last write (_write_payload refreshes TTL).
    TTL_SECS = 3 * 60 * 60
    REPLICA_KEY_PREFIX = "canvas:replica"
    LOCK_KEY_PREFIX = "canvas:replica:lock"
    # Distributed-lock tuning used by replace_for_set() via _acquire_lock_with_retry().
    LOCK_TIMEOUT_SECS = 10
    LOCK_BLOCKING_TIMEOUT_SECS = 1
    LOCK_RETRY_ATTEMPTS = 3
    LOCK_RETRY_SLEEP_SECS = 0.2

    @classmethod
    def normalize_dsl(cls, dsl):
        """Normalize DSL to a JSON-serializable dict. Raise ValueError on invalid input."""
        normalized = dsl
        if isinstance(normalized, str):
            try:
                normalized = json.loads(normalized)
            except Exception as e:
                raise ValueError("Invalid DSL JSON string.") from e
        if not isinstance(normalized, dict):
            raise ValueError("DSL must be a JSON object.")
        try:
            # Round-trip through json: rejects non-serializable values early and
            # returns a detached deep copy of the caller's dict.
            return json.loads(json.dumps(normalized, ensure_ascii=False))
        except Exception as e:
            raise ValueError("DSL is not JSON-serializable.") from e

    @classmethod
    def _replica_key(cls, canvas_id: str, tenant_id: str, runtime_user_id: str) -> str:
        # Redis key for one (canvas, tenant, user) replica.
        return f"{cls.REPLICA_KEY_PREFIX}:{canvas_id}:{tenant_id}:{runtime_user_id}"

    @classmethod
    def _lock_key(cls, canvas_id: str, tenant_id: str, runtime_user_id: str) -> str:
        # Lock key mirrors the replica key so locking is per-replica.
        return f"{cls.LOCK_KEY_PREFIX}:{canvas_id}:{tenant_id}:{runtime_user_id}"

    @classmethod
    def _read_payload(cls, replica_key: str):
        """Read replica payload from Redis; return None on missing/invalid content."""
        cache_blob = REDIS_CONN.get(replica_key)
        if not cache_blob:
            return None
        try:
            payload = json.loads(cache_blob)
            if not isinstance(payload, dict):
                return None
            # Re-normalize the embedded DSL so callers always see a clean dict.
            payload["dsl"] = cls.normalize_dsl(payload.get("dsl", {}))
            return payload
        except Exception as e:
            # Corrupted cache entries are treated as absent, not fatal.
            logging.warning("Failed to parse canvas replica %s: %s", replica_key, e)
            return None

    @classmethod
    def _write_payload(cls, replica_key: str, payload: dict):
        """Write payload and refresh TTL."""
        payload["updated_at"] = int(time.time())
        REDIS_CONN.set_obj(replica_key, payload, cls.TTL_SECS)

    @classmethod
    def _build_payload(
        cls,
        canvas_id: str,
        tenant_id: str,
        runtime_user_id: str,
        dsl,
        canvas_category=CanvasCategory.Agent,
        title="",
    ):
        # Canonical replica shape shared by all writers.
        return {
            "canvas_id": canvas_id,
            "tenant_id": str(tenant_id),
            "runtime_user_id": str(runtime_user_id),
            "title": title or "",
            "canvas_category": canvas_category or CanvasCategory.Agent,
            "dsl": cls.normalize_dsl(dsl),
            "updated_at": int(time.time()),
        }

    @classmethod
    def create_if_absent(
        cls,
        canvas_id: str,
        tenant_id: str,
        runtime_user_id: str,
        dsl,
        canvas_category=CanvasCategory.Agent,
        title="",
    ):
        """Create a runtime replica if it does not exist; otherwise keep existing state."""
        replica_key = cls._replica_key(canvas_id, str(tenant_id), str(runtime_user_id))
        payload = cls._read_payload(replica_key)
        if payload:
            # Existing runtime state wins over the DB DSL passed in.
            return payload
        payload = cls._build_payload(canvas_id, str(tenant_id), str(runtime_user_id), dsl, canvas_category, title)
        cls._write_payload(replica_key, payload)
        return payload

    @classmethod
    def bootstrap(
        cls,
        canvas_id: str,
        tenant_id: str,
        runtime_user_id: str,
        dsl,
        canvas_category=CanvasCategory.Agent,
        title="",
    ):
        """Bootstrap replica by creating it when absent and keeping existing runtime state."""
        # Thin alias over create_if_absent kept for a clearer lifecycle API.
        return cls.create_if_absent(
            canvas_id=canvas_id,
            tenant_id=tenant_id,
            runtime_user_id=runtime_user_id,
            dsl=dsl,
            canvas_category=canvas_category,
            title=title,
        )

    @classmethod
    def load_for_run(cls, canvas_id: str, tenant_id: str, runtime_user_id: str):
        """Load current runtime replica used by /completion."""
        replica_key = cls._replica_key(canvas_id, str(tenant_id), str(runtime_user_id))
        return cls._read_payload(replica_key)

    @classmethod
    def replace_for_set(
        cls,
        canvas_id: str,
        tenant_id: str,
        runtime_user_id: str,
        dsl,
        canvas_category=CanvasCategory.Agent,
        title="",
    ):
        """Replace replica content for `/set` under lock."""
        replica_key = cls._replica_key(canvas_id, str(tenant_id), str(runtime_user_id))
        lock_key = cls._lock_key(canvas_id, str(tenant_id), str(runtime_user_id))
        lock = cls._acquire_lock_with_retry(lock_key)
        if not lock:
            logging.error("Failed to acquire canvas replica lock after retry: %s", lock_key)
            return False
        try:
            updated_payload = cls._build_payload(
                canvas_id=canvas_id,
                tenant_id=str(tenant_id),
                runtime_user_id=str(runtime_user_id),
                dsl=dsl,
                canvas_category=canvas_category,
                title=title,
            )
            cls._write_payload(replica_key, updated_payload)
            return True
        except Exception:
            logging.exception("Failed to replace canvas replica from /set.")
            return False
        finally:
            # Release is best-effort; a failed release only delays the lock's own timeout.
            try:
                lock.release()
            except Exception:
                logging.exception("Failed to release canvas replica lock: %s", lock_key)

    @classmethod
    def _acquire_lock_with_retry(cls, lock_key: str):
        """Acquire distributed lock with bounded retries; return lock object or None."""
        lock = RedisDistributedLock(
            lock_key,
            timeout=cls.LOCK_TIMEOUT_SECS,
            blocking_timeout=cls.LOCK_BLOCKING_TIMEOUT_SECS,
        )
        for idx in range(cls.LOCK_RETRY_ATTEMPTS):
            if lock.acquire():
                return lock
            if idx < cls.LOCK_RETRY_ATTEMPTS - 1:
                # Jittered back-off to avoid lockstep retries across workers.
                time.sleep(cls.LOCK_RETRY_SLEEP_SECS + random.uniform(0, 0.1))
        return None

    @classmethod
    def commit_after_run(
        cls,
        canvas_id: str,
        tenant_id: str,
        runtime_user_id: str,
        dsl,
        canvas_category=CanvasCategory.Agent,
        title="",
    ):
        """
        Commit post-run DSL into replica.
        Returns:
            bool: True on committed/saved, False on commit failure.
        """
        # Validate/copy the DSL before touching Redis so failures happen early.
        new_dsl = cls.normalize_dsl(dsl)
        replica_key = cls._replica_key(canvas_id, str(tenant_id), str(runtime_user_id))
        # NOTE(review): unlike replace_for_set(), this read-modify-write path takes
        # no distributed lock, so concurrent commits for the same replica may
        # interleave — confirm that last-writer-wins is acceptable here.
        try:
            latest_payload = cls._read_payload(replica_key)
            # Always write latest runtime DSL back to Redis first.
            # Category/title fall back to the stored values when the caller passes
            # falsy ones and a previous payload exists.
            updated_payload = cls._build_payload(
                canvas_id=canvas_id,
                tenant_id=str(tenant_id),
                runtime_user_id=str(runtime_user_id),
                dsl=new_dsl,
                canvas_category=canvas_category if not latest_payload else (canvas_category or latest_payload.get("canvas_category", CanvasCategory.Agent)),
                title=title if not latest_payload else (title or latest_payload.get("title", "")),
            )
            cls._write_payload(replica_key, updated_payload)
            return True
        except Exception:
            logging.exception("Failed to commit canvas runtime replica.")
            return False
| {
"repo_id": "infiniflow/ragflow",
"file_path": "api/apps/services/canvas_replica_service.py",
"license": "Apache License 2.0",
"lines": 221,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:rag/utils/lazy_image.py | import logging
from io import BytesIO
from PIL import Image
from rag.nlp import concat_img
class LazyDocxImage:
    """Lazily decode one or more image blobs and combine them via concat_img.

    Decoding is deferred until the image is actually used; the combined PIL
    image is cached on the instance and can be detached or closed explicitly.
    Attribute access and numpy conversion transparently materialize the image.
    """

    def __init__(self, blobs, source=None):
        # Keep only non-empty blobs; nothing is decoded yet.
        self._blobs = [b for b in (blobs or []) if b]
        self.source = source
        self._pil = None  # cached combined PIL image, built on first use

    def __bool__(self):
        # Truthy when there is at least one blob to decode.
        return bool(self._blobs)

    def to_pil(self):
        """Decode all blobs, combine them with concat_img, cache and return the result.

        Returns None when every blob fails to decode. Bad blobs are skipped
        with a log line rather than raising.
        """
        if self._pil is not None:
            try:
                # load() verifies the cached image is still usable (not closed).
                self._pil.load()
                return self._pil
            except Exception:
                # Cached image is stale/closed: drop it and rebuild from blobs.
                try:
                    self._pil.close()
                except Exception:
                    pass
                self._pil = None
        res_img = None
        for blob in self._blobs:
            try:
                image = Image.open(BytesIO(blob)).convert("RGB")
            except Exception as e:
                logging.info(f"LazyDocxImage: skip bad image blob: {e}")
                continue
            if res_img is None:
                res_img = image
                continue
            new_img = concat_img(res_img, image)
            if new_img is not res_img:
                # concat_img produced a fresh image: close both inputs to
                # release their buffers before adopting the result.
                try:
                    res_img.close()
                except Exception:
                    pass
                try:
                    image.close()
                except Exception:
                    pass
                res_img = new_img
        self._pil = res_img
        return self._pil

    def to_pil_detached(self):
        # Hand ownership of the decoded image to the caller: the cache is
        # cleared so a later close() here cannot invalidate it.
        pil = self.to_pil()
        self._pil = None
        return pil

    def close(self):
        # Release the cached image (if any); safe to call repeatedly.
        if self._pil is not None:
            try:
                self._pil.close()
            except Exception:
                pass
            self._pil = None
        return None

    def __getattr__(self, name):
        # Delegate unknown attributes to the materialized PIL image.
        pil = self.to_pil()
        if pil is None:
            raise AttributeError(name)
        return getattr(pil, name)

    def __array__(self, dtype=None):
        # numpy interop: np.array(lazy_img) materializes and converts.
        import numpy as np
        pil = self.to_pil()
        if pil is None:
            return np.array([], dtype=dtype)
        return np.array(pil, dtype=dtype)

    def __enter__(self):
        # Context-manager use yields the decoded PIL image...
        return self.to_pil()

    def __exit__(self, exc_type, exc, tb):
        # ...and closes the cached copy on exit; exceptions propagate.
        self.close()
        return False
def ensure_pil_image(img):
    """Coerce *img* to a PIL.Image.Image.

    Lazy wrappers are materialized via to_pil(); real PIL images pass
    through unchanged; anything else yields None.
    """
    if isinstance(img, LazyDocxImage):
        return img.to_pil()
    return img if isinstance(img, Image.Image) else None
def is_image_like(img):
    """Return True if *img* is a PIL image or a lazy DOCX image wrapper.

    Idiom fix: a single isinstance() call with a tuple of types replaces the
    original `isinstance(...) or isinstance(...)` chain — same semantics.
    """
    return isinstance(img, (Image.Image, LazyDocxImage))
def open_image_for_processing(img, *, allow_bytes=False):
    """Return a ``(image, should_close)`` pair for downstream processing.

    - PIL images are returned as-is (caller must not close them).
    - LazyDocxImage is detached, transferring close responsibility to the caller.
    - Raw bytes are decoded only when *allow_bytes* is True; decode failures
      yield ``(None, False)``.
    - Anything else is passed through unchanged with ``should_close=False``.
    """
    if isinstance(img, Image.Image):
        return img, False
    if isinstance(img, LazyDocxImage):
        return img.to_pil_detached(), True
    if allow_bytes and isinstance(img, (bytes, bytearray)):
        try:
            decoded = Image.open(BytesIO(img)).convert("RGB")
        except Exception as e:
            logging.info(f"open_image_for_processing: bad bytes: {e}")
            return None, False
        return decoded, True
    return img, False
| {
"repo_id": "infiniflow/ragflow",
"file_path": "rag/utils/lazy_image.py",
"license": "Apache License 2.0",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
infiniflow/ragflow:test/testcases/test_http_api/test_file_management_within_dataset/test_metadata_batch_update.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from common import metadata_batch_update, list_documents, delete_documents, upload_documents
def _create_and_upload_in_batches(auth, dataset_id, num_docs, tmp_path, batch_size=100):
    """Create and upload documents in batches to avoid too many open files."""
    uploaded_ids = []
    for start in range(0, num_docs, batch_size):
        stop = min(start + batch_size, num_docs)
        paths = []
        for idx in range(start, stop):
            path = tmp_path / f"ragflow_test_upload_{idx}.txt"
            path.write_text(f"Test document content {idx}\n" * 10)
            paths.append(path)
        res = upload_documents(auth, dataset_id, paths)
        uploaded_ids.extend(doc["id"] for doc in res["data"])
    return uploaded_ids
@pytest.mark.p3
class TestMetadataBatchUpdate:
    def test_batch_update_metadata(self, HttpApiAuth, add_dataset, ragflow_tmp_dir):
        """
        Test batch_update_metadata via HTTP API.
        This test calls the real batch_update_metadata on the server.
        """
        dataset_id = add_dataset
        # Upload documents in batches to avoid too many open files.
        # BUG FIX: the failure message below previously said "Expected 1100"
        # while asserting 1010; the count is now a single named constant.
        num_docs = 1010
        document_ids = _create_and_upload_in_batches(HttpApiAuth, dataset_id, num_docs, ragflow_tmp_dir)
        # Update metadata via batch update API.
        updates = [{"key": "author", "value": "new_author"}, {"key": "status", "value": "processed"}]
        res = metadata_batch_update(HttpApiAuth, dataset_id, {"selector": {"document_ids": document_ids}, "updates": updates})
        # Verify the API call succeeded and every document was touched.
        assert res["code"] == 0, f"Expected code 0, got {res.get('code')}: {res.get('message')}"
        assert res["data"]["updated"] == num_docs, f"Expected {num_docs} documents updated, got {res['data']['updated']}"
        # Verify metadata was updated for the first and last few sample documents.
        sample_ids = document_ids[:5] + document_ids[-5:]
        list_res = list_documents(HttpApiAuth, dataset_id, {"ids": sample_ids})
        assert list_res["code"] == 0
        for doc in list_res["data"]["docs"]:
            assert doc["meta_fields"].get("author") == "new_author", f"Expected author='new_author', got {doc['meta_fields'].get('author')}"
            assert doc["meta_fields"].get("status") == "processed", f"Expected status='processed', got {doc['meta_fields'].get('status')}"
        # Cleanup: ids=None deletes all documents in the dataset.
        delete_documents(HttpApiAuth, dataset_id, {"ids": None})
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_file_management_within_dataset/test_metadata_batch_update.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_auth_app/test_oauth_client_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import importlib.util
import sys
import urllib.parse
from pathlib import Path
from types import ModuleType
import pytest
class _FakeResponse:
def __init__(self, payload=None, err=None):
self._payload = payload or {}
self._err = err
def raise_for_status(self):
if self._err:
raise self._err
def json(self):
return self._payload
def _base_config(scope="openid profile"):
return {
"client_id": "client-1",
"client_secret": "secret-1",
"authorization_url": "https://issuer.example/authorize",
"token_url": "https://issuer.example/token",
"userinfo_url": "https://issuer.example/userinfo",
"redirect_uri": "https://app.example/callback",
"scope": scope,
}
def _load_oauth_module(monkeypatch):
    """Exec api/apps/auth/oauth.py in isolation with stubbed parent packages.

    Synthetic ``common``/``api`` package modules are injected into sys.modules
    (via monkeypatch, so they are removed after the test) and the HTTP helpers
    default to no-op fakes that individual tests replace.
    """
    repo_root = Path(__file__).resolve().parents[4]
    common_pkg = ModuleType("common")
    common_pkg.__path__ = [str(repo_root / "common")]
    monkeypatch.setitem(sys.modules, "common", common_pkg)
    # Stub common.http_client with benign defaults; tests monkeypatch these.
    http_client_mod = ModuleType("common.http_client")

    async def _default_async_request(*_args, **_kwargs):
        return _FakeResponse({})

    def _default_sync_request(*_args, **_kwargs):
        return _FakeResponse({})

    http_client_mod.async_request = _default_async_request
    http_client_mod.sync_request = _default_sync_request
    monkeypatch.setitem(sys.modules, "common.http_client", http_client_mod)
    # Synthetic package chain api -> api.apps -> api.apps.auth pointing at the repo.
    api_pkg = ModuleType("api")
    api_pkg.__path__ = [str(repo_root / "api")]
    apps_pkg = ModuleType("api.apps")
    apps_pkg.__path__ = [str(repo_root / "api" / "apps")]
    auth_pkg = ModuleType("api.apps.auth")
    auth_pkg.__path__ = [str(repo_root / "api" / "apps" / "auth")]
    monkeypatch.setitem(sys.modules, "api", api_pkg)
    monkeypatch.setitem(sys.modules, "api.apps", apps_pkg)
    monkeypatch.setitem(sys.modules, "api.apps.auth", auth_pkg)
    # Drop any previously imported oauth module so each test gets a fresh exec.
    sys.modules.pop("api.apps.auth.oauth", None)
    oauth_path = repo_root / "api" / "apps" / "auth" / "oauth.py"
    oauth_spec = importlib.util.spec_from_file_location("api.apps.auth.oauth", oauth_path)
    oauth_module = importlib.util.module_from_spec(oauth_spec)
    monkeypatch.setitem(sys.modules, "api.apps.auth.oauth", oauth_module)
    oauth_spec.loader.exec_module(oauth_module)
    return oauth_module
@pytest.fixture(scope="session", autouse=True)
def set_tenant_info():
    # NOTE(review): presumably shadows a conftest autouse fixture of the same
    # name so these unit tests skip tenant setup — confirm against conftest.
    return None
@pytest.mark.p2
def test_oauth_client_sync_matrix_unit(monkeypatch):
    """Synchronous OAuthClient matrix: config wiring, authorization URL,
    token exchange, user-info fetch/normalization, and error wrapping."""
    oauth_module = _load_oauth_module(monkeypatch)
    client = oauth_module.OAuthClient(_base_config())
    # Config fields are copied onto the client verbatim.
    assert client.client_id == "client-1"
    assert client.client_secret == "secret-1"
    assert client.authorization_url.endswith("/authorize")
    assert client.token_url.endswith("/token")
    assert client.userinfo_url.endswith("/userinfo")
    assert client.redirect_uri.endswith("/callback")
    assert client.scope == "openid profile"
    assert client.http_request_timeout == 7
    info = oauth_module.UserInfo("u@example.com", "user1", "User One", "avatar-url")
    assert info.to_dict() == {
        "email": "u@example.com",
        "username": "user1",
        "nickname": "User One",
        "avatar_url": "avatar-url",
    }
    # Authorization URL carries all query parameters, state URL-encoded round-trips.
    auth_url = client.get_authorization_url(state="s p/a?ce")
    parsed = urllib.parse.urlparse(auth_url)
    query = urllib.parse.parse_qs(parsed.query)
    assert parsed.scheme == "https"
    assert query["client_id"] == ["client-1"]
    assert query["redirect_uri"] == ["https://app.example/callback"]
    assert query["response_type"] == ["code"]
    assert query["scope"] == ["openid profile"]
    assert query["state"] == ["s p/a?ce"]
    # A None scope must be omitted from the query entirely.
    no_scope_client = oauth_module.OAuthClient(_base_config(scope=None))
    no_scope_query = urllib.parse.parse_qs(urllib.parse.urlparse(no_scope_client.get_authorization_url()).query)
    assert "scope" not in no_scope_query
    # Happy path: record every outbound request for later header assertions.
    call_log = []

    def _sync_ok(method, url, data=None, headers=None, timeout=None):
        call_log.append((method, url, data, headers, timeout))
        if url.endswith("/token"):
            return _FakeResponse({"access_token": "token-1"})
        return _FakeResponse({"email": "user@example.com", "picture": "id-picture"})

    monkeypatch.setattr(oauth_module, "sync_request", _sync_ok)
    token = client.exchange_code_for_token("code-1")
    assert token["access_token"] == "token-1"
    user_info = client.fetch_user_info("access-1")
    assert isinstance(user_info, oauth_module.UserInfo)
    # username/nickname derive from the email local part; "picture" maps to avatar_url.
    assert user_info.to_dict() == {
        "email": "user@example.com",
        "username": "user",
        "nickname": "user",
        "avatar_url": "id-picture",
    }
    # Token exchange is a POST with JSON Accept; user info is a Bearer GET.
    assert call_log[0][0] == "POST"
    assert call_log[0][3]["Accept"] == "application/json"
    assert call_log[1][0] == "GET"
    assert call_log[1][3]["Authorization"] == "Bearer access-1"
    normalized = client.normalize_user_info(
        {"email": "fallback@example.com", "username": "fallback-user", "nickname": "fallback-nick", "avatar_url": "direct-avatar"}
    )
    assert normalized.to_dict()["avatar_url"] == "direct-avatar"
    # Failure path: transport errors are wrapped in ValueError with a stable prefix.
    monkeypatch.setattr(oauth_module, "sync_request", lambda *_args, **_kwargs: _FakeResponse(err=RuntimeError("status boom")))
    with pytest.raises(ValueError, match="Failed to exchange authorization code for token: status boom"):
        client.exchange_code_for_token("code-2")
    with pytest.raises(ValueError, match="Failed to fetch user info: status boom"):
        client.fetch_user_info("access-2")
@pytest.mark.p2
def test_oauth_client_async_matrix_unit(monkeypatch):
    """Async OAuthClient matrix: token exchange, user-info fetch, error wrapping."""
    oauth_module = _load_oauth_module(monkeypatch)
    client = oauth_module.OAuthClient(_base_config())

    async def _async_ok(method, url, data=None, headers=None, **kwargs):
        # Consume unused args so linters stay quiet; dispatch on the URL suffix.
        _ = (method, data, headers, kwargs.get("timeout"))
        if url.endswith("/token"):
            return _FakeResponse({"access_token": "token-async"})
        return _FakeResponse({"email": "async@example.com", "username": "async-user", "nickname": "Async User", "avatar_url": "async-avatar"})

    monkeypatch.setattr(oauth_module, "async_request", _async_ok)
    token = asyncio.run(client.async_exchange_code_for_token("code-a"))
    assert token["access_token"] == "token-async"
    info = asyncio.run(client.async_fetch_user_info("async-token"))
    assert info.to_dict() == {
        "email": "async@example.com",
        "username": "async-user",
        "nickname": "Async User",
        "avatar_url": "async-avatar",
    }
    # Failure path: errors surface as ValueError with the same prefixes as sync.

    async def _async_fail(*_args, **_kwargs):
        return _FakeResponse(err=RuntimeError("async boom"))

    monkeypatch.setattr(oauth_module, "async_request", _async_fail)
    with pytest.raises(ValueError, match="Failed to exchange authorization code for token: async boom"):
        asyncio.run(client.async_exchange_code_for_token("code-b"))
    with pytest.raises(ValueError, match="Failed to fetch user info: async boom"):
        asyncio.run(client.async_fetch_user_info("async-token-2"))
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_auth_app/test_oauth_client_unit.py",
"license": "Apache License 2.0",
"lines": 160,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_canvas_app/test_canvas_routes_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import importlib.util
import inspect
import sys
from copy import deepcopy
from functools import partial
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _AwaitableValue:
def __init__(self, value):
self._value = value
def __await__(self):
async def _co():
return self._value
return _co().__await__()
class _Args(dict):
def get(self, key, default=None, type=None):
value = super().get(key, default)
if value is None or type is None:
return value
try:
return type(value)
except (TypeError, ValueError):
return default
class _StubHeaders:
def __init__(self):
self._items = []
def add_header(self, key, value):
self._items.append((key, value))
def get(self, key, default=None):
for existing_key, value in reversed(self._items):
if existing_key == key:
return value
return default
class _StubResponse:
    """Response stub exposing the attribute surface the canvas routes touch."""

    def __init__(self, body, mimetype=None, content_type=None):
        # The routes read either .response or .body; keep both aliased.
        self.response = self.body = body
        self.mimetype = mimetype
        self.content_type = content_type
        self.headers = _StubHeaders()
class _DummyRequest:
    """Request stub mirroring the parts of the Quart request the routes use."""

    def __init__(self, *, headers=None, args=None, files=None, method="POST", content_length=0):
        self.method = method
        self.content_length = content_length
        self.headers = headers or {}
        self.args = args or _Args()
        # ``await request.files`` must work, so wrap in an awaitable.
        self.files = _AwaitableValue(files if files is not None else {})
class _DummyRetCode:
    # Numeric return codes mirroring the shape of common.constants.RetCode.
    SUCCESS = 0
    EXCEPTION_ERROR = 100
    ARGUMENT_ERROR = 101
    DATA_ERROR = 102
    OPERATING_ERROR = 103
class _DummyCanvasCategory:
    # String constants standing in for api.db.CanvasCategory.
    Agent = "agent"
    DataFlow = "dataflow"
class _TaskField:
    # Deliberately returns an expression tuple instead of a bool so code under
    # test can build ORM-style filters like ``Task.doc_id == x`` (see the
    # _DummyTask stub wired into api.db.db_models below).
    def __eq__(self, other):
        return ("eq", other)
class _DummyTask:
    # Stand-in for the Task model; doc_id supports ``==`` filter expressions.
    doc_id = _TaskField()
class _FileMap(dict):
def getlist(self, key):
return list(self.get(key, []))
def _run(coro):
    # Shorthand: drive a coroutine to completion on a fresh event loop.
    return asyncio.run(coro)
async def _collect_stream(body):
items = []
if hasattr(body, "__aiter__"):
async for item in body:
if isinstance(item, bytes):
item = item.decode("utf-8")
items.append(item)
else:
for item in body:
if isinstance(item, bytes):
item = item.decode("utf-8")
items.append(item)
return items
def _set_request_json(monkeypatch, module, payload):
async def _req():
return deepcopy(payload)
monkeypatch.setattr(module, "get_request_json", _req)
@pytest.fixture(scope="session")
def auth():
    # Static placeholder credential; route auth itself is neutralized by the
    # login_required stub installed in _load_canvas_module.
    return "unit-auth"
@pytest.fixture(scope="session", autouse=True)
def set_tenant_info():
    # NOTE(review): presumably shadows a conftest autouse fixture of the same
    # name so no real tenant setup runs for these unit tests — confirm.
    return None
def _load_canvas_module(monkeypatch):
    """Exec api/apps/canvas_app.py in isolation, stubbing its whole import graph.

    Every module canvas_app imports (common.*, api.*, rag.*, agent.*, quart) is
    replaced in sys.modules with a minimal stand-in via monkeypatch.setitem, so
    the stubs are removed automatically after each test. Returns the freshly
    exec'd canvas_app module with ``manager`` replaced by _DummyManager.
    """
    repo_root = Path(__file__).resolve().parents[4]
    # --- common.* stubs -----------------------------------------------------
    common_pkg = ModuleType("common")
    common_pkg.__path__ = [str(repo_root / "common")]
    monkeypatch.setitem(sys.modules, "common", common_pkg)
    settings_mod = ModuleType("common.settings")
    settings_mod.docStoreConn = SimpleNamespace(
        index_exist=lambda *_args, **_kwargs: False,
        delete=lambda *_args, **_kwargs: True,
    )
    common_pkg.settings = settings_mod
    monkeypatch.setitem(sys.modules, "common.settings", settings_mod)
    constants_mod = ModuleType("common.constants")
    constants_mod.RetCode = _DummyRetCode
    monkeypatch.setitem(sys.modules, "common.constants", constants_mod)
    misc_utils_mod = ModuleType("common.misc_utils")
    misc_utils_mod.get_uuid = lambda: "uuid-1"

    async def _thread_pool_exec(func, *args, **kwargs):
        # Run "pooled" work inline so tests stay deterministic.
        return func(*args, **kwargs)

    misc_utils_mod.thread_pool_exec = _thread_pool_exec
    monkeypatch.setitem(sys.modules, "common.misc_utils", misc_utils_mod)
    # --- api.* package skeleton --------------------------------------------
    api_pkg = ModuleType("api")
    api_pkg.__path__ = [str(repo_root / "api")]
    monkeypatch.setitem(sys.modules, "api", api_pkg)
    apps_mod = ModuleType("api.apps")
    apps_mod.__path__ = []
    apps_mod.current_user = SimpleNamespace(id="user-1")
    # Neutralize auth: login_required becomes an identity decorator.
    apps_mod.login_required = lambda func: func
    monkeypatch.setitem(sys.modules, "api.apps", apps_mod)
    apps_services_pkg = ModuleType("api.apps.services")
    apps_services_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "api.apps.services", apps_services_pkg)
    apps_mod.services = apps_services_pkg
    canvas_replica_mod = ModuleType("api.apps.services.canvas_replica_service")

    class _StubCanvasReplicaService:
        # Replica service stub: no Redis, trivially successful operations.
        @classmethod
        def normalize_dsl(cls, dsl):
            import json
            if isinstance(dsl, str):
                return json.loads(dsl)
            return dsl

        @classmethod
        def bootstrap(cls, *_args, **_kwargs):
            return {}

        @classmethod
        def load_for_run(cls, *_args, **_kwargs):
            return None

        @classmethod
        def commit_after_run(cls, *_args, **_kwargs):
            return True

        @classmethod
        def replace_for_set(cls, *_args, **_kwargs):
            return True

        @classmethod
        def create_if_absent(cls, *_args, **_kwargs):
            return {}

    canvas_replica_mod.CanvasReplicaService = _StubCanvasReplicaService
    monkeypatch.setitem(sys.modules, "api.apps.services.canvas_replica_service", canvas_replica_mod)
    apps_services_pkg.canvas_replica_service = canvas_replica_mod
    # --- api.db.* service stubs --------------------------------------------
    db_pkg = ModuleType("api.db")
    db_pkg.CanvasCategory = _DummyCanvasCategory
    monkeypatch.setitem(sys.modules, "api.db", db_pkg)
    services_pkg = ModuleType("api.db.services")
    services_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "api.db.services", services_pkg)
    canvas_service_mod = ModuleType("api.db.services.canvas_service")

    class _StubCanvasTemplateService:
        @staticmethod
        def get_all():
            return []

    class _StubUserCanvasService:
        @staticmethod
        def accessible(*_args, **_kwargs):
            return True

        @staticmethod
        def delete_by_id(*_args, **_kwargs):
            return True

        @staticmethod
        def query(*_args, **_kwargs):
            return []

        @staticmethod
        def save(**_kwargs):
            return True

        @staticmethod
        def update_by_id(*_args, **_kwargs):
            return True

        @staticmethod
        def get_by_canvas_id(_canvas_id):
            return True, {"id": _canvas_id}

        @staticmethod
        def get_by_id(_canvas_id):
            # Owned-by-user-1 record with an empty DSL; matches current_user above.
            return True, SimpleNamespace(
                id=_canvas_id,
                user_id="user-1",
                dsl="{}",
                canvas_category=_DummyCanvasCategory.Agent,
                to_dict=lambda: {"id": _canvas_id},
            )

        @staticmethod
        def get_by_tenant_ids(*_args, **_kwargs):
            return [], 0

    class _StubAPI4ConversationService:
        @staticmethod
        def get_names(*_args, **_kwargs):
            return []

        @staticmethod
        def get_list(*_args, **_kwargs):
            return 0, []

        @staticmethod
        def save(**_kwargs):
            return True

        @staticmethod
        def get_by_id(_session_id):
            return True, SimpleNamespace(to_dict=lambda: {"id": _session_id})

        @staticmethod
        def delete_by_id(*_args, **_kwargs):
            return True

    async def _completion(*_args, **_kwargs):
        # Empty async generator: the unreachable yield marks it as a generator.
        if False:
            yield {}

    canvas_service_mod.CanvasTemplateService = _StubCanvasTemplateService
    canvas_service_mod.UserCanvasService = _StubUserCanvasService
    canvas_service_mod.API4ConversationService = _StubAPI4ConversationService
    canvas_service_mod.completion = _completion
    monkeypatch.setitem(sys.modules, "api.db.services.canvas_service", canvas_service_mod)
    document_service_mod = ModuleType("api.db.services.document_service")
    document_service_mod.DocumentService = SimpleNamespace(
        clear_chunk_num_when_rerun=lambda *_args, **_kwargs: True,
        update_by_id=lambda *_args, **_kwargs: True,
    )
    monkeypatch.setitem(sys.modules, "api.db.services.document_service", document_service_mod)
    file_service_mod = ModuleType("api.db.services.file_service")
    file_service_mod.FileService = SimpleNamespace(
        upload_info=lambda *_args, **_kwargs: {"ok": True},
        get_blob=lambda *_args, **_kwargs: b"",
    )
    monkeypatch.setitem(sys.modules, "api.db.services.file_service", file_service_mod)
    pipeline_log_service_mod = ModuleType("api.db.services.pipeline_operation_log_service")
    pipeline_log_service_mod.PipelineOperationLogService = SimpleNamespace(
        get_documents_info=lambda *_args, **_kwargs: [],
        update_by_id=lambda *_args, **_kwargs: True,
    )
    monkeypatch.setitem(sys.modules, "api.db.services.pipeline_operation_log_service", pipeline_log_service_mod)
    task_service_mod = ModuleType("api.db.services.task_service")
    task_service_mod.queue_dataflow = lambda *_args, **_kwargs: (True, "")
    task_service_mod.CANVAS_DEBUG_DOC_ID = "debug-doc"
    task_service_mod.TaskService = SimpleNamespace(filter_delete=lambda *_args, **_kwargs: True)
    monkeypatch.setitem(sys.modules, "api.db.services.task_service", task_service_mod)
    user_service_mod = ModuleType("api.db.services.user_service")
    user_service_mod.TenantService = SimpleNamespace(get_joined_tenants_by_user_id=lambda *_args, **_kwargs: [])
    monkeypatch.setitem(sys.modules, "api.db.services.user_service", user_service_mod)
    canvas_version_mod = ModuleType("api.db.services.user_canvas_version")
    canvas_version_mod.UserCanvasVersionService = SimpleNamespace(
        insert=lambda **_kwargs: True,
        delete_all_versions=lambda *_args, **_kwargs: True,
        list_by_canvas_id=lambda *_args, **_kwargs: [],
        get_by_id=lambda *_args, **_kwargs: (True, None),
        save_or_replace_latest=lambda *_args, **_kwargs: True,
        build_version_title=lambda *_args, **_kwargs: "stub_version_title",
    )
    monkeypatch.setitem(sys.modules, "api.db.services.user_canvas_version", canvas_version_mod)
    db_models_mod = ModuleType("api.db.db_models")

    class _StubAPIToken:
        @staticmethod
        def query(**_kwargs):
            return []

    db_models_mod.APIToken = _StubAPIToken
    db_models_mod.Task = _DummyTask
    monkeypatch.setitem(sys.modules, "api.db.db_models", db_models_mod)
    # --- api.utils.api_utils stub: plain-dict JSON results ------------------
    api_utils_mod = ModuleType("api.utils.api_utils")

    def _get_json_result(code=_DummyRetCode.SUCCESS, message="success", data=None):
        return {"code": code, "message": message, "data": data}

    def _get_data_error_result(code=_DummyRetCode.DATA_ERROR, message="Sorry! Data missing!"):
        return {"code": code, "message": message}

    def _server_error_response(exc):
        return {"code": _DummyRetCode.EXCEPTION_ERROR, "message": repr(exc), "data": None}

    async def _get_request_json():
        # Default empty body; tests override via _set_request_json().
        return {}

    def _validate_request(*_args, **_kwargs):
        def _decorator(func):
            return func
        return _decorator

    api_utils_mod.get_json_result = _get_json_result
    api_utils_mod.server_error_response = _server_error_response
    api_utils_mod.validate_request = _validate_request
    api_utils_mod.get_data_error_result = _get_data_error_result
    api_utils_mod.get_request_json = _get_request_json
    monkeypatch.setitem(sys.modules, "api.utils.api_utils", api_utils_mod)
    # --- rag.* stubs --------------------------------------------------------
    rag_pkg = ModuleType("rag")
    rag_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "rag", rag_pkg)
    rag_flow_pkg = ModuleType("rag.flow")
    rag_flow_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "rag.flow", rag_flow_pkg)
    pipeline_mod = ModuleType("rag.flow.pipeline")

    class _StubPipeline:
        def __init__(self, *_args, **_kwargs):
            pass

    pipeline_mod.Pipeline = _StubPipeline
    monkeypatch.setitem(sys.modules, "rag.flow.pipeline", pipeline_mod)
    rag_nlp_mod = ModuleType("rag.nlp")
    rag_nlp_mod.search = SimpleNamespace(index_name=lambda tenant_id: f"idx-{tenant_id}")
    monkeypatch.setitem(sys.modules, "rag.nlp", rag_nlp_mod)
    rag_utils_pkg = ModuleType("rag.utils")
    rag_utils_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "rag.utils", rag_utils_pkg)
    redis_mod = ModuleType("rag.utils.redis_conn")
    redis_mod.REDIS_CONN = SimpleNamespace(
        set=lambda *_args, **_kwargs: True,
        get=lambda *_args, **_kwargs: None,
    )
    monkeypatch.setitem(sys.modules, "rag.utils.redis_conn", redis_mod)
    # --- agent.* stubs ------------------------------------------------------
    agent_pkg = ModuleType("agent")
    agent_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "agent", agent_pkg)
    agent_component_mod = ModuleType("agent.component")

    class _StubLLM:
        pass

    agent_component_mod.LLM = _StubLLM
    monkeypatch.setitem(sys.modules, "agent.component", agent_component_mod)
    agent_canvas_mod = ModuleType("agent.canvas")

    class _StubCanvas:
        # Minimal Canvas: no execution, empty components, "{}" serialization.
        def __init__(self, dsl, _user_id, _agent_id=None, canvas_id=None):
            self.dsl = dsl
            self.id = canvas_id

        async def run(self, **_kwargs):
            if False:
                yield {}

        def cancel_task(self):
            return None

        def reset(self):
            return None

        def get_component_input_form(self, _component_id):
            return {}

        def get_component(self, _component_id):
            return {"obj": SimpleNamespace(reset=lambda: None, invoke=lambda **_kwargs: None, output=lambda: {})}

        def __str__(self):
            return "{}"

    agent_canvas_mod.Canvas = _StubCanvas
    monkeypatch.setitem(sys.modules, "agent.canvas", agent_canvas_mod)
    # --- quart stub ---------------------------------------------------------
    quart_mod = ModuleType("quart")
    quart_mod.request = _DummyRequest()
    quart_mod.Response = _StubResponse

    async def _make_response(blob):
        return {"blob": blob}

    quart_mod.make_response = _make_response
    monkeypatch.setitem(sys.modules, "quart", quart_mod)
    # --- exec the real canvas_app with the stubbed world in place -----------
    module_path = repo_root / "api" / "apps" / "canvas_app.py"
    spec = importlib.util.spec_from_file_location("test_canvas_routes_unit_module", module_path)
    module = importlib.util.module_from_spec(spec)
    # Inject the manager before exec so the route decorators resolve to no-ops.
    module.manager = _DummyManager()
    monkeypatch.setitem(sys.modules, "test_canvas_routes_unit_module", module)
    spec.loader.exec_module(module)
    return module
@pytest.mark.p2
def test_templates_rm_save_get_matrix_unit(monkeypatch):
    """Cover templates(), rm(), save() (create and update) and get() success/failure paths."""
    module = _load_canvas_module(monkeypatch)
    # Minimal stand-in for a canvas template row.
    class _Template:
        def __init__(self, template_id):
            self.template_id = template_id
        def to_dict(self):
            return {"id": self.template_id}
    # templates(): serializes every row returned by CanvasTemplateService.get_all.
    monkeypatch.setattr(module.CanvasTemplateService, "get_all", lambda: [_Template("tpl-1")])
    res = module.templates()
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"] == [{"id": "tpl-1"}]
    # rm(): a non-owner is rejected with an operating error.
    _set_request_json(monkeypatch, module, {"canvas_ids": ["c1", "c2"]})
    monkeypatch.setattr(module.UserCanvasService, "accessible", lambda *_args, **_kwargs: False)
    res = _run(inspect.unwrap(module.rm)())
    assert res["code"] == module.RetCode.OPERATING_ERROR
    assert "Only owner of canvas authorized" in res["message"]
    # rm(): the owner deletes every listed canvas id, in order.
    deleted = []
    _set_request_json(monkeypatch, module, {"canvas_ids": ["c1", "c2"]})
    monkeypatch.setattr(module.UserCanvasService, "accessible", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.UserCanvasService, "delete_by_id", lambda canvas_id: deleted.append(canvas_id))
    res = _run(inspect.unwrap(module.rm)())
    assert res["data"] is True
    assert deleted == ["c1", "c2"]
    # save() (create): a title that already exists is refused with a data error.
    _set_request_json(monkeypatch, module, {"title": " Demo ", "dsl": {"n": 1}})
    monkeypatch.setattr(module.UserCanvasService, "query", lambda **_kwargs: [object()])
    res = _run(inspect.unwrap(module.save)())
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "already exists" in res["message"]
    # save() (create): a failed persist is reported as a data error.
    _set_request_json(monkeypatch, module, {"title": "Demo", "dsl": {"n": 1}})
    monkeypatch.setattr(module, "get_uuid", lambda: "canvas-new")
    monkeypatch.setattr(module.UserCanvasService, "query", lambda **_kwargs: [])
    monkeypatch.setattr(module.UserCanvasService, "save", lambda **_kwargs: False)
    res = _run(inspect.unwrap(module.save)())
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "Fail to save canvas." in res["message"]
    # save() (create): success persists the row under the generated uuid and
    # records a version snapshot via save_or_replace_latest.
    created = {"save": [], "versions": []}
    _set_request_json(monkeypatch, module, {"title": "Demo", "dsl": {"n": 1}})
    monkeypatch.setattr(module, "get_uuid", lambda: "canvas-new")
    monkeypatch.setattr(module.UserCanvasService, "query", lambda **_kwargs: [])
    monkeypatch.setattr(module.UserCanvasService, "save", lambda **kwargs: created["save"].append(kwargs) or True)
    monkeypatch.setattr(module.UserCanvasVersionService, "save_or_replace_latest", lambda *_args, **kwargs: created["versions"].append(("save_or_replace_latest", kwargs)))
    res = _run(inspect.unwrap(module.save)())
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"]["id"] == "canvas-new"
    assert created["save"]
    assert any(item[0] == "save_or_replace_latest" for item in created["versions"])
    # save() (update, payload has "id"): a caller without access is rejected.
    _set_request_json(monkeypatch, module, {"id": "canvas-1", "title": "Renamed", "dsl": "{\"m\": 1}"})
    monkeypatch.setattr(module.UserCanvasService, "accessible", lambda *_args, **_kwargs: False)
    res = _run(inspect.unwrap(module.save)())
    assert res["code"] == module.RetCode.OPERATING_ERROR
    # save() (update): success updates the row and records a version snapshot.
    updates = []
    versions = []
    _set_request_json(monkeypatch, module, {"id": "canvas-1", "title": "Renamed", "dsl": "{\"m\": 1}"})
    monkeypatch.setattr(module.UserCanvasService, "accessible", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.UserCanvasService, "update_by_id", lambda canvas_id, payload: updates.append((canvas_id, payload)))
    monkeypatch.setattr(module.UserCanvasVersionService, "save_or_replace_latest", lambda *_args, **kwargs: versions.append(("save_or_replace_latest", kwargs)))
    res = _run(inspect.unwrap(module.save)())
    assert res["code"] == module.RetCode.SUCCESS
    assert updates and updates[0][0] == "canvas-1"
    assert any(item[0] == "save_or_replace_latest" for item in versions)
    # get(): an inaccessible canvas is reported as not found (data error).
    monkeypatch.setattr(module.UserCanvasService, "accessible", lambda *_args, **_kwargs: False)
    res = module.get("canvas-1")
    assert res["code"] == module.RetCode.DATA_ERROR
    assert res["message"] == "canvas not found."
    # get(): an accessible canvas returns its payload.
    monkeypatch.setattr(module.UserCanvasService, "accessible", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.UserCanvasService, "get_by_canvas_id", lambda _canvas_id: (True, {"id": "canvas-1"}))
    res = module.get("canvas-1")
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"]["id"] == "canvas-1"
@pytest.mark.p2
def test_getsse_auth_token_and_ownership_matrix_unit(monkeypatch):
    """Walk getsse() through each auth/ownership failure before the happy path."""
    module = _load_canvas_module(monkeypatch)

    def _with_auth(header_value):
        # Point the module-level request at a stub carrying the given Authorization header.
        monkeypatch.setattr(module, "request", _DummyRequest(headers={"Authorization": header_value}))

    # 1) A bare "Bearer" header with no token part is rejected outright.
    _with_auth("Bearer")
    result = module.getsse("canvas-1")
    assert result["message"] == "Authorization is not valid!"

    # 2) A token matching no APIToken row is reported as an invalid API key.
    _with_auth("Bearer invalid")
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [])
    result = module.getsse("canvas-1")
    assert "API key is invalid" in result["message"]

    # 3) Valid token but the tenant has no such canvas -> operating error.
    _with_auth("Bearer ok")
    monkeypatch.setattr(module.APIToken, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-1")])
    monkeypatch.setattr(module.UserCanvasService, "query", lambda **_kwargs: [])
    result = module.getsse("canvas-1")
    assert result["code"] == module.RetCode.OPERATING_ERROR

    # 4) Canvas row lookup fails -> "canvas not found."
    monkeypatch.setattr(module.UserCanvasService, "query", lambda **_kwargs: [object()])
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _canvas_id: (False, None))
    result = module.getsse("canvas-1")
    assert result["message"] == "canvas not found."

    # 5) Canvas exists but is owned by a different tenant -> still "not found".
    wrong_tenant = SimpleNamespace(user_id="tenant-2", to_dict=lambda: {"id": "canvas-1"})
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _canvas_id: (True, wrong_tenant))
    result = module.getsse("canvas-1")
    assert result["message"] == "canvas not found."

    # 6) Owner matches the token's tenant -> success with the canvas payload.
    owned = SimpleNamespace(user_id="tenant-1", to_dict=lambda: {"id": "canvas-1"})
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _canvas_id: (True, owned))
    result = module.getsse("canvas-1")
    assert result["code"] == module.RetCode.SUCCESS
    assert result["data"]["id"] == "canvas-1"
@pytest.mark.p2
def test_run_dataflow_and_canvas_sse_matrix_unit(monkeypatch):
    """Exercise run(): access control, missing replica, dataflow queueing, agent SSE streaming."""
    module = _load_canvas_module(monkeypatch)
    # Run thread_pool_exec'd callables inline so the test stays deterministic.
    async def _thread_pool_exec(func, *args, **kwargs):
        return func(*args, **kwargs)
    monkeypatch.setattr(module, "thread_pool_exec", _thread_pool_exec)
    # run(): caller without access -> operating error.
    _set_request_json(monkeypatch, module, {"id": "c1"})
    monkeypatch.setattr(module.UserCanvasService, "accessible", lambda *_args, **_kwargs: False)
    res = _run(inspect.unwrap(module.run)())
    assert res["code"] == module.RetCode.OPERATING_ERROR
    # run(): accessible canvas but no replica loaded yet -> explicit guidance message.
    _set_request_json(monkeypatch, module, {"id": "c1"})
    monkeypatch.setattr(module.UserCanvasService, "accessible", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.CanvasReplicaService, "load_for_run", lambda *_args, **_kwargs: None)
    res = _run(inspect.unwrap(module.run)())
    assert res["message"] == "canvas replica not found, please call /get/<canvas_id> first."
    # DataFlow canvas: a queue_dataflow failure surfaces as a data error
    # (Pipeline was still constructed during the call).
    pipeline_calls = []
    monkeypatch.setattr(module, "Pipeline", lambda *args, **kwargs: pipeline_calls.append((args, kwargs)))
    monkeypatch.setattr(module, "get_uuid", lambda: "task-1")
    _set_request_json(monkeypatch, module, {"id": "df-1", "files": ["f1"], "user_id": "exp-1"})
    monkeypatch.setattr(module.CanvasReplicaService, "load_for_run", lambda *_args, **_kwargs: {"dsl": {"n": 1}, "title": "df", "canvas_category": module.CanvasCategory.DataFlow})
    monkeypatch.setattr(module, "queue_dataflow", lambda *_args, **_kwargs: (False, "queue failed"))
    res = _run(inspect.unwrap(module.run)())
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "queue failed" in res["message"]
    assert pipeline_calls
    # DataFlow canvas: queueing success returns the generated task id as message_id.
    _set_request_json(monkeypatch, module, {"id": "df-1", "files": ["f1"], "user_id": "exp-1"})
    monkeypatch.setattr(module, "queue_dataflow", lambda *_args, **_kwargs: (True, ""))
    res = _run(inspect.unwrap(module.run)())
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"]["message_id"] == "task-1"
    # Agent canvas: Canvas construction failure maps to EXCEPTION_ERROR.
    _set_request_json(monkeypatch, module, {"id": "ag-1", "query": "q", "files": [], "inputs": {}})
    monkeypatch.setattr(module.CanvasReplicaService, "load_for_run", lambda *_args, **_kwargs: {"dsl": {"x": 1}, "title": "ag", "canvas_category": module.CanvasCategory.Agent})
    monkeypatch.setattr(module, "Canvas", lambda *_args, **_kwargs: (_ for _ in ()).throw(RuntimeError("canvas init failed")))
    res = _run(inspect.unwrap(module.run)())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "canvas init failed" in res["message"]
    updates = []
    # Agent canvas, streaming success: answers from Canvas.run are relayed over SSE.
    class _CanvasSSESuccess:
        def __init__(self, *_args, **_kwargs):
            self.cancelled = False
        async def run(self, **_kwargs):
            yield {"answer": "stream-ok"}
        def cancel_task(self):
            self.cancelled = True
        def __str__(self):
            # str(canvas) is what the app persists as the updated DSL.
            return '{"updated": true}'
    _set_request_json(monkeypatch, module, {"id": "ag-2", "query": "q", "files": [], "inputs": {}, "user_id": "exp-2"})
    monkeypatch.setattr(module, "Canvas", _CanvasSSESuccess)
    monkeypatch.setattr(module.CanvasReplicaService, "load_for_run", lambda *_args, **_kwargs: {"dsl": {}, "title": "ag2", "canvas_category": module.CanvasCategory.Agent})
    monkeypatch.setattr(module.UserCanvasService, "update_by_id", lambda canvas_id, payload: updates.append((canvas_id, payload)))
    resp = _run(inspect.unwrap(module.run)())
    assert isinstance(resp, _StubResponse)
    assert resp.headers.get("Content-Type") == "text/event-stream; charset=utf-8"
    chunks = _run(_collect_stream(resp.response))
    assert any('"answer": "stream-ok"' in chunk for chunk in chunks)
    # Agent canvas, mid-stream exception: a code-500 chunk with the error text
    # is emitted and the running task is cancelled.
    class _CanvasSSEError:
        last_instance = None
        def __init__(self, *_args, **_kwargs):
            self.cancelled = False
            _CanvasSSEError.last_instance = self
        async def run(self, **_kwargs):
            yield {"answer": "start"}
            raise RuntimeError("stream boom")
        def cancel_task(self):
            self.cancelled = True
        def __str__(self):
            return "{}"
    _set_request_json(monkeypatch, module, {"id": "ag-3", "query": "q", "files": [], "inputs": {}, "user_id": "exp-3"})
    monkeypatch.setattr(module, "Canvas", _CanvasSSEError)
    monkeypatch.setattr(module.CanvasReplicaService, "load_for_run", lambda *_args, **_kwargs: {"dsl": {}, "title": "ag3", "canvas_category": module.CanvasCategory.Agent})
    resp = _run(inspect.unwrap(module.run)())
    chunks = _run(_collect_stream(resp.response))
    assert any('"code": 500' in chunk and "stream boom" in chunk for chunk in chunks)
    assert _CanvasSSEError.last_instance.cancelled is True
@pytest.mark.p2
def test_exp_agent_completion_trace_and_filtering_unit(monkeypatch):
    """exp_agent_completion should attach traces, drop heartbeats, and finish with [DONE]."""
    module = _load_canvas_module(monkeypatch)
    _set_request_json(monkeypatch, module, {"return_trace": True})

    # Canned upstream SSE lines: one malformed line, then one of each event kind.
    upstream_lines = (
        "data:not-json",
        'data:{"event":"node_finished","data":{"component_id":"cmp-1","step":"done"}}',
        'data:{"event":"heartbeat","data":{"t":1}}',
        'data:{"event":"message","data":{"content":"hello"}}',
        'data:{"event":"message_end","data":{"content":"bye"}}',
    )

    async def _fake_agent_completion(*_args, **_kwargs):
        for line in upstream_lines:
            yield line

    monkeypatch.setattr(module, "agent_completion", _fake_agent_completion)
    response = _run(inspect.unwrap(module.exp_agent_completion)("canvas-1"))

    assert isinstance(response, _StubResponse)
    assert response.headers.get("Content-Type") == "text/event-stream; charset=utf-8"

    streamed = _run(_collect_stream(response.response))
    # node_finished events are re-emitted with trace information attached.
    assert any('"event": "node_finished"' in piece and '"trace"' in piece for piece in streamed)
    # heartbeat events are filtered out entirely.
    assert not any('"event":"heartbeat"' in piece or '"event": "heartbeat"' in piece for piece in streamed)
    # Ordinary message events pass through.
    assert any('"event":"message"' in piece or '"event": "message"' in piece for piece in streamed)
    # The stream terminates with the SSE done marker.
    assert streamed[-1] == "data:[DONE]\n\n"
@pytest.mark.p2
def test_rerun_and_cancel_matrix_unit(monkeypatch):
    """Cover rerun() document guards and the full reset path, plus cancel() redis handling."""
    module = _load_canvas_module(monkeypatch)
    # rerun(): no documents attached to the pipeline operation log -> error.
    _set_request_json(monkeypatch, module, {"id": "flow-1", "dsl": {"n": 1}, "component_id": "cmp-1"})
    monkeypatch.setattr(module.PipelineOperationLogService, "get_documents_info", lambda _id: [])
    res = _run(inspect.unwrap(module.rerun)())
    assert res["message"] == "Document not found."
    # rerun(): a document still in progress (progress 0.5) blocks the rerun.
    processing_doc = {"id": "doc-1", "name": "Doc-1", "kb_id": "kb-1", "progress": 0.5}
    monkeypatch.setattr(module.PipelineOperationLogService, "get_documents_info", lambda _id: [dict(processing_doc)])
    res = _run(inspect.unwrap(module.rerun)())
    assert "is processing" in res["message"]
    # Doc-store stub that records index deletions.
    class _DocStore:
        def __init__(self):
            self.deleted = []
        def index_exist(self, *_args, **_kwargs):
            return True
        def delete(self, *args, **_kwargs):
            self.deleted.append(args)
            return True
    doc_store = _DocStore()
    monkeypatch.setattr(module.settings, "docStoreConn", doc_store)
    # A fully-processed document (progress 1.0) eligible for rerun.
    doc = {
        "id": "doc-1",
        "name": "Doc-1",
        "kb_id": "kb-1",
        "progress": 1.0,
        "progress_msg": "old",
        "chunk_num": 8,
        "token_num": 12,
    }
    updates = {"doc": [], "pipeline": [], "tasks": [], "queue": []}
    monkeypatch.setattr(module.PipelineOperationLogService, "get_documents_info", lambda _id: [dict(doc)])
    monkeypatch.setattr(module.DocumentService, "clear_chunk_num_when_rerun", lambda doc_id: updates["doc"].append(("clear", doc_id)))
    monkeypatch.setattr(module.DocumentService, "update_by_id", lambda doc_id, payload: updates["doc"].append(("update", doc_id, payload)))
    monkeypatch.setattr(module.TaskService, "filter_delete", lambda expr: updates["tasks"].append(expr))
    monkeypatch.setattr(module.PipelineOperationLogService, "update_by_id", lambda flow_id, payload: updates["pipeline"].append((flow_id, payload)))
    monkeypatch.setattr(
        module,
        "queue_dataflow",
        lambda **kwargs: updates["queue"].append(kwargs) or (True, ""),
    )
    monkeypatch.setattr(module, "get_uuid", lambda: "task-rerun")
    _set_request_json(monkeypatch, module, {"id": "flow-1", "dsl": {"n": 1}, "component_id": "cmp-1"})
    res = _run(inspect.unwrap(module.rerun)())
    # Successful rerun: stored chunks deleted, doc counters cleared, the
    # pipeline DSL restarts from the requested component, and the dataflow
    # is re-queued with rerun=True.
    assert res["code"] == module.RetCode.SUCCESS
    assert doc_store.deleted
    assert any(item[0] == "clear" and item[1] == "doc-1" for item in updates["doc"])
    assert updates["pipeline"] and updates["pipeline"][0][1]["dsl"]["path"] == ["cmp-1"]
    assert updates["queue"] and updates["queue"][0]["rerun"] is True
    # cancel(): writes a "<task_id>-cancel" marker to redis and reports success.
    redis_calls = []
    monkeypatch.setattr(module.REDIS_CONN, "set", lambda key, value: redis_calls.append((key, value)))
    res = module.cancel("task-9")
    assert res["code"] == module.RetCode.SUCCESS
    assert redis_calls == [("task-9-cancel", "x")]
    # cancel(): a redis failure is swallowed — the call still reports success.
    monkeypatch.setattr(module.REDIS_CONN, "set", lambda *_args, **_kwargs: (_ for _ in ()).throw(RuntimeError("redis fail")))
    res = module.cancel("task-9")
    assert res["code"] == module.RetCode.SUCCESS
@pytest.mark.p2
def test_reset_upload_input_form_debug_matrix_unit(monkeypatch):
    """Cover reset(), upload(), input_form() and debug() success and failure paths."""
    module = _load_canvas_module(monkeypatch)
    # reset(): caller without access -> operating error.
    _set_request_json(monkeypatch, module, {"id": "canvas-1"})
    monkeypatch.setattr(module.UserCanvasService, "accessible", lambda *_args, **_kwargs: False)
    res = _run(inspect.unwrap(module.reset)())
    assert res["code"] == module.RetCode.OPERATING_ERROR
    # reset(): unknown canvas id -> "canvas not found."
    _set_request_json(monkeypatch, module, {"id": "canvas-1"})
    monkeypatch.setattr(module.UserCanvasService, "accessible", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _canvas_id: (False, None))
    res = _run(inspect.unwrap(module.reset)())
    assert res["message"] == "canvas not found."
    # Canvas stub whose str() is the post-reset DSL.
    class _ResetCanvas:
        def __init__(self, *_args, **_kwargs):
            self.reset_called = False
        def reset(self):
            self.reset_called = True
        def __str__(self):
            return '{"v": 2}'
    # reset(): success returns the new DSL and persists it via update_by_id.
    updates = []
    _set_request_json(monkeypatch, module, {"id": "canvas-1"})
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _canvas_id: (True, SimpleNamespace(id="canvas-1", dsl={"v": 1})))
    monkeypatch.setattr(module.UserCanvasService, "update_by_id", lambda canvas_id, payload: updates.append((canvas_id, payload)))
    monkeypatch.setattr(module, "Canvas", _ResetCanvas)
    res = _run(inspect.unwrap(module.reset)())
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"] == {"v": 2}
    assert updates == [("canvas-1", {"dsl": {"v": 2}})]
    # reset(): Canvas construction failure maps to EXCEPTION_ERROR.
    _set_request_json(monkeypatch, module, {"id": "canvas-1"})
    monkeypatch.setattr(module, "Canvas", lambda *_args, **_kwargs: (_ for _ in ()).throw(RuntimeError("reset boom")))
    res = _run(inspect.unwrap(module.reset)())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "reset boom" in res["message"]
    # upload(): unknown canvas -> "canvas not found."
    monkeypatch.setattr(module.UserCanvasService, "get_by_canvas_id", lambda _canvas_id: (False, None))
    monkeypatch.setattr(module, "request", _DummyRequest(args=_Args({"url": "http://example.com"}), files=_FileMap()))
    res = _run(module.upload("canvas-1"))
    assert res["message"] == "canvas not found."
    # upload(): a single file is forwarded to FileService.upload_info with the url.
    monkeypatch.setattr(module.UserCanvasService, "get_by_canvas_id", lambda _canvas_id: (True, {"user_id": "tenant-1"}))
    monkeypatch.setattr(
        module,
        "request",
        _DummyRequest(
            args=_Args({"url": "http://example.com"}),
            files=_FileMap({"file": ["file-1"]}),
        ),
    )
    monkeypatch.setattr(module.FileService, "upload_info", lambda user_id, file_obj, url=None: {"uid": user_id, "file": file_obj, "url": url})
    res = _run(module.upload("canvas-1"))
    assert res["data"]["url"] == "http://example.com"
    # upload(): multiple files produce one result entry per file.
    monkeypatch.setattr(
        module,
        "request",
        _DummyRequest(
            args=_Args({"url": "http://example.com"}),
            files=_FileMap({"file": ["f1", "f2"]}),
        ),
    )
    monkeypatch.setattr(module.FileService, "upload_info", lambda user_id, file_obj, url=None: {"uid": user_id, "file": file_obj, "url": url})
    res = _run(module.upload("canvas-1"))
    assert len(res["data"]) == 2
    # upload(): an upload_info failure maps to EXCEPTION_ERROR.
    monkeypatch.setattr(module.FileService, "upload_info", lambda *_args, **_kwargs: (_ for _ in ()).throw(RuntimeError("upload boom")))
    res = _run(module.upload("canvas-1"))
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "upload boom" in res["message"]
    # input_form(): unknown canvas -> "canvas not found."
    monkeypatch.setattr(module, "request", _DummyRequest(args=_Args({"id": "canvas-1", "component_id": "begin"})))
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _canvas_id: (False, None))
    res = module.input_form()
    assert res["message"] == "canvas not found."
    # input_form(): canvas exists but the caller's query finds no row -> operating error.
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _canvas_id: (True, SimpleNamespace(id="canvas-1", dsl={"n": 1})))
    monkeypatch.setattr(module.UserCanvasService, "query", lambda **_kwargs: [])
    res = module.input_form()
    assert res["code"] == module.RetCode.OPERATING_ERROR
    # input_form(): success returns the requested component's input form.
    class _InputCanvas:
        def __init__(self, *_args, **_kwargs):
            pass
        def get_component_input_form(self, component_id):
            return {"component_id": component_id}
    monkeypatch.setattr(module.UserCanvasService, "query", lambda **_kwargs: [object()])
    monkeypatch.setattr(module, "Canvas", _InputCanvas)
    res = module.input_form()
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"]["component_id"] == "begin"
    # input_form(): Canvas construction failure maps to EXCEPTION_ERROR.
    monkeypatch.setattr(module, "Canvas", lambda *_args, **_kwargs: (_ for _ in ()).throw(RuntimeError("input boom")))
    res = module.input_form()
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "input boom" in res["message"]
    # debug(): caller without access -> operating error.
    _set_request_json(
        monkeypatch,
        module,
        {"id": "canvas-1", "component_id": "llm-node", "params": {"p": {"value": "v"}}},
    )
    monkeypatch.setattr(module.UserCanvasService, "accessible", lambda *_args, **_kwargs: False)
    res = _run(inspect.unwrap(module.debug)())
    assert res["code"] == module.RetCode.OPERATING_ERROR
    # LLM component stub recording reset/set_debug_inputs/invoke calls; its
    # output stream is an *async* generator.
    class _DebugComponent(module.LLM):
        def __init__(self):
            self.reset_called = False
            self.debug_inputs = None
            self.invoked = None
        def reset(self):
            self.reset_called = True
        def set_debug_inputs(self, params):
            self.debug_inputs = params
        def invoke(self, **kwargs):
            self.invoked = kwargs
        def output(self):
            async def _gen():
                yield "A"
                yield "B"
            return {"stream": partial(_gen)}
    class _DebugCanvas:
        # Keeps a handle on the last component built so asserts can reach it.
        last_component = None
        def __init__(self, *_args, **_kwargs):
            self.message_id = ""
            self._component = _DebugComponent()
            _DebugCanvas.last_component = self._component
        def reset(self):
            return None
        def get_component(self, _component_id):
            return {"obj": self._component}
    # debug(): success drains the async stream into one string, resets the
    # component, forwards debug inputs verbatim, and invokes with the
    # unwrapped {"p": "v"} parameter values.
    _set_request_json(
        monkeypatch,
        module,
        {"id": "canvas-1", "component_id": "llm-node", "params": {"p": {"value": "v"}}},
    )
    monkeypatch.setattr(module.UserCanvasService, "accessible", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _canvas_id: (True, SimpleNamespace(id="canvas-1", dsl={"n": 1})))
    monkeypatch.setattr(module, "get_uuid", lambda: "msg-1")
    monkeypatch.setattr(module, "Canvas", _DebugCanvas)
    res = _run(inspect.unwrap(module.debug)())
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"]["stream"] == "AB"
    assert _DebugCanvas.last_component.reset_called is True
    assert _DebugCanvas.last_component.debug_inputs == {"p": {"value": "v"}}
    assert _DebugCanvas.last_component.invoked == {"p": "v"}
@pytest.mark.p2
def test_debug_sync_iter_and_exception_matrix_unit(monkeypatch):
    """debug() must drain a synchronous component stream and wrap setup errors."""
    module = _load_canvas_module(monkeypatch)

    class _PlainGenComponent(module.LLM):
        # Component whose output stream is a *sync* generator, not an async one.
        def __init__(self):
            self.invoked = {}

        def reset(self):
            return None

        def set_debug_inputs(self, _params):
            return None

        def invoke(self, **kwargs):
            self.invoked = kwargs

        def output(self):
            def _chars():
                # Same four chunks as before: 'S', 'Y', 'N', 'C'.
                yield from "SYNC"
            return {"stream": partial(_chars)}

    class _CanvasWithPlainGen:
        def __init__(self, *_args, **_kwargs):
            self.message_id = ""
            self.component = _PlainGenComponent()

        def reset(self):
            return None

        def get_component(self, _component_id):
            return {"obj": self.component}

    _set_request_json(
        monkeypatch,
        module,
        {"id": "canvas-1", "component_id": "sync-node", "params": {"p": {"value": "v"}}},
    )
    monkeypatch.setattr(module.UserCanvasService, "accessible", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _canvas_id: (True, SimpleNamespace(id="canvas-1", dsl={"n": 1})))
    monkeypatch.setattr(module, "Canvas", _CanvasWithPlainGen)
    outcome = _run(inspect.unwrap(module.debug)())
    # The sync generator's chunks are concatenated into a single string.
    assert outcome["code"] == module.RetCode.SUCCESS
    assert outcome["data"]["stream"] == "SYNC"

    # A Canvas constructor blow-up becomes an exception-error payload.
    monkeypatch.setattr(module, "Canvas", lambda *_args, **_kwargs: (_ for _ in ()).throw(RuntimeError("debug boom")))
    outcome = _run(inspect.unwrap(module.debug)())
    assert outcome["code"] == module.RetCode.EXCEPTION_ERROR
    assert "debug boom" in outcome["message"]
@pytest.mark.p2
def test_test_db_connect_dialect_matrix_unit(monkeypatch):
    """Drive test_db_connect() across every supported database dialect plus failure modes."""
    module = _load_canvas_module(monkeypatch)
    # Fake peewee-style database capturing ctor args and connect/close counts.
    class _FakeDB:
        def __init__(self, *args, **kwargs):
            self.args = args
            self.kwargs = kwargs
            self.connected = 0
            self.closed = 0
        def connect(self):
            self.connected += 1
        def close(self):
            self.closed += 1
    mysql_objs = []
    postgres_objs = []
    def _mysql_ctor(*args, **kwargs):
        obj = _FakeDB(*args, **kwargs)
        mysql_objs.append(obj)
        return obj
    def _postgres_ctor(*args, **kwargs):
        obj = _FakeDB(*args, **kwargs)
        postgres_objs.append(obj)
        return obj
    monkeypatch.setattr(module, "MySQLDatabase", _mysql_ctor)
    monkeypatch.setattr(module, "PostgresqlDatabase", _postgres_ctor)
    def _run_case(payload):
        # Helper: post the payload and run the endpoint once.
        _set_request_json(monkeypatch, module, payload)
        return _run(inspect.unwrap(module.test_db_connect)())
    req_base = {
        "database": "db",
        "username": "user",
        "host": "host",
        "port": 3306,
        "password": "pwd",
    }
    # mysql / mariadb / oceanbase all route through MySQLDatabase and open/close
    # the connection exactly once per probe.
    res = _run_case({**req_base, "db_type": "mysql"})
    assert res["code"] == module.RetCode.SUCCESS
    assert mysql_objs[-1].connected == 1
    assert mysql_objs[-1].closed == 1
    res = _run_case({**req_base, "db_type": "mariadb"})
    assert res["code"] == module.RetCode.SUCCESS
    assert mysql_objs[-1].connected == 1
    res = _run_case({**req_base, "db_type": "oceanbase"})
    assert res["code"] == module.RetCode.SUCCESS
    assert mysql_objs[-1].kwargs["charset"] == "utf8mb4"
    # postgres routes through PostgresqlDatabase.
    res = _run_case({**req_base, "db_type": "postgres"})
    assert res["code"] == module.RetCode.SUCCESS
    assert postgres_objs[-1].closed == 1
    # mssql uses pyodbc with the ODBC Driver 17 connection string and a
    # "SELECT 1" probe; inject a fake pyodbc module to capture the calls.
    mssql_calls = {}
    class _MssqlCursor:
        def execute(self, sql):
            mssql_calls["sql"] = sql
        def close(self):
            mssql_calls["cursor_closed"] = True
    class _MssqlConn:
        def cursor(self):
            mssql_calls["cursor_opened"] = True
            return _MssqlCursor()
        def close(self):
            mssql_calls["conn_closed"] = True
    pyodbc_mod = ModuleType("pyodbc")
    def _pyodbc_connect(conn_str):
        mssql_calls["conn_str"] = conn_str
        return _MssqlConn()
    pyodbc_mod.connect = _pyodbc_connect
    monkeypatch.setitem(sys.modules, "pyodbc", pyodbc_mod)
    res = _run_case({**req_base, "db_type": "mssql"})
    assert res["code"] == module.RetCode.SUCCESS
    assert "DRIVER={ODBC Driver 17 for SQL Server}" in mssql_calls["conn_str"]
    assert mssql_calls["sql"] == "SELECT 1"
    # IBM DB2 uses ibm_db with a sysdummy1 probe; inject a fake ibm_db module.
    ibm_calls = {}
    ibm_db_mod = ModuleType("ibm_db")
    def _ibm_connect(conn_str, *_args):
        ibm_calls["conn_str"] = conn_str
        return "ibm-conn"
    def _ibm_exec_immediate(conn, sql):
        ibm_calls["exec"] = (conn, sql)
        return "ibm-stmt"
    ibm_db_mod.connect = _ibm_connect
    ibm_db_mod.exec_immediate = _ibm_exec_immediate
    ibm_db_mod.fetch_assoc = lambda stmt: ibm_calls.update({"fetch": stmt}) or {"one": 1}
    ibm_db_mod.close = lambda conn: ibm_calls.update({"close": conn})
    monkeypatch.setitem(sys.modules, "ibm_db", ibm_db_mod)
    res = _run_case({**req_base, "db_type": "IBM DB2"})
    assert res["code"] == module.RetCode.SUCCESS
    assert ibm_calls["exec"] == ("ibm-conn", "SELECT 1 FROM sysibm.sysdummy1")
    # trino: sys.modules["trino"] = None makes the import fail, which is
    # surfaced as a missing-dependency error.
    monkeypatch.setitem(sys.modules, "trino", None)
    res = _run_case({**req_base, "db_type": "trino", "database": "catalog.schema"})
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "Missing dependency 'trino'" in res["message"]
    # trino stub recording connect kwargs and auth credentials.
    trino_calls = {"connect": [], "auth": []}
    class _TrinoCursor:
        def execute(self, sql):
            trino_calls["sql"] = sql
        def fetchall(self):
            trino_calls["fetched"] = True
            return [(1,)]
        def close(self):
            trino_calls["cursor_closed"] = True
    class _TrinoConn:
        def cursor(self):
            return _TrinoCursor()
        def close(self):
            trino_calls["conn_closed"] = True
    trino_mod = ModuleType("trino")
    trino_mod.BasicAuthentication = lambda user, password: trino_calls["auth"].append((user, password)) or ("auth", user)
    trino_mod.dbapi = SimpleNamespace(connect=lambda **kwargs: trino_calls["connect"].append(kwargs) or _TrinoConn())
    monkeypatch.setitem(sys.modules, "trino", trino_mod)
    # trino: an empty database spec is rejected with catalog.schema guidance.
    res = _run_case({**req_base, "db_type": "trino", "database": ""})
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "catalog.schema" in res["message"]
    # trino: "cat.schema" and "cat/schema" both split into catalog + schema;
    # with TRINO_USE_TLS set, BasicAuthentication carries the credentials.
    monkeypatch.setenv("TRINO_USE_TLS", "1")
    res = _run_case({**req_base, "db_type": "trino", "database": "cat.schema"})
    assert res["code"] == module.RetCode.SUCCESS
    assert trino_calls["connect"][-1]["catalog"] == "cat"
    assert trino_calls["connect"][-1]["schema"] == "schema"
    assert trino_calls["auth"][-1] == ("user", "pwd")
    res = _run_case({**req_base, "db_type": "trino", "database": "cat/schema"})
    assert res["code"] == module.RetCode.SUCCESS
    assert trino_calls["connect"][-1]["catalog"] == "cat"
    assert trino_calls["connect"][-1]["schema"] == "schema"
    # trino: a bare catalog name falls back to the "default" schema.
    res = _run_case({**req_base, "db_type": "trino", "database": "catalog"})
    assert res["code"] == module.RetCode.SUCCESS
    assert trino_calls["connect"][-1]["catalog"] == "catalog"
    assert trino_calls["connect"][-1]["schema"] == "default"
    # Unknown dialects are rejected outright.
    res = _run_case({**req_base, "db_type": "unknown"})
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "Unsupported database type." in res["message"]
    # A connect() failure is reported as EXCEPTION_ERROR carrying the cause.
    class _BoomDB(_FakeDB):
        def connect(self):
            raise RuntimeError("connect boom")
    monkeypatch.setattr(module, "MySQLDatabase", lambda *_args, **_kwargs: _BoomDB())
    res = _run_case({**req_base, "db_type": "mysql"})
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "connect boom" in res["message"]
@pytest.mark.p2
def test_canvas_history_list_and_setting_matrix_unit(monkeypatch):
    """Cover version-history endpoints, list_canvas() paging/filtering and setting()."""
    module = _load_canvas_module(monkeypatch)
    # Minimal stand-in for a canvas version row.
    class _Version:
        def __init__(self, version_id, update_time):
            self.version_id = version_id
            self.update_time = update_time
        def to_dict(self):
            return {"id": self.version_id, "update_time": self.update_time}
    # getlistversion(): versions come back sorted newest first by update_time.
    monkeypatch.setattr(
        module.UserCanvasVersionService,
        "list_by_canvas_id",
        lambda _canvas_id: [_Version("v1", 1), _Version("v2", 5)],
    )
    res = module.getlistversion("canvas-1")
    assert [item["id"] for item in res["data"]] == ["v2", "v1"]
    # getlistversion(): service errors are wrapped into the message.
    monkeypatch.setattr(
        module.UserCanvasVersionService,
        "list_by_canvas_id",
        lambda _canvas_id: (_ for _ in ()).throw(RuntimeError("history boom")),
    )
    res = module.getlistversion("canvas-1")
    assert "Error getting history files: history boom" in res["message"]
    # getversion(): returns the requested version dict on success.
    monkeypatch.setattr(
        module.UserCanvasVersionService,
        "get_by_id",
        lambda _version_id: (True, _Version("v3", 3)),
    )
    res = module.getversion("v3")
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"]["id"] == "v3"
    # getversion(): error text lands in the *data* field (not message).
    monkeypatch.setattr(
        module.UserCanvasVersionService,
        "get_by_id",
        lambda _version_id: (_ for _ in ()).throw(RuntimeError("version boom")),
    )
    res = module.getversion("v3")
    assert "Error getting history file: version boom" in res["data"]
    # list_canvas(): record each get_by_tenant_ids call for later inspection.
    list_calls = []
    def _get_by_tenant_ids(tenants, user_id, page_number, page_size, orderby, desc, keywords, canvas_category):
        list_calls.append((tenants, user_id, page_number, page_size, orderby, desc, keywords, canvas_category))
        return [{"id": "canvas-1"}], 1
    monkeypatch.setattr(module.UserCanvasService, "get_by_tenant_ids", _get_by_tenant_ids)
    monkeypatch.setattr(
        module.TenantService,
        "get_joined_tenants_by_user_id",
        lambda _user_id: [{"tenant_id": "t1"}, {"tenant_id": "t2"}],
    )
    # Without owner_ids: joined tenants plus the caller are queried, and the
    # page/page_size/orderby/desc query args are parsed into typed values.
    monkeypatch.setattr(
        module,
        "request",
        _DummyRequest(
            args=_Args(
                {
                    "keywords": "kw",
                    "page": "2",
                    "page_size": "3",
                    "orderby": "update_time",
                    "canvas_category": "agent",
                    "desc": "false",
                }
            )
        ),
    )
    res = module.list_canvas()
    assert res["code"] == module.RetCode.SUCCESS
    assert list_calls[-1][0] == ["t1", "t2", "user-1"]
    assert list_calls[-1][2:6] == (2, 3, "update_time", False)
    # With explicit owner_ids: only those owners are queried and paging
    # defaults to (0, 0); desc="true" parses to True.
    monkeypatch.setattr(module, "request", _DummyRequest(args=_Args({"owner_ids": "u1,u2", "desc": "true"})))
    res = module.list_canvas()
    assert res["code"] == module.RetCode.SUCCESS
    assert list_calls[-1][0] == ["u1", "u2"]
    assert list_calls[-1][2:4] == (0, 0)
    assert list_calls[-1][5] is True
    # setting(): caller without access -> operating error.
    _set_request_json(monkeypatch, module, {"id": "canvas-1", "title": "T", "permission": "private"})
    monkeypatch.setattr(module.UserCanvasService, "accessible", lambda *_args, **_kwargs: False)
    res = _run(inspect.unwrap(module.setting)())
    assert res["code"] == module.RetCode.OPERATING_ERROR
    # setting(): unknown canvas -> "canvas not found."
    _set_request_json(monkeypatch, module, {"id": "canvas-1", "title": "T", "permission": "private"})
    monkeypatch.setattr(module.UserCanvasService, "accessible", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _canvas_id: (False, None))
    res = _run(inspect.unwrap(module.setting)())
    assert res["message"] == "canvas not found."
    # setting(): success forwards every editable field (title, description,
    # permission, avatar) to update_by_id and returns its row count.
    updates = []
    _set_request_json(
        monkeypatch,
        module,
        {
            "id": "canvas-1",
            "title": "New title",
            "permission": "private",
            "description": "new desc",
            "avatar": "avatar.png",
        },
    )
    monkeypatch.setattr(
        module.UserCanvasService,
        "get_by_id",
        lambda _canvas_id: (True, SimpleNamespace(to_dict=lambda: {"id": "canvas-1", "title": "Old"})),
    )
    monkeypatch.setattr(module.UserCanvasService, "update_by_id", lambda canvas_id, payload: updates.append((canvas_id, payload)) or 2)
    res = _run(inspect.unwrap(module.setting)())
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"] == 2
    assert updates[-1][0] == "canvas-1"
    assert updates[-1][1]["title"] == "New title"
    assert updates[-1][1]["description"] == "new desc"
    assert updates[-1][1]["permission"] == "private"
    assert updates[-1][1]["avatar"] == "avatar.png"
@pytest.mark.p2
def test_trace_and_sessions_matrix_unit(monkeypatch):
    """Exercise the canvas `trace` and `sessions` routes across their branches.

    Covers: trace with no Redis entry, with a JSON entry, and with a Redis
    failure; sessions with denied access, via get_names, via get_list with
    paging args, and with a failing result serializer.
    """
    module = _load_canvas_module(monkeypatch)
    # trace(): no cached event for the message -> empty dict payload.
    monkeypatch.setattr(module, "request", _DummyRequest(args=_Args({"canvas_id": "c1", "message_id": "m1"})))
    monkeypatch.setattr(module.REDIS_CONN, "get", lambda _key: None)
    res = module.trace()
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"] == {}
    # trace(): cached JSON string is parsed and returned verbatim.
    monkeypatch.setattr(module.REDIS_CONN, "get", lambda _key: '{"event":"ok"}')
    res = module.trace()
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"] == {"event": "ok"}
    # trace(): a Redis exception is swallowed; the route returns None.
    monkeypatch.setattr(module.REDIS_CONN, "get", lambda _key: (_ for _ in ()).throw(RuntimeError("trace boom")))
    res = module.trace()
    assert res is None
    # sessions(): inaccessible canvas -> OPERATING_ERROR.
    monkeypatch.setattr(module.UserCanvasService, "accessible", lambda *_args, **_kwargs: False)
    monkeypatch.setattr(module, "request", _DummyRequest(args=_Args({})))
    res = module.sessions("canvas-1")
    assert res["code"] == module.RetCode.OPERATING_ERROR
    # sessions(): exp_user_id path goes through get_names.
    monkeypatch.setattr(module.UserCanvasService, "accessible", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module, "request", _DummyRequest(args=_Args({"desc": "false", "exp_user_id": "exp-1"})))
    monkeypatch.setattr(module.API4ConversationService, "get_names", lambda _canvas_id, _exp_user_id: [{"id": "s1"}, {"id": "s2"}])
    res = module.sessions("canvas-1")
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"]["total"] == 2
    # sessions(): paged path forwards orderby/desc/dsl flags to get_list.
    list_calls = []
    def _get_list(*args, **kwargs):
        list_calls.append((args, kwargs))
        return 7, [{"id": "s3"}]
    monkeypatch.setattr(module.API4ConversationService, "get_list", _get_list)
    monkeypatch.setattr(
        module,
        "request",
        _DummyRequest(args=_Args({"page": "3", "page_size": "9", "orderby": "update_time", "dsl": "false"})),
    )
    res = module.sessions("canvas-1")
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"]["total"] == 7
    assert list_calls[-1][0][4] == "update_time"
    assert list_calls[-1][0][5] is True
    assert list_calls[-1][0][8] is False
    # sessions(): a serializer failure is converted to EXCEPTION_ERROR.
    monkeypatch.setattr(module, "get_json_result", lambda *_args, **_kwargs: (_ for _ in ()).throw(RuntimeError("result boom")))
    res = module.sessions("canvas-1")
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "result boom" in res["message"]
@pytest.mark.p2
def test_session_crud_prompts_and_download_matrix_unit(monkeypatch):
    """Cover session create/get/delete, the prompts endpoint, and file download."""
    module = _load_canvas_module(monkeypatch)
    # Minimal Canvas substitute so set_session can construct and reset one.
    class _SessionCanvas:
        def __init__(self, *_args, **_kwargs):
            self.reset_called = False
        def reset(self):
            self.reset_called = True
    # set_session(): happy path persists a new conversation with a string DSL.
    _set_request_json(monkeypatch, module, {"name": "Sess1"})
    monkeypatch.setattr(module.UserCanvasService, "get_by_id", lambda _canvas_id: (True, SimpleNamespace(id="canvas-1", dsl={"n": 1})))
    monkeypatch.setattr(module, "Canvas", _SessionCanvas)
    monkeypatch.setattr(module, "get_uuid", lambda: "sess-1")
    saved = []
    monkeypatch.setattr(module.API4ConversationService, "save", lambda **kwargs: saved.append(kwargs))
    res = _run(inspect.unwrap(module.set_session)("canvas-1"))
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"]["id"] == "sess-1"
    assert isinstance(res["data"]["dsl"], str)
    assert saved and saved[-1]["id"] == "sess-1"
    # get_session(): access denied vs. found.
    monkeypatch.setattr(module.UserCanvasService, "accessible", lambda *_args, **_kwargs: False)
    res = module.get_session("canvas-1", "sess-1")
    assert res["code"] == module.RetCode.OPERATING_ERROR
    monkeypatch.setattr(module.UserCanvasService, "accessible", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.API4ConversationService, "get_by_id", lambda _session_id: (True, SimpleNamespace(to_dict=lambda: {"id": _session_id})))
    res = module.get_session("canvas-1", "sess-1")
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"]["id"] == "sess-1"
    # del_session(): access denied vs. successful delete.
    monkeypatch.setattr(module.UserCanvasService, "accessible", lambda *_args, **_kwargs: False)
    res = module.del_session("canvas-1", "sess-1")
    assert res["code"] == module.RetCode.OPERATING_ERROR
    monkeypatch.setattr(module.UserCanvasService, "accessible", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.API4ConversationService, "delete_by_id", lambda _session_id: _session_id == "sess-1")
    res = module.del_session("canvas-1", "sess-1")
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"] is True
    # prompts(): inject a fake rag.prompts.generator and verify the mapping.
    rag_prompts_pkg = ModuleType("rag.prompts")
    rag_prompts_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "rag.prompts", rag_prompts_pkg)
    rag_generator_mod = ModuleType("rag.prompts.generator")
    rag_generator_mod.ANALYZE_TASK_SYSTEM = "SYS"
    rag_generator_mod.ANALYZE_TASK_USER = "USER"
    rag_generator_mod.NEXT_STEP = "NEXT"
    rag_generator_mod.REFLECT = "REFLECT"
    rag_generator_mod.CITATION_PROMPT_TEMPLATE = "CITE"
    monkeypatch.setitem(sys.modules, "rag.prompts.generator", rag_generator_mod)
    res = module.prompts()
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"]["task_analysis"] == "SYS\n\nUSER"
    assert res["data"]["plan_generation"] == "NEXT"
    assert res["data"]["reflection"] == "REFLECT"
    assert res["data"]["citation_guidelines"] == "CITE"
    # download(): returns the raw blob fetched from FileService.
    monkeypatch.setattr(module, "request", _DummyRequest(args=_Args({"id": "f1", "created_by": "u1"})))
    monkeypatch.setattr(module.FileService, "get_blob", lambda _created_by, _id: b"blob-data")
    res = _run(module.download())
    assert res == {"blob": b"blob-data"}
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_canvas_app/test_canvas_routes_unit.py",
"license": "Apache License 2.0",
"lines": 1134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_connector_app/test_connector_routes_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import importlib.util
import json
import sys
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _AwaitableValue:
def __init__(self, value):
self._value = value
def __await__(self):
async def _co():
return self._value
return _co().__await__()
class _Args(dict):
def get(self, key, default=None, type=None):
value = super().get(key, default)
if type is None:
return value
try:
return type(value)
except (TypeError, ValueError):
return default
def to_dict(self, flat=True):
return dict(self)
class _FakeResponse:
def __init__(self, body, status_code):
self.body = body
self.status_code = status_code
self.headers = {}
class _FakeConnectorRecord:
def __init__(self, payload):
self._payload = payload
def to_dict(self):
return dict(self._payload)
class _FakeCredentials:
def __init__(self, raw='{"refresh_token":"rt","access_token":"at"}'):
self._raw = raw
def to_json(self):
return self._raw
class _FakeFlow:
    """OAuth flow stub that records every interaction instead of calling Google."""

    def __init__(self, client_config, scopes):
        self.client_config = client_config
        self.scopes = scopes
        self.redirect_uri = None
        self.credentials = _FakeCredentials()
        self.auth_kwargs = None
        self.token_code = None

    def authorization_url(self, **kwargs):
        # Keep a copy of the kwargs so tests can assert on state/access_type.
        self.auth_kwargs = dict(kwargs)
        state = kwargs["state"]
        return f"https://oauth.example/{state}", state

    def fetch_token(self, code):
        # Token exchange is a no-op; only the authorization code is recorded.
        self.token_code = code
class _FakeBoxToken:
def __init__(self, access_token, refresh_token):
self.access_token = access_token
self.refresh_token = refresh_token
class _FakeBoxOAuth:
def __init__(self, config):
self.config = config
self.exchange_code = None
def get_authorize_url(self, options):
return f"https://box.example/auth?state={options.state}&redirect={options.redirect_uri}"
def get_tokens_authorization_code_grant(self, code):
self.exchange_code = code
def retrieve_token(self):
return _FakeBoxToken("box-access", "box-refresh")
class _FakeRedis:
def __init__(self):
self.store = {}
self.set_calls = []
self.deleted = []
def get(self, key):
return self.store.get(key)
def set_obj(self, key, obj, ttl):
self.set_calls.append((key, obj, ttl))
self.store[key] = json.dumps(obj)
def delete(self, key):
self.deleted.append(key)
self.store.pop(key, None)
def _run(coro):
return asyncio.run(coro)
def _set_request(module, *, args=None, json_body=None):
    """Install a fake Quart ``request`` on *module* with given args and JSON body."""
    body = {} if json_body is None else json_body
    fake_request = SimpleNamespace(
        args=_Args(args or {}),
        json=_AwaitableValue(body),
    )
    module.request = fake_request
@pytest.fixture(scope="session")
def auth():
    # Overrides the suite-wide auth fixture: these unit tests never issue
    # real HTTP requests, so a constant placeholder token suffices.
    return "unit-auth"
@pytest.fixture(scope="session", autouse=True)
def set_tenant_info():
    # Autouse override that disables the suite's tenant bootstrap; the app
    # module is fully stubbed in-process by _load_connector_app instead.
    return None
def _load_connector_app(monkeypatch):
    """Load ``api/apps/connector_app.py`` in isolation with all imports stubbed.

    Builds fake ``api.*``, ``common.*``, ``rag.*``, ``quart``,
    ``google_auth_oauthlib`` and ``box_sdk_gen`` modules in ``sys.modules``
    (via monkeypatch, so they are removed after the test), then executes the
    real connector_app source file against them and returns the module.
    """
    repo_root = Path(__file__).resolve().parents[4]
    # --- api package skeleton -------------------------------------------
    api_pkg = ModuleType("api")
    api_pkg.__path__ = [str(repo_root / "api")]
    monkeypatch.setitem(sys.modules, "api", api_pkg)
    apps_mod = ModuleType("api.apps")
    apps_mod.__path__ = [str(repo_root / "api" / "apps")]
    apps_mod.current_user = SimpleNamespace(id="tenant-1")
    apps_mod.login_required = lambda fn: fn
    monkeypatch.setitem(sys.modules, "api.apps", apps_mod)
    db_mod = ModuleType("api.db")
    db_mod.InputType = SimpleNamespace(POLL="POLL")
    monkeypatch.setitem(sys.modules, "api.db", db_mod)
    services_pkg = ModuleType("api.db.services")
    services_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "api.db.services", services_pkg)
    # --- connector/sync-log service stubs (tests re-patch per scenario) --
    connector_service_mod = ModuleType("api.db.services.connector_service")
    class _StubConnectorService:
        @staticmethod
        def update_by_id(*_args, **_kwargs):
            return True
        @staticmethod
        def save(**_kwargs):
            return True
        @staticmethod
        def get_by_id(_connector_id):
            return True, _FakeConnectorRecord({"id": _connector_id})
        @staticmethod
        def list(_tenant_id):
            return []
        @staticmethod
        def resume(*_args, **_kwargs):
            return True
        @staticmethod
        def rebuild(*_args, **_kwargs):
            return None
        @staticmethod
        def delete_by_id(*_args, **_kwargs):
            return True
    class _StubSyncLogsService:
        @staticmethod
        def list_sync_tasks(*_args, **_kwargs):
            return [], 0
    connector_service_mod.ConnectorService = _StubConnectorService
    connector_service_mod.SyncLogsService = _StubSyncLogsService
    monkeypatch.setitem(sys.modules, "api.db.services.connector_service", connector_service_mod)
    # --- api.utils.api_utils: plain-dict result helpers ------------------
    api_utils_mod = ModuleType("api.utils.api_utils")
    async def _get_request_json():
        return {}
    api_utils_mod.get_request_json = _get_request_json
    api_utils_mod.get_json_result = lambda data=None, message="", code=0: {
        "code": code,
        "message": message,
        "data": data,
    }
    api_utils_mod.get_data_error_result = lambda message="", code=400, data=None: {
        "code": code,
        "message": message,
        "data": data,
    }
    api_utils_mod.validate_request = lambda *_args, **_kwargs: (lambda fn: fn)
    monkeypatch.setitem(sys.modules, "api.utils.api_utils", api_utils_mod)
    # --- common.* constants and data-source config -----------------------
    constants_mod = ModuleType("common.constants")
    constants_mod.RetCode = SimpleNamespace(
        ARGUMENT_ERROR=101,
        SERVER_ERROR=500,
        RUNNING=102,
        PERMISSION_ERROR=403,
    )
    constants_mod.TaskStatus = SimpleNamespace(SCHEDULE="schedule", CANCEL="cancel")
    monkeypatch.setitem(sys.modules, "common.constants", constants_mod)
    config_mod = ModuleType("common.data_source.config")
    config_mod.GOOGLE_DRIVE_WEB_OAUTH_REDIRECT_URI = "https://example.com/drive"
    config_mod.GMAIL_WEB_OAUTH_REDIRECT_URI = "https://example.com/gmail"
    config_mod.BOX_WEB_OAUTH_REDIRECT_URI = "https://example.com/box"
    config_mod.DocumentSource = SimpleNamespace(GMAIL="gmail", GOOGLE_DRIVE="google-drive")
    monkeypatch.setitem(sys.modules, "common.data_source.config", config_mod)
    google_constants_mod = ModuleType("common.data_source.google_util.constant")
    google_constants_mod.WEB_OAUTH_POPUP_TEMPLATE = (
        "<html><head><title>{title}</title></head>"
        "<body><h1>{heading}</h1><p>{message}</p><script>{payload_json}</script><script>{auto_close}</script></body></html>"
    )
    google_constants_mod.GOOGLE_SCOPES = {
        config_mod.DocumentSource.GMAIL: ["scope-gmail"],
        config_mod.DocumentSource.GOOGLE_DRIVE: ["scope-drive"],
    }
    monkeypatch.setitem(sys.modules, "common.data_source.google_util.constant", google_constants_mod)
    misc_mod = ModuleType("common.misc_utils")
    misc_mod.get_uuid = lambda: "uuid-from-helper"
    monkeypatch.setitem(sys.modules, "common.misc_utils", misc_mod)
    # --- rag package + fake Redis connection -----------------------------
    rag_pkg = ModuleType("rag")
    rag_pkg.__path__ = [str(repo_root / "rag")]
    monkeypatch.setitem(sys.modules, "rag", rag_pkg)
    rag_utils_pkg = ModuleType("rag.utils")
    rag_utils_pkg.__path__ = [str(repo_root / "rag" / "utils")]
    monkeypatch.setitem(sys.modules, "rag.utils", rag_utils_pkg)
    redis_mod = ModuleType("rag.utils.redis_conn")
    redis_mod.REDIS_CONN = _FakeRedis()
    monkeypatch.setitem(sys.modules, "rag.utils.redis_conn", redis_mod)
    # --- quart stub: request object + async make_response ----------------
    quart_mod = ModuleType("quart")
    quart_mod.request = SimpleNamespace(args=_Args(), json=_AwaitableValue({}))
    async def _make_response(body, status_code):
        return _FakeResponse(body, status_code)
    quart_mod.make_response = _make_response
    monkeypatch.setitem(sys.modules, "quart", quart_mod)
    # --- google_auth_oauthlib.flow.Flow -> _FakeFlow factory -------------
    google_pkg = ModuleType("google_auth_oauthlib")
    google_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "google_auth_oauthlib", google_pkg)
    google_flow_mod = ModuleType("google_auth_oauthlib.flow")
    class _StubFlow:
        @classmethod
        def from_client_config(cls, client_config, scopes):
            return _FakeFlow(client_config, scopes)
    google_flow_mod.Flow = _StubFlow
    monkeypatch.setitem(sys.modules, "google_auth_oauthlib.flow", google_flow_mod)
    # --- box_sdk_gen stubs -----------------------------------------------
    box_mod = ModuleType("box_sdk_gen")
    class _OAuthConfig:
        def __init__(self, client_id, client_secret):
            self.client_id = client_id
            self.client_secret = client_secret
    class _GetAuthorizeUrlOptions:
        def __init__(self, redirect_uri, state):
            self.redirect_uri = redirect_uri
            self.state = state
    box_mod.BoxOAuth = _FakeBoxOAuth
    box_mod.OAuthConfig = _OAuthConfig
    box_mod.GetAuthorizeUrlOptions = _GetAuthorizeUrlOptions
    monkeypatch.setitem(sys.modules, "box_sdk_gen", box_mod)
    # --- execute the real connector_app source against the stubs ---------
    module_path = repo_root / "api" / "apps" / "connector_app.py"
    spec = importlib.util.spec_from_file_location("test_connector_routes_unit", module_path)
    module = importlib.util.module_from_spec(spec)
    # The manager must exist before exec: route decorators run at import time.
    module.manager = _DummyManager()
    spec.loader.exec_module(module)
    return module
@pytest.mark.p2
def test_connector_basic_routes_and_task_controls(monkeypatch):
    """Cover set/list/get connector, log listing, resume, rebuild, and removal."""
    module = _load_connector_app(monkeypatch)
    # Neutralize asyncio.sleep so retry/poll loops in the app finish instantly.
    async def _no_sleep(_secs):
        return None
    monkeypatch.setattr(module.asyncio, "sleep", _no_sleep)
    records = {"conn-1": _FakeConnectorRecord({"id": "conn-1", "source": "drive"})}
    update_calls = []
    save_calls = []
    resume_calls = []
    delete_calls = []
    monkeypatch.setattr(module.ConnectorService, "update_by_id", lambda cid, payload: update_calls.append((cid, payload)))
    def _save(**payload):
        save_calls.append(payload)
        records[payload["id"]] = _FakeConnectorRecord(payload)
    monkeypatch.setattr(module.ConnectorService, "save", _save)
    monkeypatch.setattr(module.ConnectorService, "get_by_id", lambda cid: (True, records[cid]))
    monkeypatch.setattr(module.ConnectorService, "list", lambda tenant_id: [{"id": "listed", "tenant": tenant_id}])
    monkeypatch.setattr(module.SyncLogsService, "list_sync_tasks", lambda cid, page, page_size: ([{"id": "log-1"}], 9))
    monkeypatch.setattr(module.ConnectorService, "resume", lambda cid, status: resume_calls.append((cid, status)))
    monkeypatch.setattr(module.ConnectorService, "delete_by_id", lambda cid: delete_calls.append(cid))
    monkeypatch.setattr(module, "get_uuid", lambda: "generated-id")
    # set_connector(): payload with "id" updates the existing record.
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"id": "conn-1", "refresh_freq": 7, "config": {"x": 1}}),
    )
    res = _run(module.set_connector())
    assert update_calls == [("conn-1", {"refresh_freq": 7, "config": {"x": 1}})]
    assert res["data"]["id"] == "conn-1"
    # set_connector(): payload without "id" creates one with a generated id.
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"name": "new", "source": "gmail", "config": {"y": 2}}),
    )
    res = _run(module.set_connector())
    assert save_calls[-1]["id"] == "generated-id"
    assert save_calls[-1]["tenant_id"] == "tenant-1"
    assert save_calls[-1]["input_type"] == module.InputType.POLL
    assert res["data"]["id"] == "generated-id"
    # list/get connector branches (missing vs. found).
    list_res = module.list_connector()
    assert list_res["data"] == [{"id": "listed", "tenant": "tenant-1"}]
    monkeypatch.setattr(module.ConnectorService, "get_by_id", lambda _cid: (False, None))
    missing_res = module.get_connector("missing")
    assert missing_res["message"] == "Can't find this Connector!"
    monkeypatch.setattr(module.ConnectorService, "get_by_id", lambda cid: (True, _FakeConnectorRecord({"id": cid})))
    found_res = module.get_connector("conn-2")
    assert found_res["data"]["id"] == "conn-2"
    # list_logs(): paging args are forwarded to SyncLogsService.
    _set_request(module, args={"page": "2", "page_size": "7"})
    logs_res = module.list_logs("conn-log")
    assert logs_res["data"] == {"total": 9, "logs": [{"id": "log-1"}]}
    # resume(): True schedules, False cancels.
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"resume": True}))
    assert _run(module.resume("conn-r1"))["data"] is True
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"resume": False}))
    assert _run(module.resume("conn-r2"))["data"] is True
    assert ("conn-r1", module.TaskStatus.SCHEDULE) in resume_calls
    assert ("conn-r2", module.TaskStatus.CANCEL) in resume_calls
    # rebuild(): a non-None return from the service is reported as failure.
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"kb_id": "kb-1"}))
    monkeypatch.setattr(module.ConnectorService, "rebuild", lambda *_args: "rebuild-failed")
    failed_rebuild = _run(module.rebuild("conn-rb"))
    assert failed_rebuild["code"] == module.RetCode.SERVER_ERROR
    assert failed_rebuild["data"] is False
    monkeypatch.setattr(module.ConnectorService, "rebuild", lambda *_args: None)
    ok_rebuild = _run(module.rebuild("conn-rb"))
    assert ok_rebuild["data"] is True
    # rm_connector(): cancels the task then deletes the record.
    rm_res = module.rm_connector("conn-rm")
    assert rm_res["data"] is True
    assert ("conn-rm", module.TaskStatus.CANCEL) in resume_calls
    assert delete_calls == ["conn-rm"]
@pytest.mark.p2
def test_connector_oauth_helper_functions(monkeypatch):
    """Cover the pure OAuth helpers: cache keys, credential parsing, popup HTML."""
    module = _load_connector_app(monkeypatch)
    assert module._web_state_cache_key("flow-a", "gmail") == "gmail_web_flow_state:flow-a"
    assert module._web_result_cache_key("flow-b", "google-drive") == "google-drive_web_flow_result:flow-b"
    # _load_credentials accepts both dicts and JSON strings; rejects bad JSON.
    creds_dict = {"web": {"client_id": "id"}}
    assert module._load_credentials(creds_dict) == creds_dict
    assert module._load_credentials(json.dumps(creds_dict)) == creds_dict
    with pytest.raises(ValueError, match="Invalid Google credentials JSON"):
        module._load_credentials("{not-json")
    # _get_web_client_config requires a top-level "web" section.
    assert module._get_web_client_config(creds_dict) == {"web": {"client_id": "id"}}
    with pytest.raises(ValueError, match="must include a 'web'"):
        module._get_web_client_config({"installed": {"client_id": "id"}})
    # Popup rendering: both success and failure render HTML with HTTP 200.
    popup_ok = _run(module._render_web_oauth_popup("flow-1", True, "done", "gmail"))
    assert popup_ok.status_code == 200
    assert popup_ok.headers["Content-Type"] == "text/html; charset=utf-8"
    assert "Authorization complete" in popup_ok.body
    assert "ragflow-gmail-oauth" in popup_ok.body
    popup_error = _run(module._render_web_oauth_popup("flow-2", False, "<denied>", "google-drive"))
    assert popup_error.status_code == 200
    assert "Authorization failed" in popup_error.body
    assert "<denied>" in popup_error.body
@pytest.mark.p2
def test_start_google_web_oauth_matrix(monkeypatch):
    """Cover start_google_web_oauth: arg errors, config errors, and both happy paths."""
    module = _load_connector_app(monkeypatch)
    redis = _FakeRedis()
    monkeypatch.setattr(module, "REDIS_CONN", redis)
    monkeypatch.setattr(module.time, "time", lambda: 1700000000)
    # Capture every Flow the route builds so scopes can be asserted later.
    flow_calls = []
    def _from_client_config(client_config, scopes):
        flow = _FakeFlow(client_config, scopes)
        flow_calls.append(flow)
        return flow
    monkeypatch.setattr(module.Flow, "from_client_config", staticmethod(_from_client_config))
    # Unknown connector type -> ARGUMENT_ERROR.
    _set_request(module, args={"type": "invalid"})
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"credentials": "{}"}))
    invalid_type = _run(module.start_google_web_oauth())
    assert invalid_type["code"] == module.RetCode.ARGUMENT_ERROR
    # Empty redirect URI -> SERVER_ERROR.
    monkeypatch.setattr(module, "GMAIL_WEB_OAUTH_REDIRECT_URI", "")
    _set_request(module, args={"type": "gmail"})
    missing_redirect = _run(module.start_google_web_oauth())
    assert missing_redirect["code"] == module.RetCode.SERVER_ERROR
    monkeypatch.setattr(module, "GMAIL_WEB_OAUTH_REDIRECT_URI", "https://example.com/gmail")
    monkeypatch.setattr(module, "GOOGLE_DRIVE_WEB_OAUTH_REDIRECT_URI", "https://example.com/drive")
    # Malformed credentials JSON -> ARGUMENT_ERROR.
    _set_request(module, args={"type": "google-drive"})
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"credentials": "{invalid-json"}))
    invalid_credentials = _run(module.start_google_web_oauth())
    assert invalid_credentials["code"] == module.RetCode.ARGUMENT_ERROR
    # Credentials that already carry a refresh_token -> ARGUMENT_ERROR.
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"credentials": json.dumps({"web": {"client_id": "id"}, "refresh_token": "rt"})}),
    )
    has_refresh_token = _run(module.start_google_web_oauth())
    assert has_refresh_token["code"] == module.RetCode.ARGUMENT_ERROR
    # "installed"-style config without a "web" section -> ARGUMENT_ERROR.
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"credentials": json.dumps({"installed": {"x": 1}})}))
    missing_web = _run(module.start_google_web_oauth())
    assert missing_web["code"] == module.RetCode.ARGUMENT_ERROR
    # Happy paths: gmail (explicit type) and google-drive (default type).
    ids = iter(["flow-gmail", "flow-drive"])
    monkeypatch.setattr(module.uuid, "uuid4", lambda: next(ids))
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"credentials": json.dumps({"web": {"client_id": "id", "client_secret": "secret"}})}),
    )
    _set_request(module, args={"type": "gmail"})
    gmail_ok = _run(module.start_google_web_oauth())
    assert gmail_ok["code"] == 0
    assert gmail_ok["data"]["flow_id"] == "flow-gmail"
    assert gmail_ok["data"]["authorization_url"].endswith("flow-gmail")
    _set_request(module, args={})
    drive_ok = _run(module.start_google_web_oauth())
    assert drive_ok["code"] == 0
    assert drive_ok["data"]["flow_id"] == "flow-drive"
    assert drive_ok["data"]["authorization_url"].endswith("flow-drive")
    # Each flow used the scopes configured for its document source.
    assert any(call.scopes == module.GOOGLE_SCOPES[module.DocumentSource.GMAIL] for call in flow_calls)
    assert any(call.scopes == module.GOOGLE_SCOPES[module.DocumentSource.GOOGLE_DRIVE] for call in flow_calls)
    # Flow state was cached in Redis under the per-source key.
    assert "gmail_web_flow_state:flow-gmail" in redis.store
    assert "google-drive_web_flow_state:flow-drive" in redis.store
@pytest.mark.p2
def test_google_web_oauth_callbacks_matrix(monkeypatch):
    """Run the gmail and drive OAuth callbacks through every error and success branch."""
    module = _load_connector_app(monkeypatch)
    flow_calls = []
    def _from_client_config(client_config, scopes):
        flow = _FakeFlow(client_config, scopes)
        flow_calls.append(flow)
        return flow
    monkeypatch.setattr(module.Flow, "from_client_config", staticmethod(_from_client_config))
    # Same branch matrix for both callbacks; only source/redirect/scopes differ.
    callback_specs = [
        (
            module.google_gmail_web_oauth_callback,
            "gmail",
            module.GMAIL_WEB_OAUTH_REDIRECT_URI,
            module.GOOGLE_SCOPES[module.DocumentSource.GMAIL],
        ),
        (
            module.google_drive_web_oauth_callback,
            "google-drive",
            module.GOOGLE_DRIVE_WEB_OAUTH_REDIRECT_URI,
            module.GOOGLE_SCOPES[module.DocumentSource.GOOGLE_DRIVE],
        ),
    ]
    for callback, source, expected_redirect, expected_scopes in callback_specs:
        # Fresh Redis per callback so cached state does not leak across sources.
        redis = _FakeRedis()
        monkeypatch.setattr(module, "REDIS_CONN", redis)
        # No state query parameter at all.
        _set_request(module, args={})
        missing_state = _run(callback())
        assert "Missing OAuth state parameter." in missing_state.body
        # State present but nothing cached -> expired session.
        _set_request(module, args={"state": "sid"})
        expired_state = _run(callback())
        assert "Authorization session expired" in expired_state.body
        # Cached state lacking client_config -> invalid session, cache cleared.
        redis.store[module._web_state_cache_key("sid", source)] = json.dumps({"user_id": "tenant-1"})
        _set_request(module, args={"state": "sid"})
        invalid_state = _run(callback())
        assert "Authorization session was invalid" in invalid_state.body
        assert module._web_state_cache_key("sid", source) in redis.deleted
        # Provider reported an OAuth error -> error description surfaces.
        redis.store[module._web_state_cache_key("sid", source)] = json.dumps({
            "user_id": "tenant-1",
            "client_config": {"web": {"client_id": "cid"}},
        })
        _set_request(module, args={"state": "sid", "error": "denied", "error_description": "permission denied"})
        oauth_error = _run(callback())
        assert "permission denied" in oauth_error.body
        # Valid state but no authorization code.
        redis.store[module._web_state_cache_key("sid", source)] = json.dumps({
            "user_id": "tenant-1",
            "client_config": {"web": {"client_id": "cid"}},
        })
        _set_request(module, args={"state": "sid"})
        missing_code = _run(callback())
        assert "Missing authorization code" in missing_code.body
        # Full success: token exchanged, result cached, state cleared.
        redis.store[module._web_state_cache_key("sid", source)] = json.dumps({
            "user_id": "tenant-1",
            "client_config": {"web": {"client_id": "cid"}},
        })
        _set_request(module, args={"state": "sid", "code": "code-123"})
        success = _run(callback())
        assert "Authorization completed successfully." in success.body
        result_key = module._web_result_cache_key("sid", source)
        assert result_key in redis.store
        assert module._web_state_cache_key("sid", source) in redis.deleted
        assert flow_calls[-1].redirect_uri == expected_redirect
        assert flow_calls[-1].scopes == expected_scopes
        assert flow_calls[-1].token_code == "code-123"
@pytest.mark.p2
def test_poll_google_web_result_matrix(monkeypatch):
    """Cover poll_google_web_result: bad type, pending, wrong user, and success."""
    module = _load_connector_app(monkeypatch)
    redis = _FakeRedis()
    monkeypatch.setattr(module, "REDIS_CONN", redis)
    # Unknown connector type -> ARGUMENT_ERROR.
    _set_request(module, args={"type": "invalid"}, json_body={"flow_id": "flow-1"})
    invalid_type = _run(module.poll_google_web_result())
    assert invalid_type["code"] == module.RetCode.ARGUMENT_ERROR
    # No cached result yet -> RUNNING (caller should keep polling).
    _set_request(module, args={"type": "gmail"}, json_body={"flow_id": "flow-1"})
    pending = _run(module.poll_google_web_result())
    assert pending["code"] == module.RetCode.RUNNING
    # Result cached for a different user -> PERMISSION_ERROR.
    redis.store[module._web_result_cache_key("flow-1", "gmail")] = json.dumps(
        {"user_id": "another-user", "credentials": "token-x"}
    )
    _set_request(module, args={"type": "gmail"}, json_body={"flow_id": "flow-1"})
    permission_error = _run(module.poll_google_web_result())
    assert permission_error["code"] == module.RetCode.PERMISSION_ERROR
    # Matching user -> credentials returned and result entry deleted.
    redis.store[module._web_result_cache_key("flow-1", "gmail")] = json.dumps(
        {"user_id": "tenant-1", "credentials": "token-ok"}
    )
    _set_request(module, args={"type": "gmail"}, json_body={"flow_id": "flow-1"})
    success = _run(module.poll_google_web_result())
    assert success["code"] == 0
    assert success["data"] == {"credentials": "token-ok"}
    assert module._web_result_cache_key("flow-1", "gmail") in redis.deleted
@pytest.mark.p2
def test_box_oauth_start_callback_and_poll_matrix(monkeypatch):
    """Cover the Box OAuth trio: start, browser callback, and result polling."""
    module = _load_connector_app(monkeypatch)
    redis = _FakeRedis()
    monkeypatch.setattr(module, "REDIS_CONN", redis)
    # Track every BoxOAuth instance so the exchanged code can be asserted.
    created_auth = []
    class _TrackingBoxOAuth(_FakeBoxOAuth):
        def __init__(self, config):
            super().__init__(config)
            created_auth.append(self)
    monkeypatch.setattr(module, "BoxOAuth", _TrackingBoxOAuth)
    monkeypatch.setattr(module.uuid, "uuid4", lambda: "flow-box")
    monkeypatch.setattr(module.time, "time", lambda: 1800000000)
    # start: missing client_id/client_secret/redirect_uri -> ARGUMENT_ERROR.
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({}))
    missing_params = _run(module.start_box_web_oauth())
    assert missing_params["code"] == module.RetCode.ARGUMENT_ERROR
    # start: happy path caches flow state and returns the authorize URL.
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue({"client_id": "cid", "client_secret": "sec", "redirect_uri": "https://box.local/callback"}),
    )
    start_ok = _run(module.start_box_web_oauth())
    assert start_ok["code"] == 0
    assert start_ok["data"]["flow_id"] == "flow-box"
    assert "authorization_url" in start_ok["data"]
    assert module._web_state_cache_key("flow-box", "box") in redis.store
    # callback: missing state, then missing code, then null cached session.
    _set_request(module, args={})
    missing_state = _run(module.box_web_oauth_callback())
    assert "Missing OAuth parameters." in missing_state.body
    _set_request(module, args={"state": "flow-box"})
    missing_code = _run(module.box_web_oauth_callback())
    assert "Missing authorization code from Box." in missing_code.body
    redis.store[module._web_state_cache_key("flow-null", "box")] = "null"
    _set_request(module, args={"state": "flow-null", "code": "abc"})
    invalid_session = _run(module.box_web_oauth_callback())
    assert invalid_session["code"] == module.RetCode.ARGUMENT_ERROR
    # callback: provider-reported error description surfaces in the popup.
    redis.store[module._web_state_cache_key("flow-box", "box")] = json.dumps(
        {"user_id": "tenant-1", "client_id": "cid", "client_secret": "sec"}
    )
    _set_request(module, args={"state": "flow-box", "code": "abc", "error": "access_denied", "error_description": "denied"})
    callback_error = _run(module.box_web_oauth_callback())
    assert "denied" in callback_error.body
    # callback: success exchanges the code, caches the result, clears the state.
    redis.store[module._web_state_cache_key("flow-ok", "box")] = json.dumps(
        {"user_id": "tenant-1", "client_id": "cid", "client_secret": "sec"}
    )
    _set_request(module, args={"state": "flow-ok", "code": "code-ok"})
    callback_success = _run(module.box_web_oauth_callback())
    assert "Authorization completed successfully." in callback_success.body
    assert created_auth[-1].exchange_code == "code-ok"
    assert module._web_result_cache_key("flow-ok", "box") in redis.store
    assert module._web_state_cache_key("flow-ok", "box") in redis.deleted
    # poll: pending, wrong user, then success with token payload + cleanup.
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue({"flow_id": "flow-ok"}))
    redis.store.pop(module._web_result_cache_key("flow-ok", "box"), None)
    pending = _run(module.poll_box_web_result())
    assert pending["code"] == module.RetCode.RUNNING
    redis.store[module._web_result_cache_key("flow-ok", "box")] = json.dumps({"user_id": "another-user"})
    permission_error = _run(module.poll_box_web_result())
    assert permission_error["code"] == module.RetCode.PERMISSION_ERROR
    redis.store[module._web_result_cache_key("flow-ok", "box")] = json.dumps(
        {"user_id": "tenant-1", "access_token": "at", "refresh_token": "rt"}
    )
    poll_success = _run(module.poll_box_web_result())
    assert poll_success["code"] == 0
    assert poll_success["data"]["credentials"]["access_token"] == "at"
    assert module._web_result_cache_key("flow-ok", "box") in redis.deleted
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_connector_app/test_connector_routes_unit.py",
"license": "Apache License 2.0",
"lines": 549,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_dataset_management/test_dataset_sdk_routes_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import functools
import importlib.util
import inspect
import json
import os
import sys
from copy import deepcopy
from enum import Enum
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _AwaitableValue:
def __init__(self, value):
self._value = value
def __await__(self):
async def _co():
return self._value
return _co().__await__()
class _DummyArgs(dict):
def get(self, key, default=None, type=None):
value = super().get(key, default)
if value is None or type is None:
return value
try:
return type(value)
except (TypeError, ValueError):
return default
class _Field:
def __init__(self, name):
self.name = name
def __eq__(self, other):
return (self.name, "==", other)
class _KB:
def __init__(
self,
*,
kb_id="kb-1",
name="old",
tenant_id="tenant-1",
parser_id="naive",
parser_config=None,
embd_id="embd-1",
chunk_num=0,
pagerank=0,
graphrag_task_id="",
raptor_task_id="",
):
self.id = kb_id
self.name = name
self.tenant_id = tenant_id
self.parser_id = parser_id
self.parser_config = parser_config or {}
self.embd_id = embd_id
self.chunk_num = chunk_num
self.pagerank = pagerank
self.graphrag_task_id = graphrag_task_id
self.raptor_task_id = raptor_task_id
def to_dict(self):
return {
"id": self.id,
"name": self.name,
"tenant_id": self.tenant_id,
"parser_id": self.parser_id,
"parser_config": deepcopy(self.parser_config),
"embd_id": self.embd_id,
"pagerank": self.pagerank,
}
def _run(coro):
return asyncio.run(coro)
@pytest.fixture(scope="session")
def auth():
    # Session-wide auth token placeholder; these unit tests never hit a real
    # server, so any string satisfies the shared-fixture contract.
    return "unit-auth"
@pytest.fixture(scope="session", autouse=True)
def set_tenant_info():
    # Autouse no-op that shadows the suite-wide fixture of the same name,
    # preventing it from contacting a live backend during these unit tests.
    return None
def _set_request_args(monkeypatch, module, args):
    # Replace the module-level ``request`` with a stub whose ``args`` behaves
    # like Quart query parameters (including ``type=`` coercion).
    monkeypatch.setattr(module, "request", SimpleNamespace(args=_DummyArgs(args)))
def _patch_json_parser(monkeypatch, module, payload_state, err_state=None):
    # Make the module's JSON validator yield a deep copy of ``payload_state``,
    # so tests can mutate the shared dict between calls without the handler
    # seeing aliased state.
    async def _parse_json(*_args, **_kwargs):
        return deepcopy(payload_state), err_state

    monkeypatch.setattr(module, "validate_and_parse_json_request", _parse_json)
def _load_dataset_module(monkeypatch):
    """Execute ``api/apps/sdk/dataset.py`` in isolation and return the module.

    Every project dependency (quart, the ``api``/``common``/``rag`` package
    trees, DB services, settings, validators) is replaced by an in-memory stub
    registered in ``sys.modules`` via ``monkeypatch`` (so the real modules are
    restored after the test) BEFORE the target file is executed.  Stub methods
    return benign defaults; individual tests monkeypatch them per scenario.
    The registration order matters: parents before children, and everything
    before ``exec_module`` at the bottom.
    """
    repo_root = Path(__file__).resolve().parents[4]
    # --- quart framework stub: only the names dataset.py imports ---
    quart_mod = ModuleType("quart")
    quart_mod.Request = type("Request", (), {})
    quart_mod.request = SimpleNamespace(args=_DummyArgs())
    monkeypatch.setitem(sys.modules, "quart", quart_mod)
    # --- skeleton of the ``api`` package hierarchy ---
    api_pkg = ModuleType("api")
    api_pkg.__path__ = [str(repo_root / "api")]
    monkeypatch.setitem(sys.modules, "api", api_pkg)
    utils_pkg = ModuleType("api.utils")
    utils_pkg.__path__ = [str(repo_root / "api" / "utils")]
    monkeypatch.setitem(sys.modules, "api.utils", utils_pkg)
    api_pkg.utils = utils_pkg
    apps_pkg = ModuleType("api.apps")
    apps_pkg.__path__ = [str(repo_root / "api" / "apps")]
    monkeypatch.setitem(sys.modules, "api.apps", apps_pkg)
    api_pkg.apps = apps_pkg
    sdk_pkg = ModuleType("api.apps.sdk")
    sdk_pkg.__path__ = [str(repo_root / "api" / "apps" / "sdk")]
    monkeypatch.setitem(sys.modules, "api.apps.sdk", sdk_pkg)
    apps_pkg.sdk = sdk_pkg
    db_pkg = ModuleType("api.db")
    db_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "api.db", db_pkg)
    api_pkg.db = db_pkg
    # ORM model stub: _Field instances record comparison expressions as tuples.
    db_models_mod = ModuleType("api.db.db_models")
    db_models_mod.File = SimpleNamespace(
        source_type=_Field("source_type"),
        id=_Field("id"),
        type=_Field("type"),
        name=_Field("name"),
    )
    monkeypatch.setitem(sys.modules, "api.db.db_models", db_models_mod)
    services_pkg = ModuleType("api.db.services")
    services_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "api.db.services", services_pkg)
    # --- service-layer stubs with happy-path defaults ---
    document_service_mod = ModuleType("api.db.services.document_service")

    class _StubDocumentService:
        @staticmethod
        def query(**_kwargs):
            return []

        @staticmethod
        def remove_document(*_args, **_kwargs):
            return True

        @staticmethod
        def get_by_kb_id(**_kwargs):
            return [], 0

    document_service_mod.DocumentService = _StubDocumentService
    document_service_mod.queue_raptor_o_graphrag_tasks = lambda **_kwargs: "task-queued"
    monkeypatch.setitem(sys.modules, "api.db.services.document_service", document_service_mod)
    services_pkg.document_service = document_service_mod
    file2document_service_mod = ModuleType("api.db.services.file2document_service")

    class _StubFile2DocumentService:
        @staticmethod
        def get_by_document_id(_doc_id):
            return [SimpleNamespace(file_id="file-1")]

        @staticmethod
        def delete_by_document_id(_doc_id):
            return None

    file2document_service_mod.File2DocumentService = _StubFile2DocumentService
    monkeypatch.setitem(sys.modules, "api.db.services.file2document_service", file2document_service_mod)
    services_pkg.file2document_service = file2document_service_mod
    file_service_mod = ModuleType("api.db.services.file_service")

    class _StubFileService:
        @staticmethod
        def filter_delete(_filters):
            return None

    file_service_mod.FileService = _StubFileService
    monkeypatch.setitem(sys.modules, "api.db.services.file_service", file_service_mod)
    services_pkg.file_service = file_service_mod
    knowledgebase_service_mod = ModuleType("api.db.services.knowledgebase_service")

    class _StubKnowledgebaseService:
        @staticmethod
        def create_with_name(**_kwargs):
            return True, {"id": "kb-1"}

        @staticmethod
        def save(**_kwargs):
            return True

        @staticmethod
        def get_by_id(_kb_id):
            return True, _KB()

        @staticmethod
        def query(**_kwargs):
            return []

        @staticmethod
        def get_or_none(**_kwargs):
            return _KB()

        @staticmethod
        def delete_by_id(_kb_id):
            return True

        @staticmethod
        def update_by_id(_kb_id, _payload):
            return True

        @staticmethod
        def get_kb_by_id(_kb_id, _tenant_id):
            return [SimpleNamespace(id=_kb_id)]

        @staticmethod
        def get_kb_by_name(_name, _tenant_id):
            return [SimpleNamespace(name=_name)]

        @staticmethod
        def get_list(*_args, **_kwargs):
            return [], 0

        @staticmethod
        def accessible(_dataset_id, _tenant_id):
            return True

    knowledgebase_service_mod.KnowledgebaseService = _StubKnowledgebaseService
    monkeypatch.setitem(sys.modules, "api.db.services.knowledgebase_service", knowledgebase_service_mod)
    services_pkg.knowledgebase_service = knowledgebase_service_mod
    task_service_mod = ModuleType("api.db.services.task_service")

    class _StubTaskService:
        @staticmethod
        def get_by_id(_task_id):
            return False, None

    task_service_mod.GRAPH_RAPTOR_FAKE_DOC_ID = "fake-doc"
    task_service_mod.TaskService = _StubTaskService
    monkeypatch.setitem(sys.modules, "api.db.services.task_service", task_service_mod)
    services_pkg.task_service = task_service_mod
    user_service_mod = ModuleType("api.db.services.user_service")

    class _StubTenantService:
        @staticmethod
        def get_by_id(_tenant_id):
            return True, SimpleNamespace(embd_id="embd-default")

        @staticmethod
        def get_joined_tenants_by_user_id(_tenant_id):
            return [{"tenant_id": "tenant-1"}]

    user_service_mod.TenantService = _StubTenantService
    monkeypatch.setitem(sys.modules, "api.db.services.user_service", user_service_mod)
    services_pkg.user_service = user_service_mod
    # --- ``common.constants`` stub mirroring the real enums/codes used ---
    constants_mod = ModuleType("common.constants")

    class _RetCode:
        SUCCESS = 0
        ARGUMENT_ERROR = 101
        DATA_ERROR = 102
        AUTHENTICATION_ERROR = 108

    class _FileSource:
        KNOWLEDGEBASE = "knowledgebase"

    class _StatusEnum(Enum):
        VALID = "valid"

    constants_mod.RetCode = _RetCode
    constants_mod.FileSource = _FileSource
    constants_mod.StatusEnum = _StatusEnum
    constants_mod.PAGERANK_FLD = "pagerank"
    monkeypatch.setitem(sys.modules, "common.constants", constants_mod)
    common_pkg = ModuleType("common")
    common_pkg.__path__ = [str(repo_root / "common")]
    # Doc-store and retriever doubles; tests swap these per scenario.
    common_pkg.settings = SimpleNamespace(
        docStoreConn=SimpleNamespace(
            delete_idx=lambda *_args, **_kwargs: None,
            delete=lambda *_args, **_kwargs: None,
            update=lambda *_args, **_kwargs: None,
            index_exist=lambda *_args, **_kwargs: False,
        ),
        retriever=SimpleNamespace(search=lambda *_args, **_kwargs: _AwaitableValue(SimpleNamespace(ids=[], field={}))),
    )
    monkeypatch.setitem(sys.modules, "common", common_pkg)
    # --- minimal re-implementation of api.utils.api_utils helpers ---
    api_utils_mod = ModuleType("api.utils.api_utils")

    def _deep_merge(base, updates):
        merged = deepcopy(base)
        for key, value in updates.items():
            if isinstance(value, dict) and isinstance(merged.get(key), dict):
                merged[key] = _deep_merge(merged[key], value)
            else:
                merged[key] = value
        return merged

    def _get_result(*, data=None, message="", code=_RetCode.SUCCESS, total=None):
        payload = {"code": code, "data": data, "message": message}
        if total is not None:
            payload["total"] = total
        return payload

    def _get_error_argument_result(message=""):
        return _get_result(code=_RetCode.ARGUMENT_ERROR, message=message)

    def _get_error_data_result(message=""):
        return _get_result(code=_RetCode.DATA_ERROR, message=message)

    def _get_error_permission_result(message=""):
        return _get_result(code=_RetCode.AUTHENTICATION_ERROR, message=message)

    def _token_required(func):
        # Pass-through auth decorator; preserves the handler's sync/async nature.
        @functools.wraps(func)
        async def _async_wrapper(*args, **kwargs):
            return await func(*args, **kwargs)

        @functools.wraps(func)
        def _sync_wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        return _async_wrapper if asyncio.iscoroutinefunction(func) else _sync_wrapper

    api_utils_mod.deep_merge = _deep_merge
    api_utils_mod.get_error_argument_result = _get_error_argument_result
    api_utils_mod.get_error_data_result = _get_error_data_result
    api_utils_mod.get_error_permission_result = _get_error_permission_result
    api_utils_mod.get_parser_config = lambda _chunk_method, _unused: {"auto": True}
    api_utils_mod.get_result = _get_result
    api_utils_mod.remap_dictionary_keys = lambda data: data
    api_utils_mod.token_required = _token_required
    api_utils_mod.verify_embedding_availability = lambda _embd_id, _tenant_id: (True, None)
    monkeypatch.setitem(sys.modules, "api.utils.api_utils", api_utils_mod)

    async def _parse_json(*_args, **_kwargs):
        return {}, None

    def _parse_args(*_args, **_kwargs):
        return {"name": "", "page": 1, "page_size": 30, "orderby": "create_time", "desc": True}, None

    # The REAL validation_utils module is loaded from disk, then its two
    # request parsers are overridden with the permissive stubs above.
    validation_spec = importlib.util.spec_from_file_location(
        "api.utils.validation_utils", repo_root / "api" / "utils" / "validation_utils.py"
    )
    validation_mod = importlib.util.module_from_spec(validation_spec)
    monkeypatch.setitem(sys.modules, "api.utils.validation_utils", validation_mod)
    validation_spec.loader.exec_module(validation_mod)
    validation_mod.validate_and_parse_json_request = _parse_json
    validation_mod.validate_and_parse_request_args = _parse_args
    # --- rag.nlp.search stub ---
    rag_pkg = ModuleType("rag")
    rag_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "rag", rag_pkg)
    rag_nlp_pkg = ModuleType("rag.nlp")
    rag_nlp_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "rag.nlp", rag_nlp_pkg)
    search_mod = ModuleType("rag.nlp.search")
    search_mod.index_name = lambda _tenant_id: "idx"
    monkeypatch.setitem(sys.modules, "rag.nlp.search", search_mod)
    rag_nlp_pkg.search = search_mod
    # --- finally execute dataset.py with a no-op route manager attached ---
    module_name = "test_dataset_sdk_routes_unit_module"
    module_path = repo_root / "api" / "apps" / "sdk" / "dataset.py"
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    # ``manager`` must exist before exec: dataset.py registers routes on it.
    module.manager = _DummyManager()
    monkeypatch.setitem(sys.modules, module_name, module)
    spec.loader.exec_module(module)
    return module
@pytest.mark.p2
def test_create_route_error_matrix_unit(monkeypatch):
    """Walk the error branches of the dataset ``create`` route in sequence."""
    module = _load_dataset_module(monkeypatch)
    req_state = {"name": "kb"}
    _patch_json_parser(monkeypatch, module, req_state)
    # create_with_name failure propagates the service's own error payload.
    monkeypatch.setattr(module.KnowledgebaseService, "create_with_name", lambda **_kwargs: (False, {"code": 777, "message": "early"}))
    res = _run(inspect.unwrap(module.create)("tenant-1"))
    assert res["code"] == 777, res
    monkeypatch.setattr(module.KnowledgebaseService, "create_with_name", lambda **_kwargs: (True, {"id": "kb-1"}))
    # Unknown tenant.
    monkeypatch.setattr(module.TenantService, "get_by_id", lambda _tenant_id: (False, None))
    res = _run(inspect.unwrap(module.create)("tenant-1"))
    assert res["message"] == "Tenant not found", res
    monkeypatch.setattr(module.TenantService, "get_by_id", lambda _tenant_id: (True, SimpleNamespace(embd_id="embd-1")))
    # Persistence failure maps to DATA_ERROR.
    monkeypatch.setattr(module.KnowledgebaseService, "save", lambda **_kwargs: False)
    res = _run(inspect.unwrap(module.create)("tenant-1"))
    assert res["code"] == module.RetCode.DATA_ERROR, res
    monkeypatch.setattr(module.KnowledgebaseService, "save", lambda **_kwargs: True)
    # Save succeeds but the re-read of the new dataset fails.
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (False, None))
    res = _run(inspect.unwrap(module.create)("tenant-1"))
    assert "Dataset created failed" in res["message"], res
    # An unexpected exception during save is mapped to a generic DB message.
    monkeypatch.setattr(module.KnowledgebaseService, "save", lambda **_kwargs: (_ for _ in ()).throw(RuntimeError("save boom")))
    res = _run(inspect.unwrap(module.create)("tenant-1"))
    assert res["message"] == "Database operation failed", res
@pytest.mark.p2
def test_delete_route_error_summary_matrix_unit(monkeypatch):
    """Exercise partial-failure bookkeeping and DB-error paths of ``delete``."""
    module = _load_dataset_module(monkeypatch)
    req_state = {"ids": ["kb-1"]}
    _patch_json_parser(monkeypatch, module, req_state)
    kb = _KB(kb_id="kb-1", name="kb-1", tenant_id="tenant-1")
    monkeypatch.setattr(module.KnowledgebaseService, "get_or_none", lambda **_kwargs: kb)
    monkeypatch.setattr(module.DocumentService, "query", lambda **_kwargs: [SimpleNamespace(id="doc-1")])
    # Every sub-step fails: document removal, index drop, and kb deletion.
    monkeypatch.setattr(module.DocumentService, "remove_document", lambda *_args, **_kwargs: False)
    monkeypatch.setattr(module.settings, "docStoreConn", SimpleNamespace(delete_idx=lambda *_args, **_kwargs: (_ for _ in ()).throw(RuntimeError("drop failed"))))
    monkeypatch.setattr(module.KnowledgebaseService, "delete_by_id", lambda _kb_id: False)
    res = _run(inspect.unwrap(module.delete)("tenant-1"))
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert "Successfully deleted 0 datasets" in res["message"], res
    # kb deletion now succeeds; per-document errors are still surfaced.
    monkeypatch.setattr(module.settings, "docStoreConn", SimpleNamespace(delete_idx=lambda *_args, **_kwargs: None))
    monkeypatch.setattr(module.KnowledgebaseService, "delete_by_id", lambda _kb_id: True)
    res = _run(inspect.unwrap(module.delete)("tenant-1"))
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"]["success_count"] == 1, res
    assert res["data"]["errors"], res
    # ids=None takes the query-all path, which hits the DB-exception branch.
    req_state["ids"] = None
    monkeypatch.setattr(
        module.KnowledgebaseService,
        "query",
        lambda **_kwargs: (_ for _ in ()).throw(module.OperationalError("db down")),
    )
    res = _run(inspect.unwrap(module.delete)("tenant-1"))
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert res["message"] == "Database operation failed", res
@pytest.mark.p2
def test_update_route_branch_matrix_unit(monkeypatch):
    """Cover the validation, conflict, and persistence branches of ``update``."""
    module = _load_dataset_module(monkeypatch)
    req_state = {"name": "new"}
    _patch_json_parser(monkeypatch, module, req_state)
    # Unknown dataset id -> permission error.
    monkeypatch.setattr(module.KnowledgebaseService, "get_or_none", lambda **_kwargs: None)
    res = _run(inspect.unwrap(module.update)("tenant-1", "kb-1"))
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res
    kb = _KB(kb_id="kb-1", name="old", chunk_num=0)
    def _get_or_none_duplicate(**kwargs):
        # Lookup by id finds the dataset; lookup by name finds a duplicate.
        if kwargs.get("id"):
            return kb
        if kwargs.get("name"):
            return SimpleNamespace(id="dup")
        return None
    monkeypatch.setattr(module.KnowledgebaseService, "get_or_none", _get_or_none_duplicate)
    req_state.clear()
    req_state.update({"name": "new"})
    res = _run(inspect.unwrap(module.update)("tenant-1", "kb-1"))
    assert "already exists" in res["message"], res
    # Changing the embedding model is rejected once chunks exist.
    kb_chunked = _KB(kb_id="kb-1", name="old", chunk_num=2, embd_id="embd-1")
    monkeypatch.setattr(module.KnowledgebaseService, "get_or_none", lambda **kwargs: kb_chunked if kwargs.get("id") else None)
    req_state.clear()
    req_state.update({"embd_id": "embd-2"})
    res = _run(inspect.unwrap(module.update)("tenant-1", "kb-1"))
    assert "chunk_num" in res["message"], res
    # pagerank is rejected when DOC_ENGINE=infinity.
    kb_rank = _KB(kb_id="kb-1", name="old", pagerank=0)
    monkeypatch.setattr(module.KnowledgebaseService, "get_or_none", lambda **kwargs: kb_rank if kwargs.get("id") else None)
    req_state.clear()
    req_state.update({"pagerank": 3})
    os.environ["DOC_ENGINE"] = "infinity"
    res = _run(inspect.unwrap(module.update)("tenant-1", "kb-1"))
    assert "doc_engine" in res["message"], res
    os.environ.pop("DOC_ENGINE", None)
    # A positive pagerank triggers a doc-store update filtered by kb_id.
    update_calls = []
    monkeypatch.setattr(module.settings, "docStoreConn", SimpleNamespace(update=lambda *args, **_kwargs: update_calls.append(args)))
    monkeypatch.setattr(module.KnowledgebaseService, "update_by_id", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, _KB(kb_id="kb-1", pagerank=3)))
    req_state.clear()
    req_state.update({"pagerank": 3})
    res = _run(inspect.unwrap(module.update)("tenant-1", "kb-1"))
    assert res["code"] == module.RetCode.SUCCESS, res
    assert update_calls and update_calls[-1][0] == {"kb_id": "kb-1"}, update_calls
    update_calls.clear()
    # Resetting pagerank to 0 instead targets docs where the field exists.
    monkeypatch.setattr(module.KnowledgebaseService, "get_or_none", lambda **kwargs: _KB(kb_id="kb-1", pagerank=3) if kwargs.get("id") else None)
    req_state.clear()
    req_state.update({"pagerank": 0})
    res = _run(inspect.unwrap(module.update)("tenant-1", "kb-1"))
    assert res["code"] == module.RetCode.SUCCESS, res
    assert update_calls and update_calls[-1][0] == {"exists": module.PAGERANK_FLD}, update_calls
    # update_by_id failure, then post-update re-read failure.
    monkeypatch.setattr(module.KnowledgebaseService, "update_by_id", lambda *_args, **_kwargs: False)
    req_state.clear()
    req_state.update({"description": "changed"})
    res = _run(inspect.unwrap(module.update)("tenant-1", "kb-1"))
    assert "Update dataset error" in res["message"], res
    monkeypatch.setattr(module.KnowledgebaseService, "update_by_id", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (False, None))
    res = _run(inspect.unwrap(module.update)("tenant-1", "kb-1"))
    assert "Dataset created failed" in res["message"], res
    # Database exception maps to the generic failure message.
    monkeypatch.setattr(
        module.KnowledgebaseService,
        "get_or_none",
        lambda **_kwargs: (_ for _ in ()).throw(module.OperationalError("update down")),
    )
    res = _run(inspect.unwrap(module.update)("tenant-1", "kb-1"))
    assert res["message"] == "Database operation failed", res
@pytest.mark.p2
def test_list_knowledge_graph_delete_kg_matrix_unit(monkeypatch):
    """Cover list_datasets DB errors plus knowledge-graph read/delete paths."""
    module = _load_dataset_module(monkeypatch)
    _set_request_args(monkeypatch, module, {"id": "", "name": "", "page": 1, "page_size": 30, "orderby": "create_time", "desc": True})
    monkeypatch.setattr(
        module,
        "validate_and_parse_request_args",
        lambda *_args, **_kwargs: ({"name": "", "page": 1, "page_size": 30, "orderby": "create_time", "desc": True}, None),
    )
    # get_list raising OperationalError maps to the generic failure message.
    monkeypatch.setattr(
        module.KnowledgebaseService,
        "get_list",
        lambda *_args, **_kwargs: (_ for _ in ()).throw(module.OperationalError("list down")),
    )
    res = module.list_datasets("tenant-1")
    assert res["code"] == module.RetCode.DATA_ERROR, res
    assert res["message"] == "Database operation failed", res
    # knowledge_graph: inaccessible dataset -> permission error.
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: False)
    res = _run(inspect.unwrap(module.knowledge_graph)("tenant-1", "kb-1"))
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res
    # Missing search index -> empty graph payload.
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, _KB(tenant_id="tenant-1")))
    monkeypatch.setattr(module.search, "index_name", lambda _tenant_id: "idx")
    monkeypatch.setattr(module.settings, "docStoreConn", SimpleNamespace(index_exist=lambda *_args, **_kwargs: False))
    res = _run(inspect.unwrap(module.knowledge_graph)("tenant-1", "kb-1"))
    assert res["data"] == {"graph": {}, "mind_map": {}}, res
    monkeypatch.setattr(module.settings, "docStoreConn", SimpleNamespace(index_exist=lambda *_args, **_kwargs: True))
    # No stored graph chunks -> still empty payload.
    class _EmptyRetriever:
        async def search(self, *_args, **_kwargs):
            return SimpleNamespace(ids=[], field={})
    monkeypatch.setattr(module.settings, "retriever", _EmptyRetriever())
    res = _run(inspect.unwrap(module.knowledge_graph)("tenant-1", "kb-1"))
    assert res["data"] == {"graph": {}, "mind_map": {}}, res
    # Malformed JSON in the stored graph is tolerated and yields an empty graph.
    class _BadRetriever:
        async def search(self, *_args, **_kwargs):
            return SimpleNamespace(ids=["bad"], field={"bad": {"knowledge_graph_kwd": "graph", "content_with_weight": "{bad"}})
    monkeypatch.setattr(module.settings, "retriever", _BadRetriever())
    res = _run(inspect.unwrap(module.knowledge_graph)("tenant-1", "kb-1"))
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"]["graph"] == {}, res
    # A valid graph is filtered: both nodes survive, but the n1->n1 self-loop
    # and the edge to the unknown node n3 are dropped, leaving one edge.
    payload = {
        "nodes": [{"id": "n2", "pagerank": 2}, {"id": "n1", "pagerank": 5}],
        "edges": [
            {"source": "n1", "target": "n2", "weight": 2},
            {"source": "n1", "target": "n1", "weight": 10},
            {"source": "n1", "target": "n3", "weight": 9},
        ],
    }
    class _GoodRetriever:
        async def search(self, *_args, **_kwargs):
            return SimpleNamespace(ids=["good"], field={"good": {"knowledge_graph_kwd": "graph", "content_with_weight": json.dumps(payload)}})
    monkeypatch.setattr(module.settings, "retriever", _GoodRetriever())
    res = _run(inspect.unwrap(module.knowledge_graph)("tenant-1", "kb-1"))
    assert res["code"] == module.RetCode.SUCCESS, res
    assert len(res["data"]["graph"]["nodes"]) == 2, res
    assert len(res["data"]["graph"]["edges"]) == 1, res
    # delete_knowledge_graph: inaccessible dataset -> permission error.
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: False)
    res = inspect.unwrap(module.delete_knowledge_graph)("tenant-1", "kb-1")
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res
@pytest.mark.p2
def test_run_trace_graphrag_matrix_unit(monkeypatch):
    """Cover run_graphrag/trace_graphrag: validation, stale/duplicate tasks,
    queuing, and task lookup."""
    module = _load_dataset_module(monkeypatch)
    warnings = []
    # Capture logging.warning calls for later assertions.
    monkeypatch.setattr(module.logging, "warning", lambda msg, *_args, **_kwargs: warnings.append(msg))
    # Empty dataset id.
    res = inspect.unwrap(module.run_graphrag)("tenant-1", "")
    assert 'Dataset ID' in res["message"], res
    # Inaccessible dataset.
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: False)
    res = inspect.unwrap(module.run_graphrag)("tenant-1", "kb-1")
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: True)
    # Dataset lookup failure.
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (False, None))
    res = inspect.unwrap(module.run_graphrag)("tenant-1", "kb-1")
    assert "Invalid Dataset ID" in res["message"], res
    # A stale (unresolvable) previous task id is warned about and replaced.
    stale_kb = _KB(kb_id="kb-1", graphrag_task_id="task-old")
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, stale_kb))
    monkeypatch.setattr(module.TaskService, "get_by_id", lambda _task_id: (False, None))
    monkeypatch.setattr(module.DocumentService, "get_by_kb_id", lambda **_kwargs: ([{"id": "doc-1"}], 1))
    monkeypatch.setattr(module, "queue_raptor_o_graphrag_tasks", lambda **_kwargs: "task-new")
    monkeypatch.setattr(module.KnowledgebaseService, "update_by_id", lambda *_args, **_kwargs: True)
    res = inspect.unwrap(module.run_graphrag)("tenant-1", "kb-1")
    assert res["code"] == module.RetCode.SUCCESS, res
    assert any("GraphRAG" in msg for msg in warnings), warnings
    # An in-progress task blocks a new run.
    monkeypatch.setattr(module.TaskService, "get_by_id", lambda _task_id: (True, SimpleNamespace(progress=0)))
    res = inspect.unwrap(module.run_graphrag)("tenant-1", "kb-1")
    assert "already running" in res["message"], res
    warnings.clear()
    queue_calls = {}
    # Fresh run: the task is queued with all document ids, and a failed
    # update_by_id only warns instead of failing the request.
    no_task_kb = _KB(kb_id="kb-1", graphrag_task_id="")
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, no_task_kb))
    monkeypatch.setattr(module.TaskService, "get_by_id", lambda _task_id: (False, None))
    monkeypatch.setattr(module.DocumentService, "get_by_kb_id", lambda **_kwargs: ([{"id": "doc-1"}, {"id": "doc-2"}], 2))
    def _queue(**kwargs):
        queue_calls.update(kwargs)
        return "queued-id"
    monkeypatch.setattr(module, "queue_raptor_o_graphrag_tasks", _queue)
    monkeypatch.setattr(module.KnowledgebaseService, "update_by_id", lambda *_args, **_kwargs: False)
    res = inspect.unwrap(module.run_graphrag)("tenant-1", "kb-1")
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"]["graphrag_task_id"] == "queued-id", res
    assert queue_calls["doc_ids"] == ["doc-1", "doc-2"], queue_calls
    assert any("Cannot save graphrag_task_id" in msg for msg in warnings), warnings
    # trace_graphrag: same validation ladder.
    res = inspect.unwrap(module.trace_graphrag)("tenant-1", "")
    assert 'Dataset ID' in res["message"], res
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: False)
    res = inspect.unwrap(module.trace_graphrag)("tenant-1", "kb-1")
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (False, None))
    res = inspect.unwrap(module.trace_graphrag)("tenant-1", "kb-1")
    assert "Invalid Dataset ID" in res["message"], res
    # Unresolvable task id -> empty payload; resolvable -> task dict.
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, _KB(kb_id="kb-1", graphrag_task_id="task-1")))
    monkeypatch.setattr(module.TaskService, "get_by_id", lambda _task_id: (False, None))
    res = inspect.unwrap(module.trace_graphrag)("tenant-1", "kb-1")
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"] == {}, res
    monkeypatch.setattr(module.TaskService, "get_by_id", lambda _task_id: (True, SimpleNamespace(to_dict=lambda: {"id": _task_id, "progress": 1})))
    res = inspect.unwrap(module.trace_graphrag)("tenant-1", "kb-1")
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"]["id"] == "task-1", res
@pytest.mark.p2
def test_run_trace_raptor_matrix_unit(monkeypatch):
    """Cover run_raptor/trace_raptor: mirrors the GraphRAG matrix, with the
    notable difference that a missing RAPTOR task is an error, not empty data."""
    module = _load_dataset_module(monkeypatch)
    warnings = []
    # Capture logging.warning calls for later assertions.
    monkeypatch.setattr(module.logging, "warning", lambda msg, *_args, **_kwargs: warnings.append(msg))
    # Empty dataset id.
    res = inspect.unwrap(module.run_raptor)("tenant-1", "")
    assert 'Dataset ID' in res["message"], res
    # Inaccessible dataset.
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: False)
    res = inspect.unwrap(module.run_raptor)("tenant-1", "kb-1")
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: True)
    # Dataset lookup failure.
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (False, None))
    res = inspect.unwrap(module.run_raptor)("tenant-1", "kb-1")
    assert "Invalid Dataset ID" in res["message"], res
    # A stale previous task id is warned about and replaced.
    stale_kb = _KB(kb_id="kb-1", raptor_task_id="task-old")
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, stale_kb))
    monkeypatch.setattr(module.TaskService, "get_by_id", lambda _task_id: (False, None))
    monkeypatch.setattr(module.DocumentService, "get_by_kb_id", lambda **_kwargs: ([{"id": "doc-1"}], 1))
    monkeypatch.setattr(module, "queue_raptor_o_graphrag_tasks", lambda **_kwargs: "task-new")
    monkeypatch.setattr(module.KnowledgebaseService, "update_by_id", lambda *_args, **_kwargs: True)
    res = inspect.unwrap(module.run_raptor)("tenant-1", "kb-1")
    assert res["code"] == module.RetCode.SUCCESS, res
    assert any("RAPTOR" in msg for msg in warnings), warnings
    # An in-progress task blocks a new run.
    monkeypatch.setattr(module.TaskService, "get_by_id", lambda _task_id: (True, SimpleNamespace(progress=0)))
    res = inspect.unwrap(module.run_raptor)("tenant-1", "kb-1")
    assert "already running" in res["message"], res
    warnings.clear()
    # Fresh run: queuing succeeds; a failed update_by_id only warns.
    no_task_kb = _KB(kb_id="kb-1", raptor_task_id="")
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, no_task_kb))
    monkeypatch.setattr(module.DocumentService, "get_by_kb_id", lambda **_kwargs: ([{"id": "doc-1"}], 1))
    monkeypatch.setattr(module, "queue_raptor_o_graphrag_tasks", lambda **_kwargs: "queued-raptor")
    monkeypatch.setattr(module.KnowledgebaseService, "update_by_id", lambda *_args, **_kwargs: False)
    res = inspect.unwrap(module.run_raptor)("tenant-1", "kb-1")
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"]["raptor_task_id"] == "queued-raptor", res
    assert any("Cannot save raptor_task_id" in msg for msg in warnings), warnings
    # trace_raptor: same validation ladder.
    res = inspect.unwrap(module.trace_raptor)("tenant-1", "")
    assert 'Dataset ID' in res["message"], res
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: False)
    res = inspect.unwrap(module.trace_raptor)("tenant-1", "kb-1")
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (False, None))
    res = inspect.unwrap(module.trace_raptor)("tenant-1", "kb-1")
    assert "Invalid Dataset ID" in res["message"], res
    # Unresolvable task id is an explicit error here (unlike trace_graphrag).
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, _KB(kb_id="kb-1", raptor_task_id="task-1")))
    monkeypatch.setattr(module.TaskService, "get_by_id", lambda _task_id: (False, None))
    res = inspect.unwrap(module.trace_raptor)("tenant-1", "kb-1")
    assert "RAPTOR Task Not Found" in res["message"], res
    monkeypatch.setattr(module.TaskService, "get_by_id", lambda _task_id: (True, SimpleNamespace(to_dict=lambda: {"id": _task_id, "progress": -1})))
    res = inspect.unwrap(module.trace_raptor)("tenant-1", "kb-1")
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"]["id"] == "task-1", res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_dataset_management/test_dataset_sdk_routes_unit.py",
"license": "Apache License 2.0",
"lines": 615,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_dialog_app/test_dialog_routes_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import importlib.util
import inspect
import sys
from pathlib import Path
from types import ModuleType, SimpleNamespace
from functools import wraps
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _AwaitableValue:
def __init__(self, value):
self._value = value
def __await__(self):
async def _co():
return self._value
return _co().__await__()
class _Args(dict):
def get(self, key, default=None):
return super().get(key, default)
def _run(coro):
return asyncio.run(coro)
def _set_request_json(monkeypatch, module, payload):
    # Make the module's awaitable ``get_request_json()`` resolve to ``payload``.
    monkeypatch.setattr(module, "get_request_json", lambda: _AwaitableValue(payload))
def _set_request_args(monkeypatch, module, args):
    # Replace the module-level ``request`` with a stub exposing query args.
    monkeypatch.setattr(module, "request", SimpleNamespace(args=_Args(args)))
@pytest.fixture(scope="session")
def auth():
    # Session-wide auth token placeholder; these unit tests never hit a real
    # server, so any string satisfies the shared-fixture contract.
    return "unit-auth"
@pytest.fixture(scope="session", autouse=True)
def set_tenant_info():
    # Autouse no-op that shadows the suite-wide fixture of the same name,
    # preventing it from contacting a live backend during these unit tests.
    return None
def _load_dialog_module(monkeypatch):
    """Execute ``api/apps/dialog_app.py`` in isolation and return the module.

    Same pattern as the dataset-route unit tests: project dependencies are
    replaced by stub modules registered in ``sys.modules`` (via ``monkeypatch``
    so the real modules come back afterwards) before the target file runs.
    Registration order matters: parents before children, and everything before
    the final ``exec_module``.
    """
    repo_root = Path(__file__).resolve().parents[4]
    common_pkg = ModuleType("common")
    common_pkg.__path__ = [str(repo_root / "common")]
    monkeypatch.setitem(sys.modules, "common", common_pkg)
    # quart stub: only ``request`` is needed by dialog_app.
    quart_mod = ModuleType("quart")
    quart_mod.request = SimpleNamespace(args=_Args())
    monkeypatch.setitem(sys.modules, "quart", quart_mod)
    api_pkg = ModuleType("api")
    api_pkg.__path__ = [str(repo_root / "api")]
    monkeypatch.setitem(sys.modules, "api", api_pkg)
    apps_mod = ModuleType("api.apps")
    apps_mod.__path__ = [str(repo_root / "api" / "apps")]
    # Login is bypassed: fixed current_user, no-op login_required decorator.
    apps_mod.current_user = SimpleNamespace(id="tenant-1")
    apps_mod.login_required = lambda func: func
    monkeypatch.setitem(sys.modules, "api.apps", apps_mod)
    api_pkg.apps = apps_mod
    db_pkg = ModuleType("api.db")
    db_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "api.db", db_pkg)
    api_pkg.db = db_pkg
    services_pkg = ModuleType("api.db.services")
    services_pkg.__path__ = []
    # duplicate_name simply echoes the requested name (no collision handling).
    services_pkg.duplicate_name = lambda _checker, **kwargs: kwargs.get("name", "")
    monkeypatch.setitem(sys.modules, "api.db.services", services_pkg)
    # --- service-layer stubs with benign defaults ---
    dialog_service_mod = ModuleType("api.db.services.dialog_service")

    class _DialogService:
        model = SimpleNamespace(create_time="create_time")

        @staticmethod
        def query(**_kwargs):
            return []

        @staticmethod
        def save(**_kwargs):
            return True

        @staticmethod
        def update_by_id(*_args, **_kwargs):
            return True

        @staticmethod
        def get_by_id(_id):
            return True, SimpleNamespace(to_dict=lambda: {"id": _id, "kb_ids": []})

        @staticmethod
        def get_by_tenant_ids(*_args, **_kwargs):
            return [], 0

        @staticmethod
        def update_many_by_id(_payload):
            return True

    dialog_service_mod.DialogService = _DialogService
    monkeypatch.setitem(sys.modules, "api.db.services.dialog_service", dialog_service_mod)
    tenant_llm_service_mod = ModuleType("api.db.services.tenant_llm_service")

    class _TenantLLMService:
        @staticmethod
        def split_model_name_and_factory(embd_id):
            return embd_id.split("@")

    tenant_llm_service_mod.TenantLLMService = _TenantLLMService
    monkeypatch.setitem(sys.modules, "api.db.services.tenant_llm_service", tenant_llm_service_mod)
    knowledgebase_service_mod = ModuleType("api.db.services.knowledgebase_service")

    class _KnowledgebaseService:
        @staticmethod
        def get_by_ids(_ids):
            return []

        @staticmethod
        def get_by_id(_id):
            return False, None

        @staticmethod
        def query(**_kwargs):
            return []

    knowledgebase_service_mod.KnowledgebaseService = _KnowledgebaseService
    monkeypatch.setitem(sys.modules, "api.db.services.knowledgebase_service", knowledgebase_service_mod)
    user_service_mod = ModuleType("api.db.services.user_service")

    class _TenantService:
        @staticmethod
        def get_by_id(_id):
            return True, SimpleNamespace(llm_id="llm-default")

    class _UserTenantService:
        @staticmethod
        def query(**_kwargs):
            return [SimpleNamespace(tenant_id="tenant-1")]

    user_service_mod.TenantService = _TenantService
    user_service_mod.UserTenantService = _UserTenantService
    monkeypatch.setitem(sys.modules, "api.db.services.user_service", user_service_mod)
    # --- minimal api.utils.api_utils replacement ---
    api_utils_mod = ModuleType("api.utils.api_utils")
    # Imports the REAL RetCode from the repo's common.constants package
    # (``common`` was registered above with a real __path__).
    from common.constants import RetCode

    async def _default_request_json():
        return {}

    def _get_data_error_result(code=RetCode.DATA_ERROR, message="Sorry! Data missing!"):
        return {"code": code, "message": message}

    def _get_json_result(code=RetCode.SUCCESS, message="success", data=None):
        return {"code": code, "message": message, "data": data}

    def _server_error_response(error):
        return {"code": RetCode.EXCEPTION_ERROR, "message": repr(error)}

    def _validate_request(*_args, **_kwargs):
        # No-op validation decorator preserving the handler's sync/async style.
        def _decorator(func):
            if inspect.iscoroutinefunction(func):
                @wraps(func)
                async def _wrapped(*func_args, **func_kwargs):
                    return await func(*func_args, **func_kwargs)

                return _wrapped

            @wraps(func)
            def _wrapped(*func_args, **func_kwargs):
                return func(*func_args, **func_kwargs)

            return _wrapped

        return _decorator

    api_utils_mod.get_request_json = _default_request_json
    api_utils_mod.get_data_error_result = _get_data_error_result
    api_utils_mod.get_json_result = _get_json_result
    api_utils_mod.server_error_response = _server_error_response
    api_utils_mod.validate_request = _validate_request
    monkeypatch.setitem(sys.modules, "api.utils.api_utils", api_utils_mod)
    # --- execute dialog_app.py with a no-op route manager attached ---
    module_name = "test_dialog_routes_unit_module"
    module_path = repo_root / "api" / "apps" / "dialog_app.py"
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    # ``manager`` must exist before exec: dialog_app registers routes on it.
    module.manager = _DummyManager()
    monkeypatch.setitem(sys.modules, module_name, module)
    spec.loader.exec_module(module)
    return module
@pytest.mark.p2
def test_set_dialog_branch_matrix_unit(monkeypatch):
    """Drive the ``set_dialog`` route handler through its validation/error branches.

    The dialog_app module is loaded in isolation (services stubbed by
    ``_load_dialog_module``), the route decorators are stripped with
    ``inspect.unwrap``, and the handler is exercised branch by branch.
    NOTE: the monkeypatched service state carries over between steps, so the
    ordering of the sections below is significant.
    """
    module = _load_dialog_module(monkeypatch)
    handler = inspect.unwrap(module.set_dialog)
    # --- name validation: non-string, blank, over-length ---
    _set_request_json(monkeypatch, module, {"name": 1, "prompt_config": {"system": "", "parameters": []}})
    res = _run(handler())
    assert res["message"] == "Dialog name must be string."
    _set_request_json(monkeypatch, module, {"name": " ", "prompt_config": {"system": "", "parameters": []}})
    res = _run(handler())
    assert res["message"] == "Dialog name can't be empty."
    _set_request_json(monkeypatch, module, {"name": "a" * 256, "prompt_config": {"system": "", "parameters": []}})
    res = _run(handler())
    assert res["message"] == "Dialog name length is 256 which is larger than 255"
    # --- create path: duplicate-name resolution plus implicit {knowledge} parameter ---
    captured = {}
    def _dup_name(checker, **kwargs):
        # The handler passes its own query callable; confirm it reports a clash.
        assert checker(name=kwargs["name"]) is True
        return kwargs["name"] + " (1)"
    monkeypatch.setattr(module, "duplicate_name", _dup_name)
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [SimpleNamespace(name="new dialog")])
    monkeypatch.setattr(module.TenantService, "get_by_id", lambda _id: (True, SimpleNamespace(llm_id="llm-x")))
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_ids", lambda _ids: [SimpleNamespace(embd_id="embd-a@builtin")])
    monkeypatch.setattr(module.TenantLLMService, "split_model_name_and_factory", lambda embd_id: embd_id.split("@"))
    # save() records the payload it was given but reports failure.
    monkeypatch.setattr(module.DialogService, "save", lambda **kwargs: captured.update(kwargs) or False)
    _set_request_json(
        monkeypatch,
        module,
        {
            "name": "New Dialog",
            "kb_ids": ["kb-1"],
            "prompt_config": {"system": "Use {knowledge}", "parameters": []},
        },
    )
    res = _run(handler())
    assert res["message"] == "Fail to new a dialog!"
    assert captured["name"] == "New Dialog (1)"
    assert captured["prompt_config"]["parameters"] == [{"key": "knowledge", "optional": False}]
    # --- prompt_config consistency errors ---
    _set_request_json(
        monkeypatch,
        module,
        {
            "dialog_id": "dialog-1",
            "name": "Update",
            "kb_ids": [],
            "prompt_config": {
                "system": "Use {knowledge}",
                "parameters": [{"key": "knowledge", "optional": True}],
            },
        },
    )
    res = _run(handler())
    assert "Please remove `{knowledge}` in system prompt" in res["message"]
    _set_request_json(
        monkeypatch,
        module,
        {"name": "demo", "prompt_config": {"system": "hello", "parameters": [{"key": "must", "optional": False}]}},
    )
    res = _run(handler())
    assert "Parameter 'must' is not used" in res["message"]
    # --- tenant lookup failure ---
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [])
    monkeypatch.setattr(module.TenantService, "get_by_id", lambda _id: (False, None))
    _set_request_json(monkeypatch, module, {"name": "demo", "prompt_config": {"system": "hello", "parameters": []}})
    res = _run(handler())
    assert res["message"] == "Tenant not found!"
    # --- datasets with mismatched embedding models are rejected ---
    monkeypatch.setattr(module.TenantService, "get_by_id", lambda _id: (True, SimpleNamespace(llm_id="llm-x")))
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue(
            {
                "name": "demo",
                "kb_ids": ["kb-1", "kb-2"],
                "prompt_config": {"system": "hello", "parameters": []},
            }
        ),
    )
    monkeypatch.setattr(
        module.KnowledgebaseService,
        "get_by_ids",
        lambda _ids: [SimpleNamespace(embd_id="embd-a@f1"), SimpleNamespace(embd_id="embd-b@f2")],
    )
    monkeypatch.setattr(module.TenantLLMService, "split_model_name_and_factory", lambda embd_id: embd_id.split("@"))
    res = _run(handler())
    assert "Datasets use different embedding models" in res["message"]
    # --- create with optional-only parameters still reaches save() ---
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [])
    monkeypatch.setattr(
        module,
        "get_request_json",
        lambda: _AwaitableValue(
            {
                "name": "optional-param-dialog",
                "prompt_config": {"system": "hello", "parameters": [{"key": "ignored", "optional": True}]},
            }
        ),
    )
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_ids", lambda _ids: [])
    monkeypatch.setattr(module.DialogService, "save", lambda **_kwargs: False)
    res = _run(handler())
    assert res["message"] == "Fail to new a dialog!"
    # --- update path failures: missing dialog, then failed re-fetch ---
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_ids", lambda _ids: [])
    monkeypatch.setattr(module.DialogService, "update_by_id", lambda *_args, **_kwargs: False)
    _set_request_json(
        monkeypatch,
        module,
        {
            "dialog_id": "dialog-1",
            "kb_names": ["legacy"],
            "name": "rename",
            "prompt_config": {"system": "hello", "parameters": []},
        },
    )
    res = _run(handler())
    assert res["message"] == "Dialog not found!"
    monkeypatch.setattr(module.DialogService, "update_by_id", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.DialogService, "get_by_id", lambda _id: (False, None))
    _set_request_json(
        monkeypatch,
        module,
        {
            "dialog_id": "dialog-1",
            "name": "rename",
            "prompt_config": {"system": "hello", "parameters": []},
        },
    )
    res = _run(handler())
    assert res["message"] == "Fail to update a dialog!"
    # --- successful update: response echoes new name and resolved kb_names ---
    monkeypatch.setattr(module.DialogService, "get_by_id", lambda _id: (True, SimpleNamespace(to_dict=lambda: {"id": _id, "kb_ids": ["kb-1"]})))
    monkeypatch.setattr(
        module.KnowledgebaseService,
        "get_by_id",
        lambda _id: (True, SimpleNamespace(status=module.StatusEnum.VALID.value, name="KB One")),
    )
    _set_request_json(
        monkeypatch,
        module,
        {
            "dialog_id": "dialog-1",
            "kb_names": ["legacy"],
            "name": "new-name",
            "prompt_config": {"system": "hello", "parameters": []},
        },
    )
    res = _run(handler())
    assert res["code"] == 0
    assert res["data"]["name"] == "new-name"
    assert res["data"]["kb_names"] == ["KB One"]
    # --- unexpected exception funnels through server_error_response ---
    def _raise_tenant(_id):
        raise RuntimeError("set boom")
    monkeypatch.setattr(module.TenantService, "get_by_id", _raise_tenant)
    _set_request_json(monkeypatch, module, {"name": "demo", "prompt_config": {"system": "hello", "parameters": []}})
    res = _run(handler())
    assert "set boom" in res["message"]
@pytest.mark.p2
def test_get_get_kb_names_and_list_dialogs_exception_matrix_unit(monkeypatch):
    """Cover ``get``, the ``get_kb_names`` helper, and the ``list_dialogs`` error path.

    Invalid knowledge bases are silently filtered out of a dialog's kb list,
    and service exceptions surface through the module's error response helper.
    """
    module = _load_dialog_module(monkeypatch)
    get_handler = inspect.unwrap(module.get)
    # Dialog resolves; only kb-1 is valid, so kb-2 is dropped from the result.
    monkeypatch.setattr(
        module.DialogService,
        "get_by_id",
        lambda _id: (True, SimpleNamespace(to_dict=lambda: {"id": _id, "kb_ids": ["kb-1", "kb-2"]})),
    )
    monkeypatch.setattr(
        module.KnowledgebaseService,
        "get_by_id",
        lambda kid: (
            (True, SimpleNamespace(status=module.StatusEnum.VALID.value, name="KB-1"))
            if kid == "kb-1"
            else (False, None)
        ),
    )
    _set_request_args(monkeypatch, module, {"dialog_id": "dialog-1"})
    res = get_handler()
    assert res["code"] == 0
    assert res["data"]["kb_ids"] == ["kb-1"]
    assert res["data"]["kb_names"] == ["KB-1"]
    # Missing dialog.
    monkeypatch.setattr(module.DialogService, "get_by_id", lambda _id: (False, None))
    _set_request_args(monkeypatch, module, {"dialog_id": "dialog-missing"})
    res = get_handler()
    assert res["message"] == "Dialog not found!"
    # Service exception propagates into the error payload.
    def _raise_get(_id):
        raise RuntimeError("get boom")
    monkeypatch.setattr(module.DialogService, "get_by_id", _raise_get)
    _set_request_args(monkeypatch, module, {"dialog_id": "dialog-1"})
    res = get_handler()
    assert "get boom" in res["message"]
    # get_kb_names keeps only kbs whose status is VALID, preserving order.
    monkeypatch.setattr(
        module.KnowledgebaseService,
        "get_by_id",
        lambda kid: (
            (True, SimpleNamespace(status=module.StatusEnum.VALID.value, name=f"KB-{kid}"))
            if kid.startswith("ok")
            else (True, SimpleNamespace(status=module.StatusEnum.INVALID.value, name=f"BAD-{kid}"))
        ),
    )
    ids, names = module.get_kb_names(["ok-1", "bad-1", "ok-2"])
    assert ids == ["ok-1", "ok-2"]
    assert names == ["KB-ok-1", "KB-ok-2"]
    # list_dialogs exception path.
    def _raise_list(**_kwargs):
        raise RuntimeError("list boom")
    monkeypatch.setattr(module.DialogService, "query", _raise_list)
    res = module.list_dialogs()
    assert "list boom" in res["message"]
@pytest.mark.p2
def test_list_dialogs_next_owner_desc_and_pagination_matrix_unit(monkeypatch):
    """Exercise ``list_dialogs_next``: owner filtering, desc parsing, and pagination.

    A recording stub stands in for ``DialogService.get_by_tenant_ids`` so the
    test can assert on the exact arguments the route forwards to the service.
    """
    module = _load_dialog_module(monkeypatch)
    handler = inspect.unwrap(module.list_dialogs_next)
    calls = []
    def _get_by_tenant_ids(tenants, user_id, page_number, items_per_page, orderby, desc, keywords, parser_id):
        # Record every invocation so the route's forwarded arguments can be checked.
        calls.append(
            {
                "tenants": tenants,
                "user_id": user_id,
                "page_number": page_number,
                "items_per_page": items_per_page,
                "orderby": orderby,
                "desc": desc,
                "keywords": keywords,
                "parser_id": parser_id,
            }
        )
        # Non-empty owner filter: return dialogs for several tenants so the
        # route must filter and paginate in-process.
        if tenants:
            return (
                [
                    {"id": "dialog-1", "tenant_id": "tenant-a"},
                    {"id": "dialog-2", "tenant_id": "tenant-x"},
                    {"id": "dialog-3", "tenant_id": "tenant-b"},
                ],
                3,
            )
        return ([{"id": "dialog-0", "tenant_id": "tenant-1"}], 1)
    monkeypatch.setattr(module.DialogService, "get_by_tenant_ids", _get_by_tenant_ids)
    # No owner filter: service is called with empty tenants and desc parsed to False.
    _set_request_args(
        monkeypatch,
        module,
        {
            "keywords": "k",
            "page": "1",
            "page_size": "2",
            "parser_id": "parser-x",
            "orderby": "create_time",
            "desc": "false",
        },
    )
    _set_request_json(monkeypatch, module, {"owner_ids": []})
    res = _run(handler())
    assert res["code"] == 0
    assert res["data"]["total"] == 1
    assert calls[-1]["tenants"] == []
    assert calls[-1]["desc"] is False
    # Owner filter active: route fetches everything (page 0 / size 0) from the
    # service, then filters to the requested owners and paginates locally.
    _set_request_args(monkeypatch, module, {"page": "2", "page_size": "1"})
    _set_request_json(monkeypatch, module, {"owner_ids": ["tenant-a", "tenant-b"]})
    res = _run(handler())
    assert res["code"] == 0
    assert res["data"]["total"] == 2
    assert res["data"]["dialogs"] == [{"id": "dialog-3", "tenant_id": "tenant-b"}]
    assert calls[-1]["page_number"] == 0
    assert calls[-1]["items_per_page"] == 0
    assert calls[-1]["desc"] is True
    # Exception path.
    def _raise_next(*_args, **_kwargs):
        raise RuntimeError("next boom")
    monkeypatch.setattr(module.DialogService, "get_by_tenant_ids", _raise_next)
    _set_request_args(monkeypatch, module, {"page": "1", "page_size": "1"})
    _set_request_json(monkeypatch, module, {"owner_ids": []})
    res = _run(handler())
    assert "next boom" in res["message"]
@pytest.mark.p2
def test_rm_permission_and_exception_matrix_unit(monkeypatch):
    """Cover ``rm``: the not-owner permission error and the exception path."""
    module = _load_dialog_module(monkeypatch)
    handler = inspect.unwrap(module.rm)
    # Caller belongs to tenant-a but owns no matching dialog -> permission error.
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-a")])
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [])
    _set_request_json(monkeypatch, module, {"dialog_ids": ["dialog-1"]})
    res = _run(handler())
    assert res["code"] == module.RetCode.OPERATING_ERROR
    assert "Only owner of dialog authorized for this operation." in res["message"]
    # Service exception surfaces in the error payload.
    def _raise_query(**_kwargs):
        raise RuntimeError("rm boom")
    monkeypatch.setattr(module.DialogService, "query", _raise_query)
    _set_request_json(monkeypatch, module, {"dialog_ids": ["dialog-1"]})
    res = _run(handler())
    assert "rm boom" in res["message"]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_dialog_app/test_dialog_routes_unit.py",
"license": "Apache License 2.0",
"lines": 446,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_evaluation_app/test_evaluation_routes_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import importlib.util
import sys
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _Args(dict):
def get(self, key, default=None):
return super().get(key, default)
class _DummyRetCode:
    """Minimal replacement for ``common.constants.RetCode``.

    Only the codes the evaluation routes actually return are defined; the
    numeric values mirror the real constants so assertions stay meaningful.
    """
    SUCCESS = 0
    EXCEPTION_ERROR = 100
    ARGUMENT_ERROR = 101
    DATA_ERROR = 102
    OPERATING_ERROR = 103
    AUTHENTICATION_ERROR = 109
def _run(coro):
return asyncio.run(coro)
def _set_request_json(monkeypatch, module, payload):
async def _request_json():
return payload
monkeypatch.setattr(module, "get_request_json", _request_json)
def _set_request_args(monkeypatch, module, args=None):
    """Install a fake ``request`` on *module* whose ``args`` mapping mirrors *args*."""
    fake_request = SimpleNamespace(args=_Args(args or {}))
    monkeypatch.setattr(module, "request", fake_request)
@pytest.fixture(scope="session")
def auth():
    """Session-scoped placeholder auth token; these unit tests bypass real login."""
    token = "unit-auth"
    return token
@pytest.fixture(scope="session", autouse=True)
def set_tenant_info():
    # No-op autouse fixture. NOTE(review): presumably shadows a same-named
    # conftest fixture so no real tenant setup runs for these unit tests —
    # confirm against the shared conftest.
    return None
def _load_evaluation_app(monkeypatch):
    """Import ``api/apps/evaluation_app.py`` in isolation with all dependencies stubbed.

    Fake ``quart``, ``common``, ``api.*`` packages and service/utility modules
    are planted in ``sys.modules`` (via monkeypatch, so they are removed again
    after the test), then the app module is loaded from its file path with a
    ``_DummyManager`` standing in for the route manager. Ordering matters:
    every stub must be registered before ``exec_module`` runs the app's imports.

    Returns the freshly executed module object.
    """
    repo_root = Path(__file__).resolve().parents[4]
    # Fake quart with a patchable ``request`` object.
    quart_mod = ModuleType("quart")
    quart_mod.request = SimpleNamespace(args=_Args())
    monkeypatch.setitem(sys.modules, "quart", quart_mod)
    # common / common.constants with the dummy RetCode.
    common_pkg = ModuleType("common")
    common_pkg.__path__ = [str(repo_root / "common")]
    monkeypatch.setitem(sys.modules, "common", common_pkg)
    constants_mod = ModuleType("common.constants")
    constants_mod.RetCode = _DummyRetCode
    monkeypatch.setitem(sys.modules, "common.constants", constants_mod)
    common_pkg.constants = constants_mod
    # api / api.apps with a stub current_user and pass-through login_required.
    api_pkg = ModuleType("api")
    api_pkg.__path__ = [str(repo_root / "api")]
    monkeypatch.setitem(sys.modules, "api", api_pkg)
    apps_mod = ModuleType("api.apps")
    apps_mod.__path__ = [str(repo_root / "api" / "apps")]
    apps_mod.current_user = SimpleNamespace(id="tenant-1")
    apps_mod.login_required = lambda func: func
    monkeypatch.setitem(sys.modules, "api.apps", apps_mod)
    api_pkg.apps = apps_mod
    db_pkg = ModuleType("api.db")
    db_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "api.db", db_pkg)
    api_pkg.db = db_pkg
    services_pkg = ModuleType("api.db.services")
    services_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "api.db.services", services_pkg)
    # Default EvaluationService stub; individual tests monkeypatch its methods.
    evaluation_service_mod = ModuleType("api.db.services.evaluation_service")
    class _EvaluationService:
        @staticmethod
        def create_dataset(**_kwargs):
            return True, "dataset-1"
        @staticmethod
        def list_datasets(**_kwargs):
            return {"datasets": [], "total": 0}
        @staticmethod
        def get_dataset(_dataset_id):
            return {"id": _dataset_id}
        @staticmethod
        def update_dataset(_dataset_id, **_kwargs):
            return True
        @staticmethod
        def delete_dataset(_dataset_id):
            return True
        @staticmethod
        def add_test_case(**_kwargs):
            return True, "case-1"
        @staticmethod
        def import_test_cases(**_kwargs):
            return 0, 0
        @staticmethod
        def get_test_cases(_dataset_id):
            return []
        @staticmethod
        def delete_test_case(_case_id):
            return True
        @staticmethod
        def start_evaluation(**_kwargs):
            return True, "run-1"
        @staticmethod
        def get_run_results(_run_id):
            return {"id": _run_id}
        @staticmethod
        def get_recommendations(_run_id):
            return []
    evaluation_service_mod.EvaluationService = _EvaluationService
    monkeypatch.setitem(sys.modules, "api.db.services.evaluation_service", evaluation_service_mod)
    # api.utils.api_utils stubs returning plain dict payloads instead of responses.
    utils_pkg = ModuleType("api.utils")
    utils_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "api.utils", utils_pkg)
    api_utils_mod = ModuleType("api.utils.api_utils")
    async def _default_request_json():
        return {}
    def _get_data_error_result(code=_DummyRetCode.DATA_ERROR, message="Sorry! Data missing!"):
        return {"code": code, "message": message}
    def _get_json_result(code=_DummyRetCode.SUCCESS, message="success", data=None):
        return {"code": code, "message": message, "data": data}
    def _server_error_response(error):
        return {"code": _DummyRetCode.EXCEPTION_ERROR, "message": repr(error)}
    def _validate_request(*_args, **_kwargs):
        def _decorator(func):
            return func
        return _decorator
    api_utils_mod.get_data_error_result = _get_data_error_result
    api_utils_mod.get_json_result = _get_json_result
    api_utils_mod.get_request_json = _default_request_json
    api_utils_mod.server_error_response = _server_error_response
    api_utils_mod.validate_request = _validate_request
    monkeypatch.setitem(sys.modules, "api.utils.api_utils", api_utils_mod)
    utils_pkg.api_utils = api_utils_mod
    # Finally load the real route module from its source file, with the dummy
    # route manager injected before execution so ``@manager.route`` works.
    module_name = "test_evaluation_routes_unit_module"
    module_path = repo_root / "api" / "apps" / "evaluation_app.py"
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    module.manager = _DummyManager()
    monkeypatch.setitem(sys.modules, module_name, module)
    spec.loader.exec_module(module)
    return module
@pytest.mark.p2
def test_dataset_routes_matrix_unit(monkeypatch):
    """Cover the dataset CRUD routes: success, validation, failure, and exception paths.

    Monkeypatched service state carries between sections, so ordering matters.
    """
    module = _load_evaluation_app(monkeypatch)
    # --- create: success, blank name, non-list kb_ids, service failure, exception ---
    _set_request_json(monkeypatch, module, {"name": " data-1 ", "description": "desc", "kb_ids": ["kb-1"]})
    monkeypatch.setattr(module.EvaluationService, "create_dataset", lambda **_kwargs: (True, "dataset-ok"))
    res = _run(module.create_dataset())
    assert res["code"] == 0
    assert res["data"]["dataset_id"] == "dataset-ok"
    _set_request_json(monkeypatch, module, {"name": " ", "kb_ids": ["kb-1"]})
    res = _run(module.create_dataset())
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "empty" in res["message"].lower()
    _set_request_json(monkeypatch, module, {"name": "data-2", "kb_ids": "kb-1"})
    res = _run(module.create_dataset())
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "kb_ids" in res["message"]
    _set_request_json(monkeypatch, module, {"name": "data-3", "kb_ids": ["kb-1"]})
    monkeypatch.setattr(module.EvaluationService, "create_dataset", lambda **_kwargs: (False, "create failed"))
    res = _run(module.create_dataset())
    assert res["code"] == module.RetCode.DATA_ERROR
    assert res["message"] == "create failed"
    def _raise_create(**_kwargs):
        raise RuntimeError("create boom")
    monkeypatch.setattr(module.EvaluationService, "create_dataset", _raise_create)
    res = _run(module.create_dataset())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "create boom" in res["message"]
    # --- list: success, then non-numeric page triggers the exception path ---
    _set_request_args(monkeypatch, module, {"page": "2", "page_size": "3"})
    monkeypatch.setattr(module.EvaluationService, "list_datasets", lambda **_kwargs: {"datasets": [{"id": "a"}], "total": 1})
    res = _run(module.list_datasets())
    assert res["code"] == 0
    assert res["data"]["total"] == 1
    _set_request_args(monkeypatch, module, {"page": "x"})
    res = _run(module.list_datasets())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    # --- get: not found, found, exception ---
    monkeypatch.setattr(module.EvaluationService, "get_dataset", lambda _dataset_id: None)
    res = _run(module.get_dataset("dataset-1"))
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "not found" in res["message"].lower()
    monkeypatch.setattr(module.EvaluationService, "get_dataset", lambda _dataset_id: {"id": _dataset_id})
    res = _run(module.get_dataset("dataset-2"))
    assert res["code"] == 0
    assert res["data"]["id"] == "dataset-2"
    def _raise_get(_dataset_id):
        raise RuntimeError("get dataset boom")
    monkeypatch.setattr(module.EvaluationService, "get_dataset", _raise_get)
    res = _run(module.get_dataset("dataset-3"))
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "get dataset boom" in res["message"]
    # --- update: immutable fields are stripped before reaching the service ---
    captured = {}
    def _update(dataset_id, **kwargs):
        captured["dataset_id"] = dataset_id
        captured["kwargs"] = kwargs
        return True
    _set_request_json(
        monkeypatch,
        module,
        {
            "id": "forbidden",
            "tenant_id": "forbidden",
            "created_by": "forbidden",
            "create_time": 123,
            "name": "new-name",
        },
    )
    monkeypatch.setattr(module.EvaluationService, "update_dataset", _update)
    res = _run(module.update_dataset("dataset-4"))
    assert res["code"] == 0
    assert res["data"]["dataset_id"] == "dataset-4"
    assert captured["dataset_id"] == "dataset-4"
    assert "id" not in captured["kwargs"]
    assert "tenant_id" not in captured["kwargs"]
    assert "created_by" not in captured["kwargs"]
    assert "create_time" not in captured["kwargs"]
    # --- update: service failure and exception ---
    _set_request_json(monkeypatch, module, {"name": "new-name"})
    monkeypatch.setattr(module.EvaluationService, "update_dataset", lambda _dataset_id, **_kwargs: False)
    res = _run(module.update_dataset("dataset-5"))
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "failed" in res["message"].lower()
    def _raise_update(_dataset_id, **_kwargs):
        raise RuntimeError("update boom")
    monkeypatch.setattr(module.EvaluationService, "update_dataset", _raise_update)
    res = _run(module.update_dataset("dataset-6"))
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "update boom" in res["message"]
    # --- delete: failure, success, exception ---
    monkeypatch.setattr(module.EvaluationService, "delete_dataset", lambda _dataset_id: False)
    res = _run(module.delete_dataset("dataset-7"))
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "failed" in res["message"].lower()
    monkeypatch.setattr(module.EvaluationService, "delete_dataset", lambda _dataset_id: True)
    res = _run(module.delete_dataset("dataset-8"))
    assert res["code"] == 0
    assert res["data"]["dataset_id"] == "dataset-8"
    def _raise_delete(_dataset_id):
        raise RuntimeError("delete dataset boom")
    monkeypatch.setattr(module.EvaluationService, "delete_dataset", _raise_delete)
    res = _run(module.delete_dataset("dataset-9"))
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "delete dataset boom" in res["message"]
@pytest.mark.p2
def test_test_case_routes_matrix_unit(monkeypatch):
    """Cover test-case routes: add, bulk import, list, and delete branches."""
    module = _load_evaluation_app(monkeypatch)
    # --- add: blank question, service failure, full-payload success, exception ---
    _set_request_json(monkeypatch, module, {"question": " "})
    res = _run(module.add_test_case("dataset-1"))
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "question" in res["message"].lower()
    _set_request_json(monkeypatch, module, {"question": "q1"})
    monkeypatch.setattr(module.EvaluationService, "add_test_case", lambda **_kwargs: (False, "add failed"))
    res = _run(module.add_test_case("dataset-2"))
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "add failed" in res["message"]
    _set_request_json(
        monkeypatch,
        module,
        {
            "question": "q2",
            "reference_answer": "a2",
            "relevant_doc_ids": ["doc-1"],
            "relevant_chunk_ids": ["chunk-1"],
            "metadata": {"k": "v"},
        },
    )
    monkeypatch.setattr(module.EvaluationService, "add_test_case", lambda **_kwargs: (True, "case-ok"))
    res = _run(module.add_test_case("dataset-3"))
    assert res["code"] == 0
    assert res["data"]["case_id"] == "case-ok"
    def _raise_add(**_kwargs):
        raise RuntimeError("add case boom")
    monkeypatch.setattr(module.EvaluationService, "add_test_case", _raise_add)
    res = _run(module.add_test_case("dataset-4"))
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "add case boom" in res["message"]
    # --- import: non-list cases rejected, counted success, exception ---
    _set_request_json(monkeypatch, module, {"cases": {}})
    res = _run(module.import_test_cases("dataset-5"))
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "cases" in res["message"]
    _set_request_json(monkeypatch, module, {"cases": [{"question": "q1"}, {"question": "q2"}]})
    monkeypatch.setattr(module.EvaluationService, "import_test_cases", lambda **_kwargs: (2, 0))
    res = _run(module.import_test_cases("dataset-6"))
    assert res["code"] == 0
    assert res["data"]["success_count"] == 2
    assert res["data"]["failure_count"] == 0
    assert res["data"]["total"] == 2
    def _raise_import(**_kwargs):
        raise RuntimeError("import boom")
    monkeypatch.setattr(module.EvaluationService, "import_test_cases", _raise_import)
    res = _run(module.import_test_cases("dataset-7"))
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "import boom" in res["message"]
    # --- list cases: success and exception ---
    monkeypatch.setattr(module.EvaluationService, "get_test_cases", lambda _dataset_id: [{"id": "case-1"}])
    res = _run(module.get_test_cases("dataset-8"))
    assert res["code"] == 0
    assert res["data"]["total"] == 1
    assert res["data"]["cases"][0]["id"] == "case-1"
    def _raise_get_cases(_dataset_id):
        raise RuntimeError("get cases boom")
    monkeypatch.setattr(module.EvaluationService, "get_test_cases", _raise_get_cases)
    res = _run(module.get_test_cases("dataset-9"))
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "get cases boom" in res["message"]
    # --- delete case: failure, success, exception ---
    monkeypatch.setattr(module.EvaluationService, "delete_test_case", lambda _case_id: False)
    res = _run(module.delete_test_case("case-1"))
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "failed" in res["message"].lower()
    monkeypatch.setattr(module.EvaluationService, "delete_test_case", lambda _case_id: True)
    res = _run(module.delete_test_case("case-2"))
    assert res["code"] == 0
    assert res["data"]["case_id"] == "case-2"
    def _raise_delete_case(_case_id):
        raise RuntimeError("delete case boom")
    monkeypatch.setattr(module.EvaluationService, "delete_test_case", _raise_delete_case)
    res = _run(module.delete_test_case("case-3"))
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "delete case boom" in res["message"]
@pytest.mark.p2
def test_run_and_recommendation_routes_matrix_unit(monkeypatch):
    """Cover run start/get/results/list/delete and recommendation routes."""
    module = _load_evaluation_app(monkeypatch)
    # --- start: service failure, success, exception ---
    _set_request_json(monkeypatch, module, {"dataset_id": "d1", "dialog_id": "dialog-1", "name": "run 1"})
    monkeypatch.setattr(module.EvaluationService, "start_evaluation", lambda **_kwargs: (False, "start failed"))
    res = _run(module.start_evaluation())
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "start failed" in res["message"]
    monkeypatch.setattr(module.EvaluationService, "start_evaluation", lambda **_kwargs: (True, "run-ok"))
    res = _run(module.start_evaluation())
    assert res["code"] == 0
    assert res["data"]["run_id"] == "run-ok"
    def _raise_start(**_kwargs):
        raise RuntimeError("start boom")
    monkeypatch.setattr(module.EvaluationService, "start_evaluation", _raise_start)
    res = _run(module.start_evaluation())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "start boom" in res["message"]
    # --- get run: not found, found, exception ---
    monkeypatch.setattr(module.EvaluationService, "get_run_results", lambda _run_id: None)
    res = _run(module.get_evaluation_run("run-1"))
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "not found" in res["message"].lower()
    monkeypatch.setattr(module.EvaluationService, "get_run_results", lambda _run_id: {"id": _run_id})
    res = _run(module.get_evaluation_run("run-2"))
    assert res["code"] == 0
    assert res["data"]["id"] == "run-2"
    def _raise_get_run(_run_id):
        raise RuntimeError("get run boom")
    monkeypatch.setattr(module.EvaluationService, "get_run_results", _raise_get_run)
    res = _run(module.get_evaluation_run("run-3"))
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "get run boom" in res["message"]
    # --- run results: not found, found, exception ---
    monkeypatch.setattr(module.EvaluationService, "get_run_results", lambda _run_id: None)
    res = _run(module.get_run_results("run-4"))
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "not found" in res["message"].lower()
    monkeypatch.setattr(module.EvaluationService, "get_run_results", lambda _run_id: {"id": _run_id, "score": 0.9})
    res = _run(module.get_run_results("run-5"))
    assert res["code"] == 0
    assert res["data"]["id"] == "run-5"
    def _raise_results(_run_id):
        raise RuntimeError("get results boom")
    monkeypatch.setattr(module.EvaluationService, "get_run_results", _raise_results)
    res = _run(module.get_run_results("run-6"))
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "get results boom" in res["message"]
    # --- list runs: default empty success; exception forced via get_json_result ---
    res = _run(module.list_evaluation_runs())
    assert res["code"] == 0
    assert res["data"]["total"] == 0
    def _raise_json_list(*_args, **_kwargs):
        raise RuntimeError("list runs boom")
    monkeypatch.setattr(module, "get_json_result", _raise_json_list)
    res = _run(module.list_evaluation_runs())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "list runs boom" in res["message"]
    # Restore a working get_json_result before the next sections.
    monkeypatch.setattr(module, "get_json_result", lambda code=0, message="success", data=None: {"code": code, "message": message, "data": data})
    res = _run(module.delete_evaluation_run("run-7"))
    assert res["code"] == 0
    assert res["data"]["run_id"] == "run-7"
    def _raise_json_delete(*_args, **_kwargs):
        raise RuntimeError("delete run boom")
    monkeypatch.setattr(module, "get_json_result", _raise_json_delete)
    res = _run(module.delete_evaluation_run("run-8"))
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "delete run boom" in res["message"]
    # --- recommendations: success and exception ---
    monkeypatch.setattr(module, "get_json_result", lambda code=0, message="success", data=None: {"code": code, "message": message, "data": data})
    monkeypatch.setattr(module.EvaluationService, "get_recommendations", lambda _run_id: [{"name": "cfg-1"}])
    res = _run(module.get_recommendations("run-9"))
    assert res["code"] == 0
    assert res["data"]["recommendations"][0]["name"] == "cfg-1"
    def _raise_recommend(_run_id):
        raise RuntimeError("recommend boom")
    monkeypatch.setattr(module.EvaluationService, "get_recommendations", _raise_recommend)
    res = _run(module.get_recommendations("run-10"))
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "recommend boom" in res["message"]
@pytest.mark.p2
def test_compare_export_and_evaluate_single_matrix_unit(monkeypatch):
    """Cover compare_runs, export_results, and evaluate_single branches."""
    module = _load_evaluation_app(monkeypatch)
    # --- compare: too few run_ids, success, exception forced via get_json_result ---
    _set_request_json(monkeypatch, module, {"run_ids": ["run-1"]})
    res = _run(module.compare_runs())
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "at least 2" in res["message"]
    _set_request_json(monkeypatch, module, {"run_ids": ["run-1", "run-2"]})
    res = _run(module.compare_runs())
    assert res["code"] == 0
    assert res["data"]["comparison"] == {}
    def _raise_json_compare(*_args, **_kwargs):
        raise RuntimeError("compare boom")
    monkeypatch.setattr(module, "get_json_result", _raise_json_compare)
    _set_request_json(monkeypatch, module, {"run_ids": ["run-1", "run-2", "run-3"]})
    res = _run(module.compare_runs())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "compare boom" in res["message"]
    # --- export: restore get_json_result, then not-found / success / exception ---
    monkeypatch.setattr(module, "get_json_result", lambda code=0, message="success", data=None: {"code": code, "message": message, "data": data})
    monkeypatch.setattr(module.EvaluationService, "get_run_results", lambda _run_id: None)
    res = _run(module.export_results("run-11"))
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "not found" in res["message"].lower()
    monkeypatch.setattr(module.EvaluationService, "get_run_results", lambda _run_id: {"id": _run_id, "rows": []})
    res = _run(module.export_results("run-12"))
    assert res["code"] == 0
    assert res["data"]["id"] == "run-12"
    def _raise_export(_run_id):
        raise RuntimeError("export boom")
    monkeypatch.setattr(module.EvaluationService, "get_run_results", _raise_export)
    res = _run(module.export_results("run-13"))
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "export boom" in res["message"]
    # --- evaluate_single: placeholder success payload, then exception path ---
    monkeypatch.setattr(module, "get_json_result", lambda code=0, message="success", data=None: {"code": code, "message": message, "data": data})
    res = _run(module.evaluate_single())
    assert res["code"] == 0
    assert res["data"]["answer"] == ""
    assert res["data"]["metrics"] == {}
    assert res["data"]["retrieved_chunks"] == []
    def _raise_json_single(*_args, **_kwargs):
        raise RuntimeError("single boom")
    monkeypatch.setattr(module, "get_json_result", _raise_json_single)
    res = _run(module.evaluate_single())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "single boom" in res["message"]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_evaluation_app/test_evaluation_routes_unit.py",
"license": "Apache License 2.0",
"lines": 444,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_file_app/test_file2document_routes_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import functools
import importlib.util
import sys
from copy import deepcopy
from enum import Enum
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _AwaitableValue:
def __init__(self, value):
self._value = value
def __await__(self):
async def _co():
return self._value
return _co().__await__()
class _DummyFile:
def __init__(self, file_id, file_type, *, name="file.txt", location="loc", size=1):
self.id = file_id
self.type = file_type
self.name = name
self.location = location
self.size = size
class _FalsyFile(_DummyFile):
    """A ``_DummyFile`` that is falsy, to drive the "File not found!" branch."""
    def __bool__(self):
        # Truthiness tests on this record behave as if the lookup returned nothing.
        return False
def _run(coro):
return asyncio.run(coro)
def _set_request_json(monkeypatch, module, payload_state):
async def _req_json():
return deepcopy(payload_state)
monkeypatch.setattr(module, "get_request_json", _req_json)
@pytest.fixture(scope="session")
def auth():
    """Session-scoped dummy auth token; these unit tests never authenticate for real."""
    return "unit-auth"
@pytest.fixture(scope="session", autouse=True)
def set_tenant_info():
    """No-op autouse fixture; presumably shadows a heavier conftest fixture — confirm."""
    return None
def _load_file2document_module(monkeypatch):
    """Load ``api/apps/file2document_app.py`` from source with every dependency stubbed.

    Fabricates the ``api.*`` and ``common.*`` module tree in ``sys.modules`` via
    monkeypatch (so it is undone after each test), then executes the route module
    in isolation with a no-op blueprint manager. Returns the loaded module.
    """
    repo_root = Path(__file__).resolve().parents[4]
    # Fake top-level "api" package pointing at the real source tree.
    api_pkg = ModuleType("api")
    api_pkg.__path__ = [str(repo_root / "api")]
    monkeypatch.setitem(sys.modules, "api", api_pkg)
    # "api.apps": provides current_user and a pass-through login_required.
    apps_mod = ModuleType("api.apps")
    apps_mod.__path__ = [str(repo_root / "api" / "apps")]
    apps_mod.current_user = SimpleNamespace(id="user-1")
    apps_mod.login_required = lambda func: func
    monkeypatch.setitem(sys.modules, "api.apps", apps_mod)
    api_pkg.apps = apps_mod
    # "api.db": only the FileType enum values the routes consult.
    db_pkg = ModuleType("api.db")
    db_pkg.__path__ = []
    class _FileType(Enum):
        FOLDER = "folder"
        DOC = "doc"
    db_pkg.FileType = _FileType
    monkeypatch.setitem(sys.modules, "api.db", db_pkg)
    api_pkg.db = db_pkg
    services_pkg = ModuleType("api.db.services")
    services_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "api.db.services", services_pkg)
    # Service stubs: defaults are benign; tests monkeypatch per-branch behavior.
    file2document_mod = ModuleType("api.db.services.file2document_service")
    class _StubFile2DocumentService:
        @staticmethod
        def get_by_file_id(_file_id):
            return []
        @staticmethod
        def delete_by_file_id(*_args, **_kwargs):
            return None
        @staticmethod
        def insert(_payload):
            return SimpleNamespace(to_json=lambda: {})
    file2document_mod.File2DocumentService = _StubFile2DocumentService
    monkeypatch.setitem(sys.modules, "api.db.services.file2document_service", file2document_mod)
    services_pkg.file2document_service = file2document_mod
    file_service_mod = ModuleType("api.db.services.file_service")
    class _StubFileService:
        @staticmethod
        def get_by_ids(_file_ids):
            return []
        @staticmethod
        def get_all_innermost_file_ids(_file_id, _acc):
            return []
        @staticmethod
        def get_by_id(_file_id):
            return True, _DummyFile(_file_id, _FileType.DOC.value)
    file_service_mod.FileService = _StubFileService
    monkeypatch.setitem(sys.modules, "api.db.services.file_service", file_service_mod)
    services_pkg.file_service = file_service_mod
    kb_service_mod = ModuleType("api.db.services.knowledgebase_service")
    class _StubKnowledgebaseService:
        @staticmethod
        def get_by_id(_kb_id):
            return False, None
    kb_service_mod.KnowledgebaseService = _StubKnowledgebaseService
    monkeypatch.setitem(sys.modules, "api.db.services.knowledgebase_service", kb_service_mod)
    services_pkg.knowledgebase_service = kb_service_mod
    document_service_mod = ModuleType("api.db.services.document_service")
    class _StubDocumentService:
        @staticmethod
        def get_by_id(doc_id):
            return True, SimpleNamespace(id=doc_id)
        @staticmethod
        def get_tenant_id(_doc_id):
            return "tenant-1"
        @staticmethod
        def remove_document(*_args, **_kwargs):
            return True
        @staticmethod
        def insert(_payload):
            return SimpleNamespace(id="doc-1")
    document_service_mod.DocumentService = _StubDocumentService
    monkeypatch.setitem(sys.modules, "api.db.services.document_service", document_service_mod)
    services_pkg.document_service = document_service_mod
    # "api.utils.api_utils": result-envelope helpers matching the shapes the
    # assertions below check ({"code", "data", "message"}).
    api_utils_mod = ModuleType("api.utils.api_utils")
    def get_json_result(data=None, message="", code=0):
        return {"code": code, "data": data, "message": message}
    def get_data_error_result(message=""):
        return {"code": 102, "data": None, "message": message}
    async def get_request_json():
        return {}
    def server_error_response(err):
        return {"code": 500, "data": None, "message": str(err)}
    def validate_request(*_keys):
        # Pass-through decorator: no actual payload validation in unit tests.
        def _decorator(func):
            @functools.wraps(func)
            async def _wrapper(*args, **kwargs):
                return await func(*args, **kwargs)
            return _wrapper
        return _decorator
    api_utils_mod.get_json_result = get_json_result
    api_utils_mod.get_data_error_result = get_data_error_result
    api_utils_mod.get_request_json = get_request_json
    api_utils_mod.server_error_response = server_error_response
    api_utils_mod.validate_request = validate_request
    monkeypatch.setitem(sys.modules, "api.utils.api_utils", api_utils_mod)
    misc_utils_mod = ModuleType("common.misc_utils")
    misc_utils_mod.get_uuid = lambda: "uuid"
    monkeypatch.setitem(sys.modules, "common.misc_utils", misc_utils_mod)
    constants_mod = ModuleType("common.constants")
    class _RetCode:
        ARGUMENT_ERROR = 101
    constants_mod.RetCode = _RetCode
    monkeypatch.setitem(sys.modules, "common.constants", constants_mod)
    # Execute the real route module under a test-only name, with a no-op manager
    # installed before exec so the @manager.route decorators are inert.
    module_name = "test_file2document_routes_unit_module"
    module_path = repo_root / "api" / "apps" / "file2document_app.py"
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    module.manager = _DummyManager()
    monkeypatch.setitem(sys.modules, module_name, module)
    spec.loader.exec_module(module)
    return module
@pytest.mark.p2
def test_convert_branch_matrix_unit(monkeypatch):
    """Walk every error/success branch of the ``convert`` route in order.

    Each monkeypatch below flips exactly one service result to push the handler
    one branch deeper; ordering matters, so the patches are strictly sequential.
    """
    module = _load_file2document_module(monkeypatch)
    req_state = {"kb_ids": ["kb-1"], "file_ids": ["f1"]}
    _set_request_json(monkeypatch, module, req_state)
    events = {"deleted": []}
    # Falsy file record -> "File not found!".
    monkeypatch.setattr(module.FileService, "get_by_ids", lambda _ids: [_FalsyFile("f1", module.FileType.DOC.value)])
    res = _run(module.convert())
    assert res["message"] == "File not found!"
    # Existing mapping but the mapped document lookup fails.
    monkeypatch.setattr(module.FileService, "get_by_ids", lambda _ids: [_DummyFile("f1", module.FileType.DOC.value)])
    monkeypatch.setattr(module.File2DocumentService, "get_by_file_id", lambda _file_id: [SimpleNamespace(document_id="doc-1")])
    monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (False, None))
    res = _run(module.convert())
    assert res["message"] == "Document not found!"
    # Document exists but no tenant can be resolved.
    monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (True, SimpleNamespace(id=_doc_id)))
    monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _doc_id: None)
    res = _run(module.convert())
    assert res["message"] == "Tenant not found!"
    # Tenant resolves but the old document cannot be removed.
    monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _doc_id: "tenant-1")
    monkeypatch.setattr(module.DocumentService, "remove_document", lambda *_args, **_kwargs: False)
    res = _run(module.convert())
    assert "Document removal" in res["message"]
    # Clean mapping path, but the target knowledge base is missing; the stale
    # file->document mapping must still have been deleted.
    monkeypatch.setattr(module.DocumentService, "remove_document", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.File2DocumentService, "get_by_file_id", lambda _file_id: [])
    monkeypatch.setattr(module.File2DocumentService, "delete_by_file_id", lambda file_id: events["deleted"].append(file_id))
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (False, None))
    res = _run(module.convert())
    assert res["message"] == "Can't find this dataset!"
    assert events["deleted"] == ["f1"]
    # KB resolves, but the per-file lookup fails.
    kb = SimpleNamespace(id="kb-1", parser_id="naive", pipeline_id="p1", parser_config={})
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: (True, kb))
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (False, None))
    res = _run(module.convert())
    assert res["message"] == "Can't find this file!"
    # Success path: a folder fans out to its innermost file, which is converted.
    req_state["file_ids"] = ["folder-1"]
    monkeypatch.setattr(module.FileService, "get_by_ids", lambda _ids: [_DummyFile("folder-1", module.FileType.FOLDER.value, name="folder")])
    monkeypatch.setattr(module.FileService, "get_all_innermost_file_ids", lambda _file_id, _acc: ["inner-1"])
    monkeypatch.setattr(
        module.FileService,
        "get_by_id",
        lambda _file_id: (True, _DummyFile("inner-1", module.FileType.DOC.value, name="inner.txt", location="inner.loc", size=2)),
    )
    monkeypatch.setattr(module.DocumentService, "insert", lambda _payload: SimpleNamespace(id="doc-new"))
    monkeypatch.setattr(
        module.File2DocumentService,
        "insert",
        lambda _payload: SimpleNamespace(to_json=lambda: {"file_id": "inner-1", "document_id": "doc-new"}),
    )
    res = _run(module.convert())
    assert res["code"] == 0
    assert res["data"] == [{"file_id": "inner-1", "document_id": "doc-new"}]
    # Unexpected exception funnels through server_error_response (code 500).
    req_state["file_ids"] = ["f1"]
    monkeypatch.setattr(
        module.FileService,
        "get_by_ids",
        lambda _ids: (_ for _ in ()).throw(RuntimeError("convert boom")),
    )
    res = _run(module.convert())
    assert res["code"] == 500
    assert "convert boom" in res["message"]
@pytest.mark.p2
def test_rm_branch_matrix_unit(monkeypatch):
    """Walk every error/success branch of the ``rm`` route in order.

    Patches are sequential: each one unlocks the next branch of the handler.
    """
    module = _load_file2document_module(monkeypatch)
    req_state = {"file_ids": []}
    _set_request_json(monkeypatch, module, req_state)
    deleted = []
    # Empty file_ids -> argument error.
    res = _run(module.rm())
    assert res["code"] == module.RetCode.ARGUMENT_ERROR
    assert 'Lack of "Files ID"' in res["message"]
    # No mapping, or a falsy mapping entry, both report "Inform not found!".
    req_state["file_ids"] = ["f1"]
    monkeypatch.setattr(module.File2DocumentService, "get_by_file_id", lambda _file_id: [])
    res = _run(module.rm())
    assert res["message"] == "Inform not found!"
    monkeypatch.setattr(module.File2DocumentService, "get_by_file_id", lambda _file_id: [None])
    res = _run(module.rm())
    assert res["message"] == "Inform not found!"
    # Mapping exists but document lookup fails; the mapping is still deleted.
    monkeypatch.setattr(module.File2DocumentService, "get_by_file_id", lambda _file_id: [SimpleNamespace(document_id="doc-1")])
    monkeypatch.setattr(module.File2DocumentService, "delete_by_file_id", lambda file_id: deleted.append(file_id))
    monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (False, None))
    res = _run(module.rm())
    assert res["message"] == "Document not found!"
    assert deleted == ["f1"]
    # Document exists but tenant is unknown.
    monkeypatch.setattr(module.DocumentService, "get_by_id", lambda _doc_id: (True, SimpleNamespace(id=_doc_id)))
    monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _doc_id: None)
    res = _run(module.rm())
    assert res["message"] == "Tenant not found!"
    # Tenant resolves but removal fails.
    monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _doc_id: "tenant-1")
    monkeypatch.setattr(module.DocumentService, "remove_document", lambda *_args, **_kwargs: False)
    res = _run(module.rm())
    assert "Document removal" in res["message"]
    # Full success across multiple file ids.
    req_state["file_ids"] = ["f1", "f2"]
    monkeypatch.setattr(
        module.File2DocumentService,
        "get_by_file_id",
        lambda file_id: [SimpleNamespace(document_id=f"doc-{file_id}")],
    )
    monkeypatch.setattr(module.DocumentService, "get_by_id", lambda doc_id: (True, SimpleNamespace(id=doc_id)))
    monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _doc_id: "tenant-1")
    monkeypatch.setattr(module.DocumentService, "remove_document", lambda *_args, **_kwargs: True)
    res = _run(module.rm())
    assert res["code"] == 0
    assert res["data"] is True
    # Unexpected exception funnels through server_error_response (code 500).
    monkeypatch.setattr(
        module.File2DocumentService,
        "get_by_file_id",
        lambda _file_id: (_ for _ in ()).throw(RuntimeError("rm boom")),
    )
    req_state["file_ids"] = ["boom"]
    res = _run(module.rm())
    assert res["code"] == 500
    assert "rm boom" in res["message"]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_file_app/test_file2document_routes_unit.py",
"license": "Apache License 2.0",
"lines": 282,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_file_app/test_file_routes_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import importlib.util
import sys
from copy import deepcopy
from enum import Enum
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _AwaitableValue:
def __init__(self, value):
self._value = value
def __await__(self):
async def _co():
return self._value
return _co().__await__()
class _Args(dict):
def get(self, key, default=None, type=None):
value = super().get(key, default)
if value is None or type is None:
return value
try:
return type(value)
except (TypeError, ValueError):
return default
class _DummyUploadFile:
def __init__(self, filename, blob=b"blob"):
self.filename = filename
self._blob = blob
def read(self):
return self._blob
class _DummyFiles(dict):
def __init__(self, file_objs=None):
super().__init__()
self._file_objs = list(file_objs or [])
if file_objs is not None:
self["file"] = self._file_objs
def getlist(self, key):
if key == "file":
return list(self._file_objs)
return []
class _DummyRequest:
    """Request stand-in whose ``form``/``files``/``json`` are awaitable, like Quart's."""
    def __init__(
        self,
        *,
        args=None,
        form=None,
        files=None,
        json_data=None,
        headers=None,
        method="POST",
        content_length=0,
    ):
        # ``files`` defaults to an empty container only when left as None;
        # the other mappings fall back to fresh empty dicts when falsy.
        upload_container = _DummyFiles() if files is None else files
        self.args = _Args(args or {})
        self.form = _AwaitableValue(form or {})
        self.files = _AwaitableValue(upload_container)
        self.json = _AwaitableValue(json_data or {})
        self.headers = headers or {}
        self.method = method
        self.content_length = content_length
class _DummyResponse:
def __init__(self, data):
self.data = data
self.headers = {}
class _DummyFile:
def __init__(
self,
file_id,
file_type,
*,
tenant_id="tenant1",
parent_id="pf1",
location="file.bin",
name="file.txt",
source_type="user",
):
self.id = file_id
self.type = file_type
self.tenant_id = tenant_id
self.parent_id = parent_id
self.location = location
self.name = name
self.source_type = source_type
def to_json(self):
return {"id": self.id, "name": self.name, "type": self.type}
def _run(coro):
return asyncio.run(coro)
def _set_request(
    monkeypatch,
    module,
    *,
    args=None,
    form=None,
    files=None,
    json_data=None,
    headers=None,
    method="POST",
    content_length=0,
):
    """Install a fabricated request object as *module*.request for one route call."""
    fake_request = _DummyRequest(
        args=args,
        form=form,
        files=files,
        json_data=json_data,
        headers=headers,
        method=method,
        content_length=content_length,
    )
    monkeypatch.setattr(module, "request", fake_request)
def _set_request_json(monkeypatch, module, payload_state):
async def _req_json():
return deepcopy(payload_state)
monkeypatch.setattr(module, "get_request_json", _req_json)
@pytest.fixture(scope="session")
def auth():
    """Session-scoped dummy auth token; these unit tests never authenticate for real."""
    return "unit-auth"
@pytest.fixture(scope="session", autouse=True)
def set_tenant_info():
    """No-op autouse fixture; presumably shadows a heavier conftest fixture — confirm."""
    return None
def _load_file_app_module(monkeypatch):
    """Load ``api/apps/file_app.py`` from source with every dependency stubbed.

    Fabricates ``quart``, the ``api.*`` tree, and the ``common.*`` tree in
    ``sys.modules`` via monkeypatch (undone after each test), then executes the
    route module in isolation with a no-op blueprint manager. Returns the module.
    """
    repo_root = Path(__file__).resolve().parents[4]
    # Fake "quart": a placeholder request plus an async make_response.
    quart_mod = ModuleType("quart")
    quart_mod.request = _DummyRequest()
    async def _make_response(data):
        return _DummyResponse(data)
    quart_mod.make_response = _make_response
    monkeypatch.setitem(sys.modules, "quart", quart_mod)
    api_pkg = ModuleType("api")
    api_pkg.__path__ = [str(repo_root / "api")]
    monkeypatch.setitem(sys.modules, "api", api_pkg)
    # "api.apps": current_user and a pass-through login_required.
    apps_mod = ModuleType("api.apps")
    apps_mod.__path__ = [str(repo_root / "api" / "apps")]
    apps_mod.current_user = SimpleNamespace(id="tenant1", tenant_id="tenant1")
    apps_mod.login_required = lambda func: func
    monkeypatch.setitem(sys.modules, "api.apps", apps_mod)
    api_pkg.apps = apps_mod
    api_common_pkg = ModuleType("api.common")
    api_common_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "api.common", api_common_pkg)
    # Team-permission check always passes in these unit tests.
    permission_mod = ModuleType("api.common.check_team_permission")
    permission_mod.check_file_team_permission = lambda *_args, **_kwargs: True
    monkeypatch.setitem(sys.modules, "api.common.check_team_permission", permission_mod)
    api_common_pkg.check_team_permission = permission_mod
    # "api.db": only the FileType enum values the routes consult.
    db_pkg = ModuleType("api.db")
    db_pkg.__path__ = []
    class _FileType(Enum):
        FOLDER = "folder"
        VIRTUAL = "virtual"
        DOC = "doc"
        VISUAL = "visual"
    db_pkg.FileType = _FileType
    monkeypatch.setitem(sys.modules, "api.db", db_pkg)
    api_pkg.db = db_pkg
    services_pkg = ModuleType("api.db.services")
    services_pkg.__path__ = []
    services_pkg.duplicate_name = lambda _query, **kwargs: kwargs.get("name", "")
    monkeypatch.setitem(sys.modules, "api.db.services", services_pkg)
    # Service stubs: defaults are benign; tests monkeypatch per-branch behavior.
    document_service_mod = ModuleType("api.db.services.document_service")
    class _StubDocumentService:
        @staticmethod
        def get_doc_count(_uid):
            return 0
        @staticmethod
        def get_by_id(doc_id):
            return True, SimpleNamespace(id=doc_id)
        @staticmethod
        def get_tenant_id(_doc_id):
            return "tenant1"
        @staticmethod
        def remove_document(*_args, **_kwargs):
            return True
        @staticmethod
        def update_by_id(*_args, **_kwargs):
            return True
    document_service_mod.DocumentService = _StubDocumentService
    monkeypatch.setitem(sys.modules, "api.db.services.document_service", document_service_mod)
    services_pkg.document_service = document_service_mod
    file2doc_mod = ModuleType("api.db.services.file2document_service")
    class _StubFile2DocumentService:
        @staticmethod
        def get_by_file_id(_file_id):
            return []
        @staticmethod
        def delete_by_file_id(*_args, **_kwargs):
            return None
        @staticmethod
        def get_storage_address(**_kwargs):
            return "bucket2", "location2"
    file2doc_mod.File2DocumentService = _StubFile2DocumentService
    monkeypatch.setitem(sys.modules, "api.db.services.file2document_service", file2doc_mod)
    services_pkg.file2document_service = file2doc_mod
    file_service_mod = ModuleType("api.db.services.file_service")
    class _StubFileService:
        @staticmethod
        def get_root_folder(_tenant_id):
            return {"id": "root"}
        @staticmethod
        def get_by_id(file_id):
            return True, _DummyFile(file_id, _FileType.DOC.value, name="file.txt")
        @staticmethod
        def get_id_list_by_id(_pf_id, _names, _index, ids):
            return ids
        @staticmethod
        def create_folder(_file, parent_id, _names, _len_id):
            return SimpleNamespace(id=parent_id, name=str(parent_id))
        @staticmethod
        def query(**_kwargs):
            return []
        @staticmethod
        def insert(data):
            return SimpleNamespace(to_json=lambda: data)
        @staticmethod
        def is_parent_folder_exist(_pf_id):
            return True
        @staticmethod
        def get_by_pf_id(*_args, **_kwargs):
            return [], 0
        @staticmethod
        def get_parent_folder(_file_id):
            return SimpleNamespace(to_json=lambda: {"id": "root"})
        @staticmethod
        def get_all_parent_folders(_file_id):
            return []
        @staticmethod
        def init_knowledgebase_docs(*_args, **_kwargs):
            return None
        @staticmethod
        def list_all_files_by_parent_id(_parent_id):
            return []
        @staticmethod
        def delete(_file):
            return True
        @staticmethod
        def update_by_id(*_args, **_kwargs):
            return True
        @staticmethod
        def get_by_ids(_file_ids):
            return []
        @staticmethod
        def delete_by_id(_file_id):
            return True
    file_service_mod.FileService = _StubFileService
    monkeypatch.setitem(sys.modules, "api.db.services.file_service", file_service_mod)
    services_pkg.file_service = file_service_mod
    # "api.utils.api_utils": result-envelope helpers matching the asserted shapes.
    api_utils_mod = ModuleType("api.utils.api_utils")
    class _RetCode:
        SUCCESS = 0
        ARGUMENT_ERROR = 101
        AUTHENTICATION_ERROR = 401
        OPERATING_ERROR = 103
    def get_json_result(data=None, message="", code=_RetCode.SUCCESS):
        return {"code": code, "data": data, "message": message}
    async def get_request_json():
        return {}
    def get_data_error_result(message=""):
        return {"code": _RetCode.OPERATING_ERROR, "data": None, "message": message}
    def server_error_response(err):
        return {"code": 500, "data": None, "message": str(err)}
    def validate_request(*_required):
        # Pass-through decorator: no actual payload validation in unit tests.
        def _decorator(func):
            return func
        return _decorator
    api_utils_mod.get_json_result = get_json_result
    api_utils_mod.get_request_json = get_request_json
    api_utils_mod.get_data_error_result = get_data_error_result
    api_utils_mod.server_error_response = server_error_response
    api_utils_mod.validate_request = validate_request
    monkeypatch.setitem(sys.modules, "api.utils.api_utils", api_utils_mod)
    file_utils_mod = ModuleType("api.utils.file_utils")
    file_utils_mod.filename_type = lambda _name: _FileType.DOC.value
    monkeypatch.setitem(sys.modules, "api.utils.file_utils", file_utils_mod)
    web_utils_mod = ModuleType("api.utils.web_utils")
    web_utils_mod.CONTENT_TYPE_MAP = {"txt": "text/plain", "json": "application/json"}
    web_utils_mod.apply_safe_file_response_headers = (
        lambda response, content_type, ext: response.headers.update({"content_type": content_type, "extension": ext})
    )
    monkeypatch.setitem(sys.modules, "api.utils.web_utils", web_utils_mod)
    # Fake "common" tree: storage backend, constants, and misc helpers.
    common_pkg = ModuleType("common")
    common_pkg.__path__ = [str(repo_root / "common")]
    monkeypatch.setitem(sys.modules, "common", common_pkg)
    settings_mod = ModuleType("common.settings")
    settings_mod.STORAGE_IMPL = SimpleNamespace(
        obj_exist=lambda *_args, **_kwargs: False,
        put=lambda *_args, **_kwargs: None,
        rm=lambda *_args, **_kwargs: None,
        get=lambda *_args, **_kwargs: b"",
    )
    common_pkg.settings = settings_mod
    monkeypatch.setitem(sys.modules, "common.settings", settings_mod)
    constants_mod = ModuleType("common.constants")
    class _FileSource:
        KNOWLEDGEBASE = "knowledgebase"
    constants_mod.RetCode = _RetCode
    constants_mod.FileSource = _FileSource
    monkeypatch.setitem(sys.modules, "common.constants", constants_mod)
    misc_utils_mod = ModuleType("common.misc_utils")
    misc_utils_mod.get_uuid = lambda: "uuid-1"
    async def thread_pool_exec(func, *args, **kwargs):
        # Run the callable inline instead of on a worker thread.
        return func(*args, **kwargs)
    misc_utils_mod.thread_pool_exec = thread_pool_exec
    monkeypatch.setitem(sys.modules, "common.misc_utils", misc_utils_mod)
    # Execute the real route module under a test-only name, with a no-op manager
    # installed before exec so the @manager.route decorators are inert.
    module_name = "test_file_app_routes_unit_module"
    module_path = repo_root / "api" / "apps" / "file_app.py"
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    module.manager = _DummyManager()
    monkeypatch.setitem(sys.modules, module_name, module)
    spec.loader.exec_module(module)
    return module
@pytest.mark.p2
def test_upload_branch_matrix_unit(monkeypatch):
    """Walk every error/success branch of the ``upload`` route in order.

    Each request/monkeypatch pairing below drives exactly one branch; ordering
    matters because later patches build on earlier ones.
    """
    module = _load_file_app_module(monkeypatch)
    monkeypatch.setattr(module.FileService, "get_root_folder", lambda _uid: {"id": "root"})
    # No "file" key in the multipart body -> argument error.
    _set_request(monkeypatch, module, form={}, files=_DummyFiles())
    res = _run(module.upload())
    assert res["code"] == module.RetCode.ARGUMENT_ERROR
    assert res["message"] == "No file part!"
    # A file entry with an empty filename -> "No file selected!".
    _set_request(
        monkeypatch,
        module,
        form={"parent_id": "pf1"},
        files=_DummyFiles([_DummyUploadFile("")]),
    )
    res = _run(module.upload())
    assert res["code"] == module.RetCode.ARGUMENT_ERROR
    assert res["message"] == "No file selected!"
    # Target folder lookup fails.
    _set_request(
        monkeypatch,
        module,
        form={"parent_id": "pf1"},
        files=_DummyFiles([_DummyUploadFile("a.txt")]),
    )
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (False, None))
    res = _run(module.upload())
    assert res["code"] == module.RetCode.OPERATING_ERROR
    assert res["message"] == "Can't find this folder!"
    # Per-user file-count cap reached (MAX_FILE_NUM_PER_USER env var).
    monkeypatch.setenv("MAX_FILE_NUM_PER_USER", "1")
    _set_request(
        monkeypatch,
        module,
        form={"parent_id": "pf1"},
        files=_DummyFiles([_DummyUploadFile("cap.txt")]),
    )
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (True, SimpleNamespace(id="pf1", name="pf1")))
    monkeypatch.setattr(module.DocumentService, "get_doc_count", lambda _uid: 1)
    res = _run(module.upload())
    assert res["code"] == module.RetCode.SUCCESS
    assert "Exceed the maximum file number of a free user!" in res["data"][0]["message"]
    monkeypatch.delenv("MAX_FILE_NUM_PER_USER", raising=False)
    # Success path with a None filename and no storage-location collision;
    # the recording storage stub captures the exact put() arguments.
    class _StorageNoCollision:
        def __init__(self):
            self.put_calls = []
        def obj_exist(self, _bucket, _location):
            return False
        def put(self, bucket, location, blob):
            self.put_calls.append((bucket, location, blob))
    storage_no_collision = _StorageNoCollision()
    monkeypatch.setattr(module.settings, "STORAGE_IMPL", storage_no_collision)
    _set_request(
        monkeypatch,
        module,
        form={"parent_id": "pf1"},
        files=_DummyFiles([_DummyUploadFile(None, b"none-blob")]),
    )
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (True, SimpleNamespace(id="pf1", name="pf1")))
    monkeypatch.setattr(module.FileService, "get_id_list_by_id", lambda *_args, **_kwargs: ["pf1"])
    monkeypatch.setattr(
        module.FileService,
        "create_folder",
        lambda _file, parent_id, _names, _len_id: SimpleNamespace(id=f"{parent_id}-folder"),
    )
    monkeypatch.setattr(module, "filename_type", lambda _name: module.FileType.DOC.value)
    monkeypatch.setattr(module, "duplicate_name", lambda _query, **kwargs: kwargs.get("name"))
    monkeypatch.setattr(module, "get_uuid", lambda: "uuid-none")
    monkeypatch.setattr(module.FileService, "insert", lambda data: SimpleNamespace(to_json=lambda: {"id": data["id"]}))
    res = _run(module.upload())
    assert res["code"] == module.RetCode.SUCCESS
    assert len(res["data"]) == 1
    assert storage_no_collision.put_calls == [("pf1-folder", None, b"none-blob")]
    # A nested path whose intermediate folder id cannot be resolved.
    _set_request(
        monkeypatch,
        module,
        form={"parent_id": "pf1"},
        files=_DummyFiles([_DummyUploadFile("dir/a.txt")]),
    )
    monkeypatch.setattr(module.FileService, "get_id_list_by_id", lambda *_args, **_kwargs: ["pf1", "missing-child"])
    def _get_by_id_missing_child(file_id):
        if file_id == "missing-child":
            return False, None
        return True, SimpleNamespace(id=file_id, name=file_id)
    monkeypatch.setattr(module.FileService, "get_by_id", _get_by_id_missing_child)
    res = _run(module.upload())
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"][0]["message"] == "Folder not found!"
    # Parent folder resolves on the first lookup but not on the second.
    _set_request(
        monkeypatch,
        module,
        form={"parent_id": "pf1"},
        files=_DummyFiles([_DummyUploadFile("b.txt")]),
    )
    monkeypatch.setattr(module.FileService, "get_id_list_by_id", lambda *_args, **_kwargs: ["pf1", "leaf"])
    pf1_calls = {"count": 0}
    def _get_by_id_missing_parent_else(file_id):
        if file_id == "pf1":
            pf1_calls["count"] += 1
            if pf1_calls["count"] == 1:
                return True, SimpleNamespace(id="pf1", name="pf1")
            return False, None
        return True, SimpleNamespace(id=file_id, name=file_id)
    monkeypatch.setattr(module.FileService, "get_by_id", _get_by_id_missing_parent_else)
    res = _run(module.upload())
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"][0]["message"] == "Folder not found!"
    # Two uploads with a storage-location collision on the first obj_exist call;
    # the route must de-duplicate by suffixing the location (see "_" assertion).
    class _StorageCollision:
        def __init__(self):
            self.obj_calls = 0
            self.put_calls = []
        def obj_exist(self, _bucket, _location):
            self.obj_calls += 1
            return self.obj_calls == 1
        def put(self, bucket, location, blob):
            self.put_calls.append((bucket, location, blob))
    storage_collision = _StorageCollision()
    monkeypatch.setattr(module.settings, "STORAGE_IMPL", storage_collision)
    _set_request(
        monkeypatch,
        module,
        form={"parent_id": "pf1"},
        files=_DummyFiles([_DummyUploadFile("dir/a.txt", b"a"), _DummyUploadFile("b.txt", b"b")]),
    )
    def _get_by_id_ok(file_id):
        return True, SimpleNamespace(id=file_id, name=file_id)
    def _get_id_list(_pf_id, file_obj_names, _idx, _ids):
        if file_obj_names[-1] == "a.txt":
            return ["pf1", "mid-id"]
        return ["pf1", "leaf-id"]
    def _create_folder(_file, parent_id, _names, _len_id):
        return SimpleNamespace(id=f"{parent_id}-folder")
    inserted_payloads = []
    monkeypatch.setattr(module.FileService, "get_by_id", _get_by_id_ok)
    monkeypatch.setattr(module.FileService, "get_id_list_by_id", _get_id_list)
    monkeypatch.setattr(module.FileService, "create_folder", _create_folder)
    monkeypatch.setattr(module, "filename_type", lambda _name: module.FileType.DOC.value)
    monkeypatch.setattr(module, "duplicate_name", lambda _query, **kwargs: kwargs["name"])
    monkeypatch.setattr(module, "get_uuid", lambda: "file-id")
    monkeypatch.setattr(module.FileService, "query", lambda **_kwargs: [])
    def _insert(data):
        inserted_payloads.append(data)
        return SimpleNamespace(to_json=lambda: {"id": data["id"], "location": data["location"]})
    monkeypatch.setattr(module.FileService, "insert", _insert)
    res = _run(module.upload())
    assert res["code"] == module.RetCode.SUCCESS
    assert len(res["data"]) == 2
    assert len(storage_collision.put_calls) == 2
    assert any(location.endswith("_") for _, location, _ in storage_collision.put_calls)
    assert len(inserted_payloads) == 2
    # Unexpected exception funnels through server_error_response (code 500).
    _set_request(
        monkeypatch,
        module,
        form={"parent_id": "pf1"},
        files=_DummyFiles([_DummyUploadFile("boom.txt")]),
    )
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (_ for _ in ()).throw(RuntimeError("upload boom")))
    monkeypatch.setattr(module, "server_error_response", lambda err: {"code": 500, "message": str(err)})
    res = _run(module.upload())
    assert res["code"] == 500
    assert "upload boom" in res["message"]
@pytest.mark.p2
def test_create_and_list_branch_matrix_unit(monkeypatch):
    """Walk the branches of the ``create`` and ``list_files`` routes in order."""
    module = _load_file_app_module(monkeypatch)
    req_state = {"name": "file1"}
    _set_request_json(monkeypatch, module, req_state)
    monkeypatch.setattr(module.FileService, "get_root_folder", lambda _uid: {"id": "root"})
    # create: parent folder does not exist.
    monkeypatch.setattr(module.FileService, "is_parent_folder_exist", lambda _pf_id: False)
    res = _run(module.create())
    assert res["code"] == module.RetCode.OPERATING_ERROR
    assert "Parent Folder Doesn't Exist!" in res["message"]
    # create: name already taken under the parent.
    req_state.update({"name": "dup", "parent_id": "pf1"})
    monkeypatch.setattr(module.FileService, "is_parent_folder_exist", lambda _pf_id: True)
    monkeypatch.setattr(module.FileService, "query", lambda **_kwargs: [object()])
    res = _run(module.create())
    assert "Duplicated folder name" in res["message"]
    # create: success for an explicit FOLDER type; the recording insert captures
    # the payload actually persisted.
    inserted = {}
    def _insert(data):
        inserted["payload"] = data
        return SimpleNamespace(to_json=lambda: data)
    monkeypatch.setattr(module.FileService, "query", lambda **_kwargs: [])
    monkeypatch.setattr(module, "get_uuid", lambda: "uuid-folder")
    monkeypatch.setattr(module.FileService, "insert", _insert)
    req_state.update({"name": "folder", "parent_id": "pf1", "type": module.FileType.FOLDER.value})
    res = _run(module.create())
    assert res["code"] == module.RetCode.SUCCESS
    assert inserted["payload"]["type"] == module.FileType.FOLDER.value
    # create: an unrecognized type is stored as VIRTUAL.
    req_state.update({"name": "virtual", "parent_id": "pf1", "type": "UNKNOWN"})
    res = _run(module.create())
    assert res["code"] == module.RetCode.SUCCESS
    assert inserted["payload"]["type"] == module.FileType.VIRTUAL.value
    # create: unexpected exception funnels through server_error_response.
    monkeypatch.setattr(
        module.FileService,
        "is_parent_folder_exist",
        lambda _pf_id: (_ for _ in ()).throw(RuntimeError("create boom")),
    )
    monkeypatch.setattr(module, "server_error_response", lambda err: {"code": 500, "message": str(err)})
    res = _run(module.create())
    assert res["code"] == 500
    assert "create boom" in res["message"]
    # list_files: folder lookup fails, but init_knowledgebase_docs was invoked.
    list_calls = {"init": 0}
    monkeypatch.setattr(module.FileService, "get_root_folder", lambda _uid: {"id": "root"})
    monkeypatch.setattr(
        module.FileService,
        "init_knowledgebase_docs",
        lambda _pf_id, _uid: list_calls.__setitem__("init", list_calls["init"] + 1),
    )
    _set_request(monkeypatch, module, args={})
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _pf_id: (False, None))
    res = module.list_files()
    assert res["message"] == "Folder not found!"
    assert list_calls["init"] == 1
    # list_files: rows found but the parent folder lookup returns nothing.
    _set_request(
        monkeypatch,
        module,
        args={
            "parent_id": "p1",
            "keywords": "k",
            "page": "2",
            "page_size": "10",
            "orderby": "name",
            "desc": "False",
        },
    )
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _pf_id: (True, SimpleNamespace(id="p1")))
    monkeypatch.setattr(module.FileService, "get_by_pf_id", lambda *_args, **_kwargs: ([{"id": "f1"}], 1))
    monkeypatch.setattr(module.FileService, "get_parent_folder", lambda _pf_id: None)
    res = module.list_files()
    assert res["message"] == "File not found!"
    # list_files: full success with total count and the parent folder attached.
    monkeypatch.setattr(module.FileService, "get_parent_folder", lambda _pf_id: SimpleNamespace(to_json=lambda: {"id": "p0"}))
    res = module.list_files()
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"]["total"] == 1
    assert res["data"]["parent_folder"]["id"] == "p0"
    # list_files: unexpected exception funnels through server_error_response.
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _pf_id: (_ for _ in ()).throw(RuntimeError("list boom")))
    monkeypatch.setattr(module, "server_error_response", lambda err: {"code": 500, "message": str(err)})
    res = module.list_files()
    assert res["code"] == 500
    assert "list boom" in res["message"]
@pytest.mark.p2
def test_folder_lookup_routes_branch_matrix_unit(monkeypatch):
    """Branch matrix for the folder-lookup routes.

    Exercises success, not-found and exception paths for each of:
    ``get_root_folder``, ``get_parent_folder`` and ``get_all_parent_folders``.
    """
    module = _load_file_app_module(monkeypatch)
    # get_root_folder: happy path returns the root folder payload.
    monkeypatch.setattr(module.FileService, "get_root_folder", lambda _uid: {"id": "root"})
    res = module.get_root_folder()
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"]["root_folder"]["id"] == "root"
    # get_root_folder: service raises -> routed through server_error_response.
    # (The throw-inside-genexpr trick raises from a lambda.)
    monkeypatch.setattr(module.FileService, "get_root_folder", lambda _uid: (_ for _ in ()).throw(RuntimeError("root boom")))
    monkeypatch.setattr(module, "server_error_response", lambda err: {"code": 500, "message": str(err)})
    res = module.get_root_folder()
    assert res["code"] == 500
    assert "root boom" in res["message"]
    # get_parent_folder: unknown file id -> "Folder not found!".
    _set_request(monkeypatch, module, args={"file_id": "missing"})
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (False, None))
    res = module.get_parent_folder()
    assert res["message"] == "Folder not found!"
    # get_parent_folder: happy path serializes the parent via to_json().
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (True, SimpleNamespace(id="f1")))
    monkeypatch.setattr(module.FileService, "get_parent_folder", lambda _file_id: SimpleNamespace(to_json=lambda: {"id": "p1"}))
    res = module.get_parent_folder()
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"]["parent_folder"]["id"] == "p1"
    # get_parent_folder: lookup raises -> 500 via server_error_response.
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (_ for _ in ()).throw(RuntimeError("parent boom")))
    monkeypatch.setattr(module, "server_error_response", lambda err: {"code": 500, "message": str(err)})
    res = module.get_parent_folder()
    assert res["code"] == 500
    assert "parent boom" in res["message"]
    # get_all_parent_folders: unknown file id -> "Folder not found!".
    _set_request(monkeypatch, module, args={"file_id": "missing"})
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (False, None))
    res = module.get_all_parent_folders()
    assert res["message"] == "Folder not found!"
    # get_all_parent_folders: happy path returns every ancestor, serialized.
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (True, SimpleNamespace(id="f1")))
    monkeypatch.setattr(
        module.FileService,
        "get_all_parent_folders",
        lambda _file_id: [SimpleNamespace(to_json=lambda: {"id": "p1"}), SimpleNamespace(to_json=lambda: {"id": "p2"})],
    )
    res = module.get_all_parent_folders()
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"]["parent_folders"] == [{"id": "p1"}, {"id": "p2"}]
    # get_all_parent_folders: lookup raises -> 500 via server_error_response.
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (_ for _ in ()).throw(RuntimeError("all-parent boom")))
    monkeypatch.setattr(module, "server_error_response", lambda err: {"code": 500, "message": str(err)})
    res = module.get_all_parent_folders()
    assert res["code"] == 500
    assert "all-parent boom" in res["message"]
@pytest.mark.p2
def test_rm_branch_matrix_unit(monkeypatch):
    """Branch matrix for the rm (delete) route.

    Covers: missing file, missing tenant, denied permission, knowledgebase
    source short-circuit, single-document delete (with storage errors
    tolerated), recursive folder delete, and a thread-pool failure path.
    NOTE(review): ordering matters — ``req_state`` is shared with the patched
    request body, so each scenario mutates it in place before calling rm().
    """
    module = _load_file_app_module(monkeypatch)
    req_state = {"file_ids": ["missing"]}
    _set_request_json(monkeypatch, module, req_state)
    # Toggleable permission gate used across scenarios.
    allow = {"value": True}
    monkeypatch.setattr(module, "check_file_team_permission", lambda _file, _uid: allow["value"])
    # Scenario: file id does not resolve.
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (False, None))
    res = _run(module.rm())
    assert res["message"] == "File or Folder not found!"
    # Scenario: file exists but has no tenant.
    req_state["file_ids"] = ["tenant-missing"]
    monkeypatch.setattr(
        module.FileService,
        "get_by_id",
        lambda _file_id: (True, _DummyFile(_file_id, module.FileType.DOC.value, tenant_id=None)),
    )
    res = _run(module.rm())
    assert res["message"] == "Tenant not found!"
    # Scenario: team permission denied.
    req_state["file_ids"] = ["deny"]
    allow["value"] = False
    monkeypatch.setattr(
        module.FileService,
        "get_by_id",
        lambda _file_id: (True, _DummyFile(_file_id, module.FileType.DOC.value)),
    )
    res = _run(module.rm())
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR
    assert res["message"] == "No authorization."
    # Scenario: knowledgebase-sourced file — rm succeeds without touching it.
    allow["value"] = True
    req_state["file_ids"] = ["kb"]
    monkeypatch.setattr(
        module.FileService,
        "get_by_id",
        lambda _file_id: (
            True,
            _DummyFile(
                _file_id,
                module.FileType.DOC.value,
                source_type=module.FileSource.KNOWLEDGEBASE,
            ),
        ),
    )
    res = _run(module.rm())
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"] is True
    # Recording sinks for the real-delete scenarios below.
    events = {
        "rm_calls": [],
        "deleted_files": [],
        "deleted_links": [],
        "removed_docs": [],
    }

    class _Storage:
        # Storage stub that records the rm call then fails, proving the
        # route tolerates storage-layer errors during delete.
        def rm(self, bucket, location):
            events["rm_calls"].append((bucket, location))
            raise RuntimeError("storage rm boom")

    monkeypatch.setattr(module.settings, "STORAGE_IMPL", _Storage())
    monkeypatch.setattr(module.File2DocumentService, "get_by_file_id", lambda file_id: [SimpleNamespace(document_id=f"doc-{file_id}")])
    monkeypatch.setattr(module.DocumentService, "get_by_id", lambda doc_id: (True, SimpleNamespace(id=doc_id)))
    monkeypatch.setattr(module.DocumentService, "get_tenant_id", lambda _doc_id: "tenant1")
    monkeypatch.setattr(
        module.DocumentService,
        "remove_document",
        lambda doc, tenant: events["removed_docs"].append((doc.id, tenant)),
    )
    monkeypatch.setattr(
        module.File2DocumentService,
        "delete_by_file_id",
        lambda file_id: events["deleted_links"].append(file_id),
    )
    monkeypatch.setattr(module.FileService, "delete", lambda file: events["deleted_files"].append(file.id))
    # Scenario: single document delete succeeds despite the storage error.
    req_state["file_ids"] = ["doc-top"]
    monkeypatch.setattr(
        module.FileService,
        "get_by_id",
        lambda _file_id: (True, _DummyFile("doc-top", module.FileType.DOC.value, location="top.bin")),
    )
    res = _run(module.rm())
    assert res["code"] == module.RetCode.SUCCESS
    # Scenario: recursive folder delete — folder1 contains doc1 and a nested
    # folder which itself contains doc2.
    req_state["file_ids"] = ["folder1"]
    folder1 = _DummyFile("folder1", module.FileType.FOLDER.value, location="")
    nested_folder = _DummyFile("nested-folder", module.FileType.FOLDER.value, parent_id="folder1", location="")
    doc1 = _DummyFile("doc1", module.FileType.DOC.value, parent_id="folder1", location="doc1.bin")
    doc2 = _DummyFile("doc2", module.FileType.DOC.value, parent_id="nested-folder", location="doc2.bin")
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (True, folder1))

    def _list_all(parent_id):
        # Fake directory listing for the two-level folder tree above.
        if parent_id == "folder1":
            return [nested_folder, doc1]
        if parent_id == "nested-folder":
            return [doc2]
        return []

    monkeypatch.setattr(module.FileService, "list_all_files_by_parent_id", _list_all)
    res = _run(module.rm())
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"] is True
    # Everything in the tree (plus the earlier doc-top) was deleted.
    assert ("pf1", "top.bin") in events["rm_calls"]
    assert ("folder1", "doc1.bin") in events["rm_calls"]
    assert ("nested-folder", "doc2.bin") in events["rm_calls"]
    assert {"doc-top", "doc1", "doc2", "nested-folder", "folder1"}.issubset(set(events["deleted_files"]))
    assert {"doc-top", "doc1", "doc2"}.issubset(set(events["deleted_links"]))
    assert len(events["removed_docs"]) >= 3

    async def _thread_pool_boom(_func, *_args, **_kwargs):
        raise RuntimeError("rm route boom")

    # Scenario: thread-pool execution fails -> surfaced as a 500.
    monkeypatch.setattr(module, "thread_pool_exec", _thread_pool_boom)
    req_state["file_ids"] = ["boom"]
    res = _run(module.rm())
    assert res["code"] == 500
    assert "rm route boom" in res["message"]
@pytest.mark.p2
def test_rename_branch_matrix_unit(monkeypatch):
    """Branch matrix for the rename route.

    Covers: missing file, denied permission, extension change rejection,
    duplicate name, DB failure on file and on linked document rename,
    success, and the exception path.
    """
    module = _load_file_app_module(monkeypatch)
    # req_state backs the patched request body and is mutated per scenario.
    req_state = {"file_id": "f1", "name": "new.txt"}
    _set_request_json(monkeypatch, module, req_state)
    # Scenario: file id does not resolve.
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (False, None))
    res = _run(module.rename())
    assert res["message"] == "File not found!"
    # Scenario: team permission denied.
    monkeypatch.setattr(
        module.FileService,
        "get_by_id",
        lambda _file_id: (True, _DummyFile("f1", module.FileType.DOC.value, name="origin.txt")),
    )
    monkeypatch.setattr(module, "check_file_team_permission", lambda _file, _uid: False)
    res = _run(module.rename())
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR
    assert res["message"] == "No authorization."
    # Scenario: changing the extension (.txt -> .pdf) is rejected.
    monkeypatch.setattr(module, "check_file_team_permission", lambda _file, _uid: True)
    req_state["name"] = "new.pdf"
    res = _run(module.rename())
    assert res["code"] == module.RetCode.ARGUMENT_ERROR
    assert "extension of file can't be changed" in res["message"]
    # Scenario: duplicate name in the same folder.
    req_state["name"] = "new.txt"
    monkeypatch.setattr(module.FileService, "query", lambda **_kwargs: [SimpleNamespace(name="new.txt")])
    res = _run(module.rename())
    assert "Duplicated file name in the same folder." in res["message"]
    # Scenario: DB update of the file record fails.
    monkeypatch.setattr(module.FileService, "query", lambda **_kwargs: [])
    monkeypatch.setattr(module.FileService, "update_by_id", lambda *_args, **_kwargs: False)
    res = _run(module.rename())
    assert "Database error (File rename)!" in res["message"]
    # Scenario: file rename succeeds but the linked document rename fails.
    monkeypatch.setattr(module.FileService, "update_by_id", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(module.File2DocumentService, "get_by_file_id", lambda _file_id: [SimpleNamespace(document_id="doc1")])
    monkeypatch.setattr(module.DocumentService, "update_by_id", lambda *_args, **_kwargs: False)
    res = _run(module.rename())
    assert "Database error (Document rename)!" in res["message"]
    # Scenario: no linked documents -> full success.
    monkeypatch.setattr(module.File2DocumentService, "get_by_file_id", lambda _file_id: [])
    res = _run(module.rename())
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"] is True
    # Scenario: lookup raises -> 500 via server_error_response.
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (_ for _ in ()).throw(RuntimeError("rename boom")))
    monkeypatch.setattr(module, "server_error_response", lambda err: {"code": 500, "message": str(err)})
    res = _run(module.rename())
    assert res["code"] == 500
    assert "rename boom" in res["message"]
@pytest.mark.p2
def test_get_file_branch_matrix_unit(monkeypatch):
    """Branch matrix for the file download (get) route.

    Covers: missing file, denied permission, and the storage fallback path:
    the primary (parent_id, location) read returns None so the route retries
    via File2DocumentService.get_storage_address.
    """
    module = _load_file_app_module(monkeypatch)
    # Scenario: file id does not resolve.
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (False, None))
    res = _run(module.get("missing"))
    assert res["message"] == "Document not found!"
    # Scenario: team permission denied.
    monkeypatch.setattr(
        module.FileService,
        "get_by_id",
        lambda _file_id: (True, _DummyFile("f1", module.FileType.DOC.value, name="a.txt")),
    )
    monkeypatch.setattr(module, "check_file_team_permission", lambda _file, _uid: False)
    res = _run(module.get("f1"))
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR
    assert res["message"] == "No authorization."

    class _Storage:
        # First get() returns None (miss) to force the fallback address;
        # subsequent calls return the blob. Calls are recorded for ordering.
        def __init__(self):
            self.calls = []

        def get(self, bucket, location):
            self.calls.append((bucket, location))
            if len(self.calls) == 1:
                return None
            return b"blob-data"

    storage = _Storage()
    monkeypatch.setattr(module.settings, "STORAGE_IMPL", storage)
    monkeypatch.setattr(
        module.FileService,
        "get_by_id",
        lambda _file_id: (
            True,
            _DummyFile(
                "f1",
                module.FileType.VISUAL.value,
                parent_id="pf1",
                location="loc1",
                name="image.abc",
            ),
        ),
    )
    monkeypatch.setattr(module, "check_file_team_permission", lambda _file, _uid: True)
    monkeypatch.setattr(module.File2DocumentService, "get_storage_address", lambda **_kwargs: ("pf2", "loc2"))

    async def _make_response(data):
        return _DummyResponse(data)

    monkeypatch.setattr(module, "make_response", _make_response)
    # Record content_type/extension into the fake response's headers.
    monkeypatch.setattr(
        module,
        "apply_safe_file_response_headers",
        lambda response, content_type, ext: response.headers.update(
            {"content_type": content_type, "extension": ext}
        ),
    )
    res = _run(module.get("f1"))
    assert isinstance(res, _DummyResponse)
    assert res.data == b"blob-data"
    # Primary address tried first, fallback address second.
    assert storage.calls == [("pf1", "loc1"), ("pf2", "loc2")]
    assert res.headers["extension"] == "abc"
    assert res.headers["content_type"] == "image/abc"
@pytest.mark.p2
def test_get_file_content_type_and_error_paths_unit(monkeypatch):
    """Content-type derivation and exception path for the get route.

    A VISUAL file named ``image.abc`` yields content type ``image/abc`` and
    extension ``abc``; an extension-less name yields (None, None); a lookup
    failure is routed through server_error_response.
    """
    module = _load_file_app_module(monkeypatch)
    monkeypatch.setattr(module, "check_file_team_permission", lambda _file, _uid: True)

    class _Storage:
        # Always-hit storage stub; no fallback needed in these scenarios.
        @staticmethod
        def get(_bucket, _location):
            return b"blob-data"

    monkeypatch.setattr(module.settings, "STORAGE_IMPL", _Storage())
    monkeypatch.setattr(module.File2DocumentService, "get_storage_address", lambda **_kwargs: ("pf2", "loc2"))

    async def _make_response(data):
        return _DummyResponse(data)

    # Record every (content_type, ext) pair the route derives.
    headers_calls = []

    def _apply_headers(response, content_type, ext):
        headers_calls.append((content_type, ext))
        response.headers["content_type"] = content_type
        response.headers["extension"] = ext

    monkeypatch.setattr(module, "make_response", _make_response)
    monkeypatch.setattr(module, "apply_safe_file_response_headers", _apply_headers)
    # Scenario: VISUAL file with extension -> image/<ext>.
    monkeypatch.setattr(
        module.FileService,
        "get_by_id",
        lambda _file_id: (
            True,
            _DummyFile("img", module.FileType.VISUAL.value, parent_id="pf1", location="loc1", name="image.abc"),
        ),
    )
    res = _run(module.get("img"))
    assert isinstance(res, _DummyResponse)
    assert res.headers["content_type"] == "image/abc"
    assert res.headers["extension"] == "abc"
    # Scenario: name without an extension -> no content type / extension.
    monkeypatch.setattr(
        module.FileService,
        "get_by_id",
        lambda _file_id: (
            True,
            _DummyFile("noext", module.FileType.DOC.value, parent_id="pf1", location="loc1", name="README"),
        ),
    )
    res = _run(module.get("noext"))
    assert isinstance(res, _DummyResponse)
    assert res.headers["content_type"] is None
    assert res.headers["extension"] is None
    assert headers_calls == [("image/abc", "abc"), (None, None)]
    # Scenario: lookup raises -> 500 via server_error_response.
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (_ for _ in ()).throw(RuntimeError("get crash")))
    monkeypatch.setattr(module, "server_error_response", lambda err: {"code": 500, "message": str(err)})
    res = _run(module.get("boom"))
    assert res["code"] == 500
    assert "get crash" in res["message"]
@pytest.mark.p2
def test_move_recursive_branch_matrix_unit(monkeypatch):
    """Branch matrix for the recursive move route.

    Covers: missing destination, missing sources, partial source resolution,
    missing tenant, denied permission, the full recursive move (merging into
    an existing destination folder, creating a new one, and renaming a file
    on a storage name collision), and a storage-layer failure.
    """
    module = _load_file_app_module(monkeypatch)
    # req_state backs the patched request body and is mutated per scenario.
    req_state = {"src_file_ids": ["f1"], "dest_file_id": "dest"}
    _set_request_json(monkeypatch, module, req_state)

    async def _thread_pool_exec(fn, *args, **kwargs):
        # Run the worker inline so assertions see its effects synchronously.
        return fn(*args, **kwargs)

    monkeypatch.setattr(module, "thread_pool_exec", _thread_pool_exec)
    monkeypatch.setattr(module, "check_file_team_permission", lambda _file, _uid: True)
    dest_folder = SimpleNamespace(id="dest")
    # Scenario: destination folder does not resolve.
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (False, None))
    res = _run(module.move())
    assert res["message"] == "Parent folder not found!"
    # Scenario: no source files resolve.
    monkeypatch.setattr(module.FileService, "get_by_id", lambda _file_id: (True, dest_folder))
    monkeypatch.setattr(module.FileService, "get_by_ids", lambda _file_ids: [])
    res = _run(module.move())
    assert res["message"] == "Source files not found!"
    # Scenario: only some of the requested sources resolve.
    req_state["src_file_ids"] = ["f1", "f2"]
    monkeypatch.setattr(module.FileService, "get_by_ids", lambda _file_ids: [_DummyFile("f1", module.FileType.DOC.value)])
    res = _run(module.move())
    assert res["message"] == "File or folder not found!"
    # Scenario: a source has no tenant.
    req_state["src_file_ids"] = ["tenant-missing"]
    monkeypatch.setattr(
        module.FileService,
        "get_by_ids",
        lambda _file_ids: [_DummyFile("tenant-missing", module.FileType.DOC.value, tenant_id=None)],
    )
    res = _run(module.move())
    assert res["message"] == "Tenant not found!"
    # Scenario: team permission denied.
    req_state["src_file_ids"] = ["deny"]
    monkeypatch.setattr(module.FileService, "get_by_ids", lambda _file_ids: [_DummyFile("deny", module.FileType.DOC.value)])
    monkeypatch.setattr(module, "check_file_team_permission", lambda _file, _uid: False)
    res = _run(module.move())
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR
    assert res["message"] == "No authorization."
    monkeypatch.setattr(module, "check_file_team_permission", lambda _file, _uid: True)
    # Full recursive move: one folder merging into an existing destination
    # folder, one folder needing a new destination folder, one plain file.
    req_state["src_file_ids"] = ["folder_existing", "folder_new", "doc_main"]
    folder_existing = _DummyFile(
        "folder_existing",
        module.FileType.FOLDER.value,
        tenant_id="tenant1",
        parent_id="old_bucket",
        location="",
        name="existing-folder",
    )
    folder_new = _DummyFile(
        "folder_new",
        module.FileType.FOLDER.value,
        tenant_id="tenant1",
        parent_id="old_bucket",
        location="",
        name="new-folder",
    )
    doc_main = _DummyFile(
        "doc_main",
        module.FileType.DOC.value,
        tenant_id="tenant1",
        parent_id="old_bucket",
        location="doc.bin",
        name="doc.bin",
    )
    sub_doc = _DummyFile(
        "sub_doc",
        module.FileType.DOC.value,
        tenant_id="tenant1",
        parent_id="folder_existing",
        location="sub.txt",
        name="sub.txt",
    )
    monkeypatch.setattr(module.FileService, "get_by_ids", lambda _file_ids: [folder_existing, folder_new, doc_main])
    inserted = []
    deleted = []
    updated = []
    existing_dest = SimpleNamespace(id="dest-existing")
    new_dest = SimpleNamespace(id="dest-new")

    def _query(**kwargs):
        # "existing-folder" already exists at the destination; "new-folder"
        # does not, forcing the route to insert a replacement folder.
        if kwargs.get("name") == "existing-folder":
            return [existing_dest]
        if kwargs.get("name") == "new-folder":
            return []
        return []

    def _insert(payload):
        inserted.append(payload)
        return new_dest

    def _list_subfiles(parent_id):
        # folder_existing contains one document; folder_new is empty.
        if parent_id == "folder_existing":
            return [sub_doc]
        if parent_id == "folder_new":
            return []
        return []

    class _Storage:
        # obj_exist reports a one-time name collision for "doc.bin" so the
        # route must pick a deconflicted location ("doc.bin_").
        def __init__(self):
            self.move_calls = []
            self._collision = 0

        def obj_exist(self, _bucket, location):
            if location == "doc.bin" and self._collision == 0:
                self._collision += 1
                return True
            return False

        def move(self, old_parent, old_location, new_parent, new_location):
            self.move_calls.append((old_parent, old_location, new_parent, new_location))

    storage = _Storage()
    monkeypatch.setattr(module.settings, "STORAGE_IMPL", storage)
    monkeypatch.setattr(module.FileService, "query", _query)
    monkeypatch.setattr(module.FileService, "insert", _insert)
    monkeypatch.setattr(module.FileService, "list_all_files_by_parent_id", _list_subfiles)
    monkeypatch.setattr(module.FileService, "delete_by_id", lambda file_id: deleted.append(file_id))
    monkeypatch.setattr(module.FileService, "update_by_id", lambda file_id, payload: updated.append((file_id, payload)) or True)
    res = _run(module.move())
    assert res["code"] == module.RetCode.SUCCESS
    assert res["data"] is True
    # The missing destination folder was created; source folders deleted.
    assert inserted and inserted[0]["name"] == "new-folder"
    assert set(deleted) == {"folder_existing", "folder_new"}
    # Collision produced the deconflicted "doc.bin_" location.
    assert ("old_bucket", "doc.bin", "dest", "doc.bin_") in storage.move_calls
    assert ("folder_existing", "sub.txt", "dest-existing", "sub.txt") in storage.move_calls
    assert ("doc_main", {"parent_id": "dest", "location": "doc.bin_"}) in updated
    assert ("sub_doc", {"parent_id": "dest-existing", "location": "sub.txt"}) in updated
    # Scenario: storage move raises -> wrapped storage-layer error message.
    req_state["src_file_ids"] = ["boom_doc"]
    monkeypatch.setattr(
        module.FileService,
        "get_by_ids",
        lambda _file_ids: [
            _DummyFile("boom_doc", module.FileType.DOC.value, tenant_id="tenant1", parent_id="old_bucket", location="boom", name="boom")
        ],
    )

    class _StorageBoom:
        @staticmethod
        def obj_exist(_bucket, _location):
            return False

        @staticmethod
        def move(*_args, **_kwargs):
            raise RuntimeError("storage down")

    monkeypatch.setattr(module.settings, "STORAGE_IMPL", _StorageBoom())
    monkeypatch.setattr(module, "server_error_response", lambda err: {"code": 500, "message": str(err)})
    res = _run(module.move())
    assert res["code"] == 500
    assert "Move file failed at storage layer: storage down" in res["message"]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_file_app/test_file_routes_unit.py",
"license": "Apache License 2.0",
"lines": 1005,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_search_app/test_search_routes_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
from copy import deepcopy
import importlib.util
import sys
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _DummyAtomic:
def __enter__(self):
return self
def __exit__(self, _exc_type, _exc, _tb):
return False
class _Args(dict):
def get(self, key, default=None):
return super().get(key, default)
class _EnumValue:
def __init__(self, value):
self.value = value
class _DummyStatusEnum:
    """Mirror of ``common.constants.StatusEnum`` with only the VALID member."""

    # Routes compare against StatusEnum.VALID.value, which is the string "1".
    VALID = _EnumValue(value="1")
class _DummyRetCode:
SUCCESS = 0
EXCEPTION_ERROR = 100
ARGUMENT_ERROR = 101
DATA_ERROR = 102
OPERATING_ERROR = 103
AUTHENTICATION_ERROR = 109
class _SearchRecord:
def __init__(self, search_id="search-1", name="search", search_config=None):
self.id = search_id
self.name = name
self.search_config = {} if search_config is None else dict(search_config)
def to_dict(self):
return {"id": self.id, "name": self.name, "search_config": dict(self.search_config)}
def _run(coro):
return asyncio.run(coro)
def _set_request_json(monkeypatch, module, payload):
async def _request_json():
return deepcopy(payload)
monkeypatch.setattr(module, "get_request_json", _request_json)
def _set_request_args(monkeypatch, module, args=None):
    """Install a fake quart ``request`` whose ``args`` mapping wraps *args*."""
    query = _Args(args or {})
    monkeypatch.setattr(module, "request", SimpleNamespace(args=query))
@pytest.fixture(scope="session")
def auth():
    """Placeholder auth token; these unit tests never contact a real server."""
    return "unit-auth"
@pytest.fixture(scope="session", autouse=True)
def set_tenant_info():
    # No-op autouse fixture; presumably shadows a heavier shared fixture of
    # the same name so no tenant setup runs here — confirm against conftest.
    return None
def _load_search_app(monkeypatch):
    """Load ``api/apps/search_app.py`` in isolation with all imports stubbed.

    Every package the module imports (quart, common.*, api.*) is replaced in
    ``sys.modules`` with a minimal fake BEFORE the file is executed, so the
    route functions can be called directly without the real app, database or
    web framework. Returns the freshly executed module object.
    """
    # Repo root: four directories above this test file.
    repo_root = Path(__file__).resolve().parents[4]
    # --- quart: only `request` is needed; tests re-patch it per scenario.
    quart_mod = ModuleType("quart")
    quart_mod.request = SimpleNamespace(args=_Args())
    monkeypatch.setitem(sys.modules, "quart", quart_mod)
    # --- common package and submodules.
    common_pkg = ModuleType("common")
    common_pkg.__path__ = [str(repo_root / "common")]
    monkeypatch.setitem(sys.modules, "common", common_pkg)
    misc_utils_mod = ModuleType("common.misc_utils")
    # Deterministic uuid so tests can assert on the generated search id.
    misc_utils_mod.get_uuid = lambda: "search-uuid-1"
    monkeypatch.setitem(sys.modules, "common.misc_utils", misc_utils_mod)
    common_pkg.misc_utils = misc_utils_mod
    constants_mod = ModuleType("common.constants")
    constants_mod.RetCode = _DummyRetCode
    constants_mod.StatusEnum = _DummyStatusEnum
    monkeypatch.setitem(sys.modules, "common.constants", constants_mod)
    common_pkg.constants = constants_mod
    # --- api package tree.
    api_pkg = ModuleType("api")
    api_pkg.__path__ = [str(repo_root / "api")]
    monkeypatch.setitem(sys.modules, "api", api_pkg)
    apps_mod = ModuleType("api.apps")
    apps_mod.__path__ = [str(repo_root / "api" / "apps")]
    # Logged-in user stub and a pass-through login decorator.
    apps_mod.current_user = SimpleNamespace(id="tenant-1")
    apps_mod.login_required = lambda func: func
    monkeypatch.setitem(sys.modules, "api.apps", apps_mod)
    api_pkg.apps = apps_mod
    constants_api_mod = ModuleType("api.constants")
    constants_api_mod.DATASET_NAME_LIMIT = 255
    monkeypatch.setitem(sys.modules, "api.constants", constants_api_mod)
    db_pkg = ModuleType("api.db")
    db_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "api.db", db_pkg)
    api_pkg.db = db_pkg
    db_models_mod = ModuleType("api.db.db_models")

    class _DummyDB:
        # DB.atomic() is used as a context manager by the routes.
        @staticmethod
        def atomic():
            return _DummyAtomic()

    db_models_mod.DB = _DummyDB
    monkeypatch.setitem(sys.modules, "api.db.db_models", db_models_mod)
    services_pkg = ModuleType("api.db.services")
    services_pkg.__path__ = []
    # Default: no dedup renaming; tests override this where needed.
    services_pkg.duplicate_name = lambda _checker, **kwargs: kwargs.get("name", "")
    monkeypatch.setitem(sys.modules, "api.db.services", services_pkg)
    search_service_mod = ModuleType("api.db.services.search_service")

    class _SearchService:
        # Happy-path defaults for every service call the routes make;
        # individual tests monkeypatch specific methods to force branches.
        @staticmethod
        def query(**_kwargs):
            return []

        @staticmethod
        def save(**_kwargs):
            return True

        @staticmethod
        def accessible4deletion(_search_id, _user_id):
            return True

        @staticmethod
        def update_by_id(_search_id, _req):
            return True

        @staticmethod
        def get_by_id(_search_id):
            return True, _SearchRecord(search_id=_search_id, name="updated")

        @staticmethod
        def get_detail(_search_id):
            return {"id": _search_id}

        @staticmethod
        def get_by_tenant_ids(_tenants, _user_id, _page_number, _items_per_page, _orderby, _desc, _keywords):
            return [], 0

        @staticmethod
        def delete_by_id(_search_id):
            return True

    search_service_mod.SearchService = _SearchService
    monkeypatch.setitem(sys.modules, "api.db.services.search_service", search_service_mod)
    user_service_mod = ModuleType("api.db.services.user_service")

    class _TenantService:
        @staticmethod
        def get_by_id(_tenant_id):
            return True, SimpleNamespace(id=_tenant_id)

    class _UserTenantService:
        @staticmethod
        def query(**_kwargs):
            return [SimpleNamespace(tenant_id="tenant-1")]

    user_service_mod.TenantService = _TenantService
    user_service_mod.UserTenantService = _UserTenantService
    monkeypatch.setitem(sys.modules, "api.db.services.user_service", user_service_mod)
    utils_pkg = ModuleType("api.utils")
    utils_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "api.utils", utils_pkg)
    api_utils_mod = ModuleType("api.utils.api_utils")

    async def _default_request_json():
        return {}

    # Plain-dict response builders so route results are easy to assert on.
    def _get_data_error_result(code=_DummyRetCode.DATA_ERROR, message="Sorry! Data missing!"):
        return {"code": code, "message": message}

    def _get_json_result(code=_DummyRetCode.SUCCESS, message="success", data=None):
        return {"code": code, "message": message, "data": data}

    def _server_error_response(error):
        return {"code": _DummyRetCode.EXCEPTION_ERROR, "message": repr(error)}

    # Validation decorators become pass-throughs in unit tests.
    def _validate_request(*_args, **_kwargs):
        def _decorator(func):
            return func
        return _decorator

    def _not_allowed_parameters(*_params):
        def _decorator(func):
            return func
        return _decorator

    api_utils_mod.get_request_json = _default_request_json
    api_utils_mod.get_data_error_result = _get_data_error_result
    api_utils_mod.get_json_result = _get_json_result
    api_utils_mod.server_error_response = _server_error_response
    api_utils_mod.validate_request = _validate_request
    api_utils_mod.not_allowed_parameters = _not_allowed_parameters
    monkeypatch.setitem(sys.modules, "api.utils.api_utils", api_utils_mod)
    utils_pkg.api_utils = api_utils_mod
    # --- Execute search_app.py with the fakes in place. `manager` must exist
    # on the module BEFORE exec because route decorators use it at import time.
    module_name = "test_search_routes_unit_module"
    module_path = repo_root / "api" / "apps" / "search_app.py"
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    module.manager = _DummyManager()
    monkeypatch.setitem(sys.modules, module_name, module)
    spec.loader.exec_module(module)
    return module
@pytest.mark.p2
def test_create_route_matrix_unit(monkeypatch):
    """Branch matrix for the search create route.

    Covers: non-string name, blank name, over-long name, unauthorized tenant,
    save failure, success (with deterministic uuid), and a raised exception.
    """
    module = _load_search_app(monkeypatch)
    # Scenario: name is not a string.
    _set_request_json(monkeypatch, module, {"name": 1})
    res = _run(module.create())
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "must be string" in res["message"]
    # Scenario: name is whitespace only.
    _set_request_json(monkeypatch, module, {"name": " "})
    res = _run(module.create())
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "empty" in res["message"].lower()
    # Scenario: name exceeds the 255-character limit.
    _set_request_json(monkeypatch, module, {"name": "a" * 256})
    res = _run(module.create())
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "255" in res["message"]
    # Scenario: tenant lookup fails -> unauthorized identity.
    _set_request_json(monkeypatch, module, {"name": "create-auth-fail"})
    monkeypatch.setattr(module.TenantService, "get_by_id", lambda _tenant_id: (False, None))
    res = _run(module.create())
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "authorized identity" in res["message"].lower()
    # Restore tenant lookup and make dedup renaming observable.
    monkeypatch.setattr(module.TenantService, "get_by_id", lambda _tenant_id: (True, SimpleNamespace(id=_tenant_id)))
    monkeypatch.setattr(module, "duplicate_name", lambda _checker, **kwargs: kwargs["name"] + "_dedup")
    # Scenario: service save returns False.
    _set_request_json(monkeypatch, module, {"name": "create-fail", "description": "d"})
    monkeypatch.setattr(module.SearchService, "save", lambda **_kwargs: False)
    res = _run(module.create())
    assert res["code"] == module.RetCode.DATA_ERROR
    # Scenario: success — id comes from the stubbed get_uuid.
    _set_request_json(monkeypatch, module, {"name": "create-ok", "description": "d"})
    monkeypatch.setattr(module.SearchService, "save", lambda **_kwargs: True)
    res = _run(module.create())
    assert res["code"] == 0
    assert res["data"]["search_id"] == "search-uuid-1"

    def _raise_save(**_kwargs):
        raise RuntimeError("save boom")

    # Scenario: save raises -> server_error_response wrapping.
    monkeypatch.setattr(module.SearchService, "save", _raise_save)
    _set_request_json(monkeypatch, module, {"name": "create-exception", "description": "d"})
    res = _run(module.create())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "save boom" in res["message"]
@pytest.mark.p2
def test_update_and_detail_route_matrix_unit(monkeypatch):
    """Branch matrix for the update route followed by the detail route.

    update(): name validation, unauthorized tenant, denied permission,
    missing record, duplicate name, non-dict search_config, update failure
    (asserting the merged config actually sent), fetch-back failure, success,
    and a raised exception. detail(): permission, missing detail, success,
    and a raised exception.
    """
    module = _load_search_app(monkeypatch)
    # update: name is not a string.
    _set_request_json(monkeypatch, module, {"search_id": "s1", "name": 1, "search_config": {}, "tenant_id": "tenant-1"})
    res = _run(module.update())
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "must be string" in res["message"]
    # update: blank name.
    _set_request_json(monkeypatch, module, {"search_id": "s1", "name": " ", "search_config": {}, "tenant_id": "tenant-1"})
    res = _run(module.update())
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "empty" in res["message"].lower()
    # update: over-long name (message wording differs from create's).
    _set_request_json(monkeypatch, module, {"search_id": "s1", "name": "a" * 256, "search_config": {}, "tenant_id": "tenant-1"})
    res = _run(module.update())
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "large than" in res["message"]
    # update: tenant lookup fails.
    _set_request_json(monkeypatch, module, {"search_id": "s1", "name": "ok", "search_config": {}, "tenant_id": "tenant-1"})
    monkeypatch.setattr(module.TenantService, "get_by_id", lambda _tenant_id: (False, None))
    res = _run(module.update())
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "authorized identity" in res["message"].lower()
    # update: permission check (accessible4deletion) denies.
    monkeypatch.setattr(module.TenantService, "get_by_id", lambda _tenant_id: (True, SimpleNamespace(id=_tenant_id)))
    monkeypatch.setattr(module.SearchService, "accessible4deletion", lambda _search_id, _user_id: False)
    _set_request_json(monkeypatch, module, {"search_id": "s1", "name": "ok", "search_config": {}, "tenant_id": "tenant-1"})
    res = _run(module.update())
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR
    assert "authorization" in res["message"].lower()
    # update: query returns a falsy record -> cannot find search.
    monkeypatch.setattr(module.SearchService, "accessible4deletion", lambda _search_id, _user_id: True)
    monkeypatch.setattr(module.SearchService, "query", lambda **_kwargs: [None])
    _set_request_json(monkeypatch, module, {"search_id": "s1", "name": "ok", "search_config": {}, "tenant_id": "tenant-1"})
    res = _run(module.update())
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "cannot find search" in res["message"].lower()
    existing = _SearchRecord(search_id="s1", name="old-name", search_config={"existing": 1})

    def _query_duplicate(**kwargs):
        # Looking up by id finds the record; looking up the new name finds
        # another record, triggering the duplicate-name branch.
        if "id" in kwargs:
            return [existing]
        if "name" in kwargs:
            return [SimpleNamespace(id="dup")]
        return []

    # update: renaming to an already-taken name.
    monkeypatch.setattr(module.SearchService, "query", _query_duplicate)
    _set_request_json(monkeypatch, module, {"search_id": "s1", "name": "new-name", "search_config": {}, "tenant_id": "tenant-1"})
    res = _run(module.update())
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "duplicated" in res["message"].lower()
    # update: search_config must be a JSON object, not a list.
    monkeypatch.setattr(module.SearchService, "query", lambda **_kwargs: [existing])
    _set_request_json(monkeypatch, module, {"search_id": "s1", "name": "old-name", "search_config": [], "tenant_id": "tenant-1"})
    res = _run(module.update())
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "json object" in res["message"].lower()
    captured = {}

    def _update_fail(search_id, req):
        # Capture exactly what the route sends to the service.
        captured["search_id"] = search_id
        captured["req"] = dict(req)
        return False

    # update: service update fails; verify id/tenant keys are stripped and
    # the new config is merged over the existing one.
    monkeypatch.setattr(module.SearchService, "update_by_id", _update_fail)
    _set_request_json(monkeypatch, module, {"search_id": "s1", "name": "old-name", "search_config": {"top_k": 3}, "tenant_id": "tenant-1"})
    res = _run(module.update())
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "failed to update" in res["message"].lower()
    assert captured["search_id"] == "s1"
    assert "search_id" not in captured["req"]
    assert "tenant_id" not in captured["req"]
    assert captured["req"]["search_config"] == {"existing": 1, "top_k": 3}
    # update: write succeeds but the fetch-back fails.
    monkeypatch.setattr(module.SearchService, "update_by_id", lambda _search_id, _req: True)
    monkeypatch.setattr(module.SearchService, "get_by_id", lambda _search_id: (False, None))
    res = _run(module.update())
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "failed to fetch" in res["message"].lower()
    # update: full success returns the refreshed record.
    monkeypatch.setattr(
        module.SearchService,
        "get_by_id",
        lambda _search_id: (True, _SearchRecord(search_id=_search_id, name="old-name", search_config={"existing": 1, "top_k": 3})),
    )
    res = _run(module.update())
    assert res["code"] == 0
    assert res["data"]["id"] == "s1"

    def _raise_query(**_kwargs):
        raise RuntimeError("update boom")

    # update: query raises -> server_error_response wrapping.
    monkeypatch.setattr(module.SearchService, "query", _raise_query)
    _set_request_json(monkeypatch, module, {"search_id": "s1", "name": "old-name", "search_config": {"top_k": 3}, "tenant_id": "tenant-1"})
    res = _run(module.update())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "update boom" in res["message"]
    # detail: search not visible in any of the caller's tenants.
    _set_request_args(monkeypatch, module, {"search_id": "s1"})
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(tenant_id="tenant-a")])
    monkeypatch.setattr(module.SearchService, "query", lambda **_kwargs: [])
    res = module.detail()
    assert res["code"] == module.RetCode.OPERATING_ERROR
    assert "permission" in res["message"].lower()
    # detail: record found but get_detail returns nothing.
    monkeypatch.setattr(module.SearchService, "query", lambda **_kwargs: [SimpleNamespace(id="s1")])
    monkeypatch.setattr(module.SearchService, "get_detail", lambda _search_id: None)
    res = module.detail()
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "can't find" in res["message"].lower()
    # detail: success.
    monkeypatch.setattr(module.SearchService, "get_detail", lambda _search_id: {"id": _search_id, "name": "detail-name"})
    res = module.detail()
    assert res["code"] == 0
    assert res["data"]["id"] == "s1"

    def _raise_detail(_search_id):
        raise RuntimeError("detail boom")

    # detail: get_detail raises -> server_error_response wrapping.
    monkeypatch.setattr(module.SearchService, "get_detail", _raise_detail)
    res = module.detail()
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "detail boom" in res["message"]
@pytest.mark.p2
def test_list_and_rm_route_matrix_unit(monkeypatch):
    """Exercise list_search_app() paging/ownership filtering and rm() auth/delete branches."""
    module = _load_search_app(monkeypatch)
    # Happy path: one row from the service, no owner filter requested.
    _set_request_args(
        monkeypatch,
        module,
        {"keywords": "k", "page": "1", "page_size": "2", "orderby": "create_time", "desc": "false"},
    )
    _set_request_json(monkeypatch, module, {"owner_ids": []})
    monkeypatch.setattr(
        module.SearchService,
        "get_by_tenant_ids",
        lambda _tenants, _uid, _page, _size, _orderby, _desc, _keywords: ([{"id": "a", "tenant_id": "tenant-1"}], 1),
    )
    res = _run(module.list_search_app())
    assert res["code"] == 0
    assert res["data"]["total"] == 1
    assert res["data"]["search_apps"][0]["id"] == "a"
    # owner_ids filter: only rows whose tenant_id is listed survive, and the
    # reported total reflects the filtered count (1), not the service's (2).
    _set_request_args(
        monkeypatch,
        module,
        {"keywords": "k", "page": "1", "page_size": "1", "orderby": "create_time", "desc": "true"},
    )
    _set_request_json(monkeypatch, module, {"owner_ids": ["tenant-1"]})
    monkeypatch.setattr(
        module.SearchService,
        "get_by_tenant_ids",
        lambda _tenants, _uid, _page, _size, _orderby, _desc, _keywords: (
            [{"id": "x", "tenant_id": "tenant-1"}, {"id": "y", "tenant_id": "tenant-2"}],
            2,
        ),
    )
    res = _run(module.list_search_app())
    assert res["code"] == 0
    assert res["data"]["total"] == 1
    assert len(res["data"]["search_apps"]) == 1
    assert res["data"]["search_apps"][0]["tenant_id"] == "tenant-1"

    # A service exception is converted into an EXCEPTION_ERROR envelope.
    def _raise_list(*_args, **_kwargs):
        raise RuntimeError("list boom")

    monkeypatch.setattr(module.SearchService, "get_by_tenant_ids", _raise_list)
    _set_request_json(monkeypatch, module, {"owner_ids": []})
    res = _run(module.list_search_app())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "list boom" in res["message"]
    # rm(): a caller without deletion rights is rejected up front.
    _set_request_json(monkeypatch, module, {"search_id": "search-1"})
    monkeypatch.setattr(module.SearchService, "accessible4deletion", lambda _search_id, _user_id: False)
    res = _run(module.rm())
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR
    assert "authorization" in res["message"].lower()
    # Authorized, but the delete itself reports failure -> DATA_ERROR.
    monkeypatch.setattr(module.SearchService, "accessible4deletion", lambda _search_id, _user_id: True)
    monkeypatch.setattr(module.SearchService, "delete_by_id", lambda _search_id: False)
    res = _run(module.rm())
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "failed to delete" in res["message"].lower()
    # Successful delete answers code 0 with data=True.
    monkeypatch.setattr(module.SearchService, "delete_by_id", lambda _search_id: True)
    res = _run(module.rm())
    assert res["code"] == 0
    assert res["data"] is True

    # An exception during delete is wrapped into EXCEPTION_ERROR.
    def _raise_delete(_search_id):
        raise RuntimeError("rm boom")

    monkeypatch.setattr(module.SearchService, "delete_by_id", _raise_delete)
    res = _run(module.rm())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "rm boom" in res["message"]
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_search_app/test_search_routes_unit.py",
"license": "Apache License 2.0",
"lines": 394,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_system_app/test_system_routes_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib.util
import sys
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _ExprField:
def __init__(self, name):
self.name = name
def __eq__(self, other):
return (self.name, other)
class _DummyAPITokenModel:
    # Minimal stand-in for the APIToken DB model: each column is an
    # _ExprField, so `Model.col == value` produces an inspectable tuple
    # rather than running a real query.
    tenant_id = _ExprField("tenant_id")
    token = _ExprField("token")
@pytest.fixture(scope="session")
def auth():
    """Session-wide dummy auth value; handlers are invoked directly, so any string works."""
    token = "unit-auth"
    return token
@pytest.fixture(scope="session", autouse=True)
def set_tenant_info():
    """Autouse no-op; presumably shadows a shared fixture that would contact a live server — TODO confirm."""
    return None
def _load_system_module(monkeypatch):
    """Load api/apps/system_app.py as an isolated module.

    Every package the route module imports is replaced with an in-memory stub
    registered in sys.modules (via monkeypatch, so each test gets a clean
    slate), then the real source file is executed against those stubs.
    Returns the freshly executed module object.
    """
    repo_root = Path(__file__).resolve().parents[4]
    # --- fake "api" package tree so absolute imports resolve ---
    api_pkg = ModuleType("api")
    api_pkg.__path__ = [str(repo_root / "api")]
    monkeypatch.setitem(sys.modules, "api", api_pkg)
    apps_mod = ModuleType("api.apps")
    apps_mod.__path__ = [str(repo_root / "api" / "apps")]
    apps_mod.login_required = lambda fn: fn  # strip the auth decorator
    apps_mod.current_user = SimpleNamespace(id="user-1")
    monkeypatch.setitem(sys.modules, "api.apps", apps_mod)
    # --- common.* stubs: settings, version, time helpers ---
    common_pkg = ModuleType("common")
    common_pkg.__path__ = [str(repo_root / "common")]
    monkeypatch.setitem(sys.modules, "common", common_pkg)
    settings_mod = ModuleType("common.settings")
    settings_mod.docStoreConn = SimpleNamespace(health=lambda: {"type": "doc", "status": "green"})
    settings_mod.STORAGE_IMPL = SimpleNamespace(health=lambda: True)
    settings_mod.STORAGE_IMPL_TYPE = "MINIO"
    settings_mod.DATABASE_TYPE = "MYSQL"
    settings_mod.REGISTER_ENABLED = True
    settings_mod.DISABLE_PASSWORD_LOGIN = False
    common_pkg.settings = settings_mod
    monkeypatch.setitem(sys.modules, "common.settings", settings_mod)
    versions_mod = ModuleType("common.versions")
    versions_mod.get_ragflow_version = lambda: "0.0.0-unit"
    monkeypatch.setitem(sys.modules, "common.versions", versions_mod)
    time_utils_mod = ModuleType("common.time_utils")
    time_utils_mod.current_timestamp = lambda: 111
    time_utils_mod.datetime_format = lambda _dt: "2026-01-01 00:00:00"
    monkeypatch.setitem(sys.modules, "common.time_utils", time_utils_mod)
    # --- api.utils.api_utils: plain-dict response envelopes ---
    api_utils_mod = ModuleType("api.utils.api_utils")
    api_utils_mod.get_json_result = lambda data=None, message="success", code=0: {
        "code": code,
        "message": message,
        "data": data,
    }
    api_utils_mod.get_data_error_result = lambda message="", code=102, data=None: {
        "code": code,
        "message": message,
        "data": data,
    }
    api_utils_mod.server_error_response = lambda exc: {
        "code": 100,
        "message": repr(exc),
        "data": None,
    }
    api_utils_mod.generate_confirmation_token = lambda: "ragflow-abcdefghijklmnopqrstuvwxyz0123456789"
    monkeypatch.setitem(sys.modules, "api.utils.api_utils", api_utils_mod)
    # --- DB service stubs (tests monkeypatch the methods they exercise) ---
    api_service_mod = ModuleType("api.db.services.api_service")
    api_service_mod.APITokenService = SimpleNamespace(
        save=lambda **_kwargs: True,
        query=lambda **_kwargs: [],
        filter_update=lambda *_args, **_kwargs: True,
        filter_delete=lambda *_args, **_kwargs: True,
    )
    monkeypatch.setitem(sys.modules, "api.db.services.api_service", api_service_mod)
    kb_service_mod = ModuleType("api.db.services.knowledgebase_service")
    kb_service_mod.KnowledgebaseService = SimpleNamespace(get_by_id=lambda _kb_id: True)
    monkeypatch.setitem(sys.modules, "api.db.services.knowledgebase_service", kb_service_mod)
    user_service_mod = ModuleType("api.db.services.user_service")
    user_service_mod.UserTenantService = SimpleNamespace(
        query=lambda **_kwargs: [SimpleNamespace(role="owner", tenant_id="tenant-1")]
    )
    monkeypatch.setitem(sys.modules, "api.db.services.user_service", user_service_mod)
    db_models_mod = ModuleType("api.db.db_models")
    db_models_mod.APIToken = _DummyAPITokenModel
    monkeypatch.setitem(sys.modules, "api.db.db_models", db_models_mod)
    # --- rag.utils.redis_conn stub ---
    rag_pkg = ModuleType("rag")
    rag_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "rag", rag_pkg)
    rag_utils_pkg = ModuleType("rag.utils")
    rag_utils_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "rag.utils", rag_utils_pkg)
    redis_mod = ModuleType("rag.utils.redis_conn")
    redis_mod.REDIS_CONN = SimpleNamespace(
        health=lambda: True,
        smembers=lambda *_args, **_kwargs: set(),
        zrangebyscore=lambda *_args, **_kwargs: [],
    )
    monkeypatch.setitem(sys.modules, "rag.utils.redis_conn", redis_mod)
    health_utils_mod = ModuleType("api.utils.health_utils")
    health_utils_mod.run_health_checks = lambda: ({"status": "ok"}, True)
    health_utils_mod.get_oceanbase_status = lambda: {"status": "alive"}
    monkeypatch.setitem(sys.modules, "api.utils.health_utils", health_utils_mod)
    quart_mod = ModuleType("quart")
    quart_mod.jsonify = lambda payload: payload  # identity: keep dicts inspectable
    monkeypatch.setitem(sys.modules, "quart", quart_mod)
    # --- execute the real source file against the stubs ---
    module_path = repo_root / "api" / "apps" / "system_app.py"
    spec = importlib.util.spec_from_file_location("test_system_routes_unit_module", module_path)
    module = importlib.util.module_from_spec(spec)
    # Set before exec so @manager.route decorators resolve to the no-op.
    module.manager = _DummyManager()
    monkeypatch.setitem(sys.modules, "test_system_routes_unit_module", module)
    spec.loader.exec_module(module)
    return module
@pytest.mark.p2
def test_status_branch_matrix_unit(monkeypatch):
    """status(): all-green component report, then the all-failing counterpart."""
    module = _load_system_module(monkeypatch)
    # Every dependency healthy.
    monkeypatch.setattr(module.settings, "docStoreConn", SimpleNamespace(health=lambda: {"type": "es", "status": "green"}))
    monkeypatch.setattr(module.settings, "STORAGE_IMPL", SimpleNamespace(health=lambda: True))
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_id", lambda _kb_id: True)
    monkeypatch.setattr(module.REDIS_CONN, "health", lambda: True)
    monkeypatch.setattr(module.REDIS_CONN, "smembers", lambda _key: {"executor-1"})
    monkeypatch.setattr(module.REDIS_CONN, "zrangebyscore", lambda *_args, **_kwargs: ['{"beat": 1}'])
    res = module.status()
    assert res["code"] == 0
    assert res["data"]["doc_engine"]["status"] == "green"
    assert res["data"]["storage"]["status"] == "green"
    assert res["data"]["database"]["status"] == "green"
    assert res["data"]["redis"]["status"] == "green"
    assert res["data"]["task_executor_heartbeats"]["executor-1"][0]["beat"] == 1
    # Every dependency failing. A lambda body cannot contain `raise`, so the
    # `(_ for _ in ()).throw(...)` idiom raises from inside an expression.
    monkeypatch.setattr(
        module.settings,
        "docStoreConn",
        SimpleNamespace(health=lambda: (_ for _ in ()).throw(RuntimeError("doc down"))),
    )
    monkeypatch.setattr(
        module.settings,
        "STORAGE_IMPL",
        SimpleNamespace(health=lambda: (_ for _ in ()).throw(RuntimeError("storage down"))),
    )
    monkeypatch.setattr(
        module.KnowledgebaseService,
        "get_by_id",
        lambda _kb_id: (_ for _ in ()).throw(RuntimeError("db down")),
    )
    monkeypatch.setattr(module.REDIS_CONN, "health", lambda: False)
    monkeypatch.setattr(module.REDIS_CONN, "smembers", lambda _key: (_ for _ in ()).throw(RuntimeError("hb down")))
    res = module.status()
    # The endpoint still answers code 0: failures are reported per-component.
    assert res["code"] == 0
    assert res["data"]["doc_engine"]["status"] == "red"
    assert "doc down" in res["data"]["doc_engine"]["error"]
    assert res["data"]["storage"]["status"] == "red"
    assert "storage down" in res["data"]["storage"]["error"]
    assert res["data"]["database"]["status"] == "red"
    assert "db down" in res["data"]["database"]["error"]
    assert res["data"]["redis"]["status"] == "red"
    assert "Lost connection!" in res["data"]["redis"]["error"]
    # Heartbeat collection degrades to an empty mapping instead of raising.
    assert res["data"]["task_executor_heartbeats"] == {}
@pytest.mark.p2
def test_healthz_and_oceanbase_status_matrix_unit(monkeypatch):
    """healthz() maps the overall health flag to 200/500; oceanbase_status() wraps errors."""
    module = _load_system_module(monkeypatch)
    monkeypatch.setattr(module, "run_health_checks", lambda: ({"status": "ok"}, True))
    payload, status = module.healthz()
    assert status == 200
    assert payload["status"] == "ok"
    # Unhealthy flag -> HTTP 500, payload passed through unchanged.
    monkeypatch.setattr(module, "run_health_checks", lambda: ({"status": "degraded"}, False))
    payload, status = module.healthz()
    assert status == 500
    assert payload["status"] == "degraded"
    monkeypatch.setattr(module, "get_oceanbase_status", lambda: {"status": "alive", "latency_ms": 8})
    res = module.oceanbase_status()
    assert res["code"] == 0
    assert res["data"]["status"] == "alive"
    # Probe failure is reported as code 500 with the error message embedded.
    monkeypatch.setattr(module, "get_oceanbase_status", lambda: (_ for _ in ()).throw(RuntimeError("ocean boom")))
    res = module.oceanbase_status()
    assert res["code"] == 500
    assert res["data"]["status"] == "error"
    assert "ocean boom" in res["data"]["message"]
@pytest.mark.p2
def test_system_token_routes_matrix_unit(monkeypatch):
    """new_token()/token_list()/rm(): tenant lookup, save/query failures, beta backfill."""
    module = _load_system_module(monkeypatch)
    # No tenant membership -> new_token refuses.
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [])
    res = module.new_token()
    assert res["message"] == "Tenant not found!"
    # Tenant exists but persisting the token fails.
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(role="owner", tenant_id="tenant-1")])
    monkeypatch.setattr(module.APITokenService, "save", lambda **_kwargs: False)
    res = module.new_token()
    assert res["message"] == "Fail to new a dialog!"
    # Tenant lookup raising is wrapped into the code-100 server-error envelope.
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: (_ for _ in ()).throw(RuntimeError("tenant query boom")))
    res = module.new_token()
    assert res["code"] == 100
    assert "tenant query boom" in res["message"]
    # token_list with no tenant membership.
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [])
    res = module.token_list()
    assert res["message"] == "Tenant not found!"

    class _Token:
        # Minimal API-token record exposing the to_dict shape token_list consumes.
        def __init__(self, token, beta):
            self.token = token
            self.beta = beta

        def to_dict(self):
            return {"token": self.token, "beta": self.beta}

    filter_updates = []
    monkeypatch.setattr(module, "generate_confirmation_token", lambda: "ragflow-abcdefghijklmnopqrstuvwxyz0123456789")
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(role="owner", tenant_id="tenant-9")])
    monkeypatch.setattr(module.APITokenService, "query", lambda **_kwargs: [_Token("tok-1", ""), _Token("tok-2", "beta-2")])
    monkeypatch.setattr(module.APITokenService, "filter_update", lambda conds, payload: filter_updates.append((conds, payload)))
    res = module.token_list()
    assert res["code"] == 0
    assert len(res["data"]) == 2
    # A token with an empty beta gets a generated 32-char one...
    assert len(res["data"][0]["beta"]) == 32
    # ...while an existing beta is untouched, so exactly one update is issued.
    assert res["data"][1]["beta"] == "beta-2"
    assert len(filter_updates) == 1
    monkeypatch.setattr(
        module.APITokenService,
        "query",
        lambda **_kwargs: (_ for _ in ()).throw(RuntimeError("token list boom")),
    )
    res = module.token_list()
    assert res["code"] == 100
    assert "token list boom" in res["message"]
    # rm(): no tenant membership.
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [])
    res = module.rm("tok-1")
    assert res["message"] == "Tenant not found!"
    deleted = []
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(role="owner", tenant_id="tenant-3")])
    monkeypatch.setattr(module.APITokenService, "filter_delete", lambda conds: deleted.append(conds))
    res = module.rm("tok-1")
    assert res["code"] == 0
    assert res["data"] is True
    assert deleted
    # Delete raising -> code-100 envelope carrying the message.
    monkeypatch.setattr(
        module.APITokenService,
        "filter_delete",
        lambda _conds: (_ for _ in ()).throw(RuntimeError("delete boom")),
    )
    res = module.rm("tok-1")
    assert res["code"] == 100
    assert "delete boom" in res["message"]
@pytest.mark.p2
def test_get_config_returns_register_enabled_unit(monkeypatch):
    """get_config() must surface settings.REGISTER_ENABLED as data.registerEnabled."""
    module = _load_system_module(monkeypatch)
    monkeypatch.setattr(module.settings, "REGISTER_ENABLED", False)
    response = module.get_config()
    assert response["code"] == 0
    assert response["data"]["registerEnabled"] is False
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_system_app/test_system_routes_unit.py",
"license": "Apache License 2.0",
"lines": 263,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_user_app/test_tenant_app_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import importlib.util
import sys
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _AwaitableValue:
def __init__(self, value):
self._value = value
def __await__(self):
async def _co():
return self._value
return _co().__await__()
class _ExprField:
def __init__(self, name):
self.name = name
def __eq__(self, other):
return (self.name, other)
class _Invitee:
def __init__(self, user_id="invitee-1", email="invitee@example.com"):
self.id = user_id
self.email = email
def to_dict(self):
return {
"id": self.id,
"avatar": "avatar-url",
"email": self.email,
"nickname": "Invitee",
"password": "ignored",
}
def _run(coro):
return asyncio.run(coro)
def _set_request_json(monkeypatch, module, payload):
    """Patch module.get_request_json so routes awaiting it receive *payload*."""

    def _fake_get_request_json():
        return _AwaitableValue(payload)

    monkeypatch.setattr(module, "get_request_json", _fake_get_request_json)
def _load_tenant_module(monkeypatch):
    """Load api/apps/tenant_app.py in isolation with every dependency stubbed.

    Registers in-memory stand-ins for the api/common package tree in
    sys.modules, then executes the real source file against them and returns
    the resulting module object.
    """
    repo_root = Path(__file__).resolve().parents[4]
    api_pkg = ModuleType("api")
    api_pkg.__path__ = [str(repo_root / "api")]
    monkeypatch.setitem(sys.modules, "api", api_pkg)
    apps_mod = ModuleType("api.apps")
    apps_mod.__path__ = [str(repo_root / "api" / "apps")]
    # Tests flip current_user.id to simulate owner vs. outsider callers.
    apps_mod.current_user = SimpleNamespace(id="tenant-1", email="owner@example.com")
    apps_mod.login_required = lambda fn: fn
    monkeypatch.setitem(sys.modules, "api.apps", apps_mod)
    db_mod = ModuleType("api.db")
    db_mod.UserTenantRole = SimpleNamespace(NORMAL="normal", OWNER="owner", INVITE="invite")
    monkeypatch.setitem(sys.modules, "api.db", db_mod)
    db_models_mod = ModuleType("api.db.db_models")
    # UserTenant "columns" compare into (name, value) tuples via _ExprField.
    db_models_mod.UserTenant = type(
        "UserTenant",
        (),
        {
            "tenant_id": _ExprField("tenant_id"),
            "user_id": _ExprField("user_id"),
        },
    )
    monkeypatch.setitem(sys.modules, "api.db.db_models", db_models_mod)
    services_pkg = ModuleType("api.db.services")
    services_pkg.__path__ = []
    monkeypatch.setitem(sys.modules, "api.db.services", services_pkg)
    user_service_mod = ModuleType("api.db.services.user_service")

    # Default service stubs; individual tests monkeypatch the methods they use.
    class _UserTenantService:
        @staticmethod
        def get_by_tenant_id(_tenant_id):
            return []

        @staticmethod
        def query(**_kwargs):
            return []

        @staticmethod
        def save(**_kwargs):
            return True

        @staticmethod
        def filter_delete(_conditions):
            return True

        @staticmethod
        def get_tenants_by_user_id(_user_id):
            return []

        @staticmethod
        def filter_update(_conditions, _payload):
            return True

    class _UserService:
        @staticmethod
        def query(**_kwargs):
            return []

        @staticmethod
        def get_by_id(_user_id):
            return False, None

    user_service_mod.UserTenantService = _UserTenantService
    user_service_mod.UserService = _UserService
    monkeypatch.setitem(sys.modules, "api.db.services.user_service", user_service_mod)
    # Response-envelope helpers return plain dicts for easy assertions.
    api_utils_mod = ModuleType("api.utils.api_utils")
    api_utils_mod.get_json_result = lambda data=None, message="", code=0: {"code": code, "message": message, "data": data}
    api_utils_mod.get_data_error_result = lambda message="": {"code": 102, "message": message, "data": False}
    api_utils_mod.server_error_response = lambda exc: {"code": 100, "message": repr(exc), "data": False}
    api_utils_mod.validate_request = lambda *_args, **_kwargs: (lambda fn: fn)
    api_utils_mod.get_request_json = lambda: _AwaitableValue({})
    monkeypatch.setitem(sys.modules, "api.utils.api_utils", api_utils_mod)
    web_utils_mod = ModuleType("api.utils.web_utils")
    web_utils_mod.send_invite_email = lambda **_kwargs: {"ok": True}
    monkeypatch.setitem(sys.modules, "api.utils.web_utils", web_utils_mod)
    common_pkg = ModuleType("common")
    common_pkg.__path__ = [str(repo_root / "common")]
    monkeypatch.setitem(sys.modules, "common", common_pkg)
    constants_mod = ModuleType("common.constants")
    constants_mod.RetCode = SimpleNamespace(AUTHENTICATION_ERROR=401, SERVER_ERROR=500, DATA_ERROR=102)
    constants_mod.StatusEnum = SimpleNamespace(VALID=SimpleNamespace(value=1))
    monkeypatch.setitem(sys.modules, "common.constants", constants_mod)
    misc_utils_mod = ModuleType("common.misc_utils")
    misc_utils_mod.get_uuid = lambda: "uuid-1"
    monkeypatch.setitem(sys.modules, "common.misc_utils", misc_utils_mod)
    time_utils_mod = ModuleType("common.time_utils")
    time_utils_mod.delta_seconds = lambda _value: 0
    monkeypatch.setitem(sys.modules, "common.time_utils", time_utils_mod)
    settings_mod = ModuleType("common.settings")
    settings_mod.MAIL_FRONTEND_URL = "https://frontend.example/invite"
    monkeypatch.setitem(sys.modules, "common.settings", settings_mod)
    common_pkg.settings = settings_mod
    # Drop any leftover module object from a previous load so exec starts clean.
    sys.modules.pop("test_tenant_app_unit_module", None)
    module_path = repo_root / "api" / "apps" / "tenant_app.py"
    spec = importlib.util.spec_from_file_location("test_tenant_app_unit_module", module_path)
    module = importlib.util.module_from_spec(spec)
    # Set before exec so @manager.route decorators resolve to the no-op.
    module.manager = _DummyManager()
    monkeypatch.setitem(sys.modules, "test_tenant_app_unit_module", module)
    spec.loader.exec_module(module)
    return module
@pytest.mark.p2
def test_user_list_auth_success_exception_matrix_unit(monkeypatch):
    """user_list(): foreign caller rejected, owner gets annotated rows, errors wrapped."""
    module = _load_tenant_module(monkeypatch)
    # Caller id differs from the tenant id -> authorization failure.
    module.current_user.id = "other-user"
    res = module.user_list("tenant-1")
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res
    assert res["message"] == "No authorization.", res
    # Owner path: each row is annotated with delta_seconds from its update_date.
    module.current_user.id = "tenant-1"
    monkeypatch.setattr(
        module.UserTenantService,
        "get_by_tenant_id",
        lambda _tenant_id: [{"id": "u1", "update_date": "2024-01-01 00:00:00"}],
    )
    monkeypatch.setattr(module, "delta_seconds", lambda _value: 42)
    res = module.user_list("tenant-1")
    assert res["code"] == 0, res
    assert res["data"][0]["delta_seconds"] == 42, res
    # Service exception -> code-100 envelope carrying the message.
    monkeypatch.setattr(module.UserTenantService, "get_by_tenant_id", lambda _tenant_id: (_ for _ in ()).throw(RuntimeError("list boom")))
    res = module.user_list("tenant-1")
    assert res["code"] == 100, res
    assert "list boom" in res["message"], res
@pytest.mark.p2
def test_create_invite_role_and_email_failure_matrix_unit(monkeypatch):
    """create(): auth, unknown invitee, role conflicts, invite save + email scheduling."""
    module = _load_tenant_module(monkeypatch)
    # Only the tenant owner may invite.
    module.current_user.id = "other-user"
    _set_request_json(monkeypatch, module, {"email": "invitee@example.com"})
    res = _run(module.create("tenant-1"))
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res
    assert res["message"] == "No authorization.", res
    module.current_user.id = "tenant-1"
    # Invitee email not registered.
    monkeypatch.setattr(module.UserService, "query", lambda **_kwargs: [])
    res = _run(module.create("tenant-1"))
    assert res["message"] == "User not found.", res
    # An existing membership row short-circuits with a role-specific message.
    invitee = _Invitee()
    monkeypatch.setattr(module.UserService, "query", lambda **_kwargs: [invitee])
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(role=module.UserTenantRole.NORMAL)])
    res = _run(module.create("tenant-1"))
    assert "already in the team." in res["message"], res
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(role=module.UserTenantRole.OWNER)])
    res = _run(module.create("tenant-1"))
    assert "owner of the team." in res["message"], res
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(role="strange-role")])
    res = _run(module.create("tenant-1"))
    assert "role: strange-role is invalid." in res["message"], res
    # Happy path: an INVITE row is saved, the email task is scheduled with the
    # inviter's nickname, and the response strips the password field.
    saved = []
    scheduled = []
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [])
    monkeypatch.setattr(module.UserTenantService, "save", lambda **kwargs: saved.append(kwargs) or True)
    monkeypatch.setattr(module.UserService, "get_by_id", lambda _user_id: (True, SimpleNamespace(nickname="Inviter Nick")))
    # send_invite_email echoes its kwargs so create_task receives them as payload.
    monkeypatch.setattr(module, "send_invite_email", lambda **kwargs: kwargs)
    monkeypatch.setattr(module.asyncio, "create_task", lambda payload: scheduled.append(payload) or SimpleNamespace())
    res = _run(module.create("tenant-1"))
    assert res["code"] == 0, res
    assert saved and saved[-1]["role"] == module.UserTenantRole.INVITE, saved
    assert scheduled and scheduled[-1]["inviter"] == "Inviter Nick", scheduled
    assert sorted(res["data"].keys()) == ["avatar", "email", "id", "nickname"], res
    # Scheduling failure surfaces as SERVER_ERROR.
    monkeypatch.setattr(module.asyncio, "create_task", lambda _payload: (_ for _ in ()).throw(RuntimeError("send boom")))
    res = _run(module.create("tenant-1"))
    assert res["code"] == module.RetCode.SERVER_ERROR, res
    assert "Failed to send invite email." in res["message"], res
@pytest.mark.p2
def test_rm_and_tenant_list_matrix_unit(monkeypatch):
    """rm(): auth + delete outcomes; tenant_list(): annotation and error wrapping."""
    module = _load_tenant_module(monkeypatch)
    # Unrelated caller is rejected before any delete happens.
    module.current_user.id = "outsider"
    res = module.rm("tenant-1", "user-2")
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res
    assert res["message"] == "No authorization.", res
    module.current_user.id = "tenant-1"
    deleted = []
    monkeypatch.setattr(module.UserTenantService, "filter_delete", lambda conditions: deleted.append(conditions) or True)
    res = module.rm("tenant-1", "user-2")
    assert res["code"] == 0, res
    assert res["data"] is True, res
    assert deleted, "filter_delete should be called"
    # Delete raising -> code-100 envelope.
    monkeypatch.setattr(module.UserTenantService, "filter_delete", lambda _conditions: (_ for _ in ()).throw(RuntimeError("rm boom")))
    res = module.rm("tenant-1", "user-2")
    assert res["code"] == 100, res
    assert "rm boom" in res["message"], res
    # tenant_list(): rows are annotated with delta_seconds from update_date.
    monkeypatch.setattr(
        module.UserTenantService,
        "get_tenants_by_user_id",
        lambda _user_id: [{"id": "tenant-1", "update_date": "2024-01-01 00:00:00"}],
    )
    monkeypatch.setattr(module, "delta_seconds", lambda _value: 9)
    res = module.tenant_list()
    assert res["code"] == 0, res
    assert res["data"][0]["delta_seconds"] == 9, res
    # Listing failure is wrapped the same way.
    monkeypatch.setattr(module.UserTenantService, "get_tenants_by_user_id", lambda _user_id: (_ for _ in ()).throw(RuntimeError("tenant boom")))
    res = module.tenant_list()
    assert res["code"] == 100, res
    assert "tenant boom" in res["message"], res
@pytest.mark.p2
def test_agree_success_and_exception_unit(monkeypatch):
    """agree(): the membership row is updated to the NORMAL role; failures are wrapped."""
    module = _load_tenant_module(monkeypatch)
    calls = []
    monkeypatch.setattr(module.UserTenantService, "filter_update", lambda conditions, payload: calls.append((conditions, payload)) or True)
    res = module.agree("tenant-1")
    assert res["code"] == 0, res
    assert res["data"] is True, res
    # The update payload promotes the membership to NORMAL.
    assert calls and calls[-1][1]["role"] == module.UserTenantRole.NORMAL
    # filter_update raising -> code-100 envelope carrying the message.
    monkeypatch.setattr(module.UserTenantService, "filter_update", lambda _conditions, _payload: (_ for _ in ()).throw(RuntimeError("agree boom")))
    res = module.agree("tenant-1")
    assert res["code"] == 100, res
    assert "agree boom" in res["message"], res
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_user_app/test_tenant_app_unit.py",
"license": "Apache License 2.0",
"lines": 249,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_web_api/test_user_app/test_user_app_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import base64
import importlib.util
import sys
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _AwaitableValue:
def __init__(self, value):
self._value = value
def __await__(self):
async def _co():
return self._value
return _co().__await__()
class _Args(dict):
def get(self, key, default=None, type=None):
value = super().get(key, default)
if type is None:
return value
try:
return type(value)
except (TypeError, ValueError):
return default
class _DummyResponse:
def __init__(self, data):
self.data = data
self.headers = {}
class _DummyHTTPResponse:
def __init__(self, payload):
self._payload = payload
def json(self):
return self._payload
class _DummyRedis:
def __init__(self):
self.store = {}
def get(self, key):
return self.store.get(key)
def set(self, key, value, _ttl=None):
self.store[key] = value
def delete(self, key):
self.store.pop(key, None)
class _DummyUser:
def __init__(self, user_id, email, *, password="stored-password", is_active="1", nickname="nick"):
self.id = user_id
self.email = email
self.password = password
self.is_active = is_active
self.nickname = nickname
self.access_token = ""
self.save_calls = 0
def save(self):
self.save_calls += 1
def get_id(self):
return self.id
def to_json(self):
return {"id": self.id, "email": self.email, "nickname": self.nickname}
def to_dict(self):
return {"id": self.id, "email": self.email}
class _Field:
def __init__(self, name):
self.name = name
def __eq__(self, other):
return (self.name, other)
def _run(coro):
return asyncio.run(coro)
def _set_request_json(monkeypatch, module, payload):
async def _request_json():
return payload
monkeypatch.setattr(module, "get_request_json", _request_json)
def _set_request_args(monkeypatch, module, args=None):
    """Patch module.request with a stub whose .args behaves like request.args."""
    query_args = _Args(args or {})
    monkeypatch.setattr(module, "request", SimpleNamespace(args=query_args))
@pytest.fixture(scope="session")
def auth():
    """Session-wide dummy auth value; handlers are called directly so any string works."""
    value = "unit-auth"
    return value
@pytest.fixture(scope="session", autouse=True)
def set_tenant_info():
    """Autouse no-op; presumably overrides a conftest fixture that would hit a live server — TODO confirm."""
    return None
def _load_user_app(monkeypatch):
repo_root = Path(__file__).resolve().parents[4]
quart_mod = ModuleType("quart")
quart_mod.session = {}
quart_mod.request = SimpleNamespace(args=_Args({}))
async def _make_response(data):
return _DummyResponse(data)
quart_mod.make_response = _make_response
quart_mod.redirect = lambda url: {"redirect": url}
monkeypatch.setitem(sys.modules, "quart", quart_mod)
api_pkg = ModuleType("api")
api_pkg.__path__ = [str(repo_root / "api")]
monkeypatch.setitem(sys.modules, "api", api_pkg)
apps_mod = ModuleType("api.apps")
apps_mod.__path__ = [str(repo_root / "api" / "apps")]
apps_mod.current_user = _DummyUser("current-user", "current@example.com")
apps_mod.login_required = lambda fn: fn
apps_mod.login_user = lambda _user: True
apps_mod.logout_user = lambda: True
monkeypatch.setitem(sys.modules, "api.apps", apps_mod)
api_pkg.apps = apps_mod
apps_auth_mod = ModuleType("api.apps.auth")
apps_auth_mod.get_auth_client = lambda _config: SimpleNamespace(
get_authorization_url=lambda state: f"https://oauth.example/{state}"
)
monkeypatch.setitem(sys.modules, "api.apps.auth", apps_auth_mod)
db_mod = ModuleType("api.db")
db_mod.FileType = SimpleNamespace(FOLDER=SimpleNamespace(value="folder"))
db_mod.UserTenantRole = SimpleNamespace(OWNER="owner")
monkeypatch.setitem(sys.modules, "api.db", db_mod)
api_pkg.db = db_mod
db_models_mod = ModuleType("api.db.db_models")
class _DummyTenantLLMModel:
tenant_id = _Field("tenant_id")
@staticmethod
def delete():
class _DeleteQuery:
def where(self, *_args, **_kwargs):
return self
def execute(self):
return 1
return _DeleteQuery()
db_models_mod.TenantLLM = _DummyTenantLLMModel
monkeypatch.setitem(sys.modules, "api.db.db_models", db_models_mod)
services_pkg = ModuleType("api.db.services")
services_pkg.__path__ = []
monkeypatch.setitem(sys.modules, "api.db.services", services_pkg)
file_service_mod = ModuleType("api.db.services.file_service")
class _StubFileService:
@staticmethod
def insert(_data):
return True
file_service_mod.FileService = _StubFileService
monkeypatch.setitem(sys.modules, "api.db.services.file_service", file_service_mod)
llm_service_mod = ModuleType("api.db.services.llm_service")
llm_service_mod.get_init_tenant_llm = lambda _user_id: []
monkeypatch.setitem(sys.modules, "api.db.services.llm_service", llm_service_mod)
tenant_llm_service_mod = ModuleType("api.db.services.tenant_llm_service")
class _StubTenantLLMService:
@staticmethod
def insert_many(_payload):
return True
tenant_llm_service_mod.TenantLLMService = _StubTenantLLMService
monkeypatch.setitem(sys.modules, "api.db.services.tenant_llm_service", tenant_llm_service_mod)
user_service_mod = ModuleType("api.db.services.user_service")
class _StubTenantService:
@staticmethod
def insert(**_kwargs):
return True
@staticmethod
def delete_by_id(_tenant_id):
return True
@staticmethod
def get_by_id(_tenant_id):
return True, SimpleNamespace(id=_tenant_id)
@staticmethod
def get_info_by(_user_id):
return []
@staticmethod
def update_by_id(_tenant_id, _payload):
return True
class _StubUserService:
@staticmethod
def query(**_kwargs):
return []
@staticmethod
def query_user(_email, _password):
return None
@staticmethod
def query_user_by_email(**_kwargs):
return []
@staticmethod
def save(**_kwargs):
return True
@staticmethod
def delete_by_id(_user_id):
return True
@staticmethod
def update_by_id(_user_id, _payload):
return True
@staticmethod
def update_user_password(_user_id, _new_password):
return True
class _StubUserTenantService:
@staticmethod
def insert(**_kwargs):
return True
@staticmethod
def query(**_kwargs):
return []
@staticmethod
def delete_by_id(_user_tenant_id):
return True
user_service_mod.TenantService = _StubTenantService
user_service_mod.UserService = _StubUserService
user_service_mod.UserTenantService = _StubUserTenantService
monkeypatch.setitem(sys.modules, "api.db.services.user_service", user_service_mod)
api_utils_mod = ModuleType("api.utils.api_utils")
async def _default_request_json():
return {}
def _get_json_result(code=0, message="success", data=None):
return {"code": code, "message": message, "data": data}
def _get_data_error_result(code=102, message="Sorry! Data missing!", data=None):
return {"code": code, "message": message, "data": data}
def _server_error_response(error):
return {"code": 100, "message": repr(error)}
def _validate_request(*_args, **_kwargs):
def _decorator(func):
return func
return _decorator
api_utils_mod.get_request_json = _default_request_json
api_utils_mod.get_json_result = _get_json_result
api_utils_mod.get_data_error_result = _get_data_error_result
api_utils_mod.server_error_response = _server_error_response
api_utils_mod.validate_request = _validate_request
monkeypatch.setitem(sys.modules, "api.utils.api_utils", api_utils_mod)
crypt_mod = ModuleType("api.utils.crypt")
crypt_mod.decrypt = lambda value: value
monkeypatch.setitem(sys.modules, "api.utils.crypt", crypt_mod)
web_utils_mod = ModuleType("api.utils.web_utils")
web_utils_mod.send_email_html = lambda *_args, **_kwargs: _AwaitableValue(True)
web_utils_mod.OTP_LENGTH = 6
web_utils_mod.OTP_TTL_SECONDS = 600
web_utils_mod.ATTEMPT_LIMIT = 5
web_utils_mod.ATTEMPT_LOCK_SECONDS = 600
web_utils_mod.RESEND_COOLDOWN_SECONDS = 60
web_utils_mod.otp_keys = lambda email: (
f"otp:{email}:code",
f"otp:{email}:attempts",
f"otp:{email}:last",
f"otp:{email}:lock",
)
web_utils_mod.hash_code = lambda code, _salt: f"hash:{code}"
web_utils_mod.captcha_key = lambda email: f"captcha:{email}"
monkeypatch.setitem(sys.modules, "api.utils.web_utils", web_utils_mod)
common_pkg = ModuleType("common")
common_pkg.__path__ = [str(repo_root / "common")]
monkeypatch.setitem(sys.modules, "common", common_pkg)
settings_mod = ModuleType("common.settings")
settings_mod.OAUTH_CONFIG = {
"github": {"display_name": "GitHub", "icon": "gh"},
"feishu": {"display_name": "Feishu", "icon": "fs"},
}
settings_mod.GITHUB_OAUTH = {"url": "https://github.example/oauth", "client_id": "cid", "secret_key": "sk"}
settings_mod.FEISHU_OAUTH = {
"app_access_token_url": "https://feishu.example/app_token",
"user_access_token_url": "https://feishu.example/user_token",
"app_id": "app-id",
"app_secret": "app-secret",
"grant_type": "authorization_code",
}
settings_mod.CHAT_MDL = "chat-mdl"
settings_mod.EMBEDDING_MDL = "embd-mdl"
settings_mod.ASR_MDL = "asr-mdl"
settings_mod.PARSERS = []
settings_mod.IMAGE2TEXT_MDL = "img-mdl"
settings_mod.RERANK_MDL = "rerank-mdl"
settings_mod.REGISTER_ENABLED = True
monkeypatch.setitem(sys.modules, "common.settings", settings_mod)
common_pkg.settings = settings_mod
constants_mod = ModuleType("common.constants")
constants_mod.RetCode = SimpleNamespace(
AUTHENTICATION_ERROR=401,
SERVER_ERROR=500,
FORBIDDEN=403,
EXCEPTION_ERROR=100,
OPERATING_ERROR=300,
ARGUMENT_ERROR=101,
DATA_ERROR=102,
NOT_EFFECTIVE=103,
SUCCESS=0,
)
monkeypatch.setitem(sys.modules, "common.constants", constants_mod)
connection_utils_mod = ModuleType("common.connection_utils")
async def _construct_response(data=None, auth=None, message=""):
return {"code": 0, "message": message, "data": data, "auth": auth}
connection_utils_mod.construct_response = _construct_response
monkeypatch.setitem(sys.modules, "common.connection_utils", connection_utils_mod)
time_utils_mod = ModuleType("common.time_utils")
time_utils_mod.current_timestamp = lambda: 111
time_utils_mod.datetime_format = lambda _dt: "2024-01-01 00:00:00"
time_utils_mod.get_format_time = lambda: "2024-01-01 00:00:00"
monkeypatch.setitem(sys.modules, "common.time_utils", time_utils_mod)
misc_utils_mod = ModuleType("common.misc_utils")
misc_utils_mod.download_img = lambda _url: "avatar"
misc_utils_mod.get_uuid = lambda: "uuid-default"
monkeypatch.setitem(sys.modules, "common.misc_utils", misc_utils_mod)
http_client_mod = ModuleType("common.http_client")
async def _async_request(_method, _url, **_kwargs):
return _DummyHTTPResponse({})
http_client_mod.async_request = _async_request
monkeypatch.setitem(sys.modules, "common.http_client", http_client_mod)
rag_pkg = ModuleType("rag")
rag_pkg.__path__ = [str(repo_root / "rag")]
monkeypatch.setitem(sys.modules, "rag", rag_pkg)
rag_utils_pkg = ModuleType("rag.utils")
rag_utils_pkg.__path__ = [str(repo_root / "rag" / "utils")]
monkeypatch.setitem(sys.modules, "rag.utils", rag_utils_pkg)
redis_mod = ModuleType("rag.utils.redis_conn")
redis_mod.REDIS_CONN = _DummyRedis()
monkeypatch.setitem(sys.modules, "rag.utils.redis_conn", redis_mod)
module_name = "test_user_app_unit_module"
module_path = repo_root / "api" / "apps" / "user_app.py"
spec = importlib.util.spec_from_file_location(module_name, module_path)
module = importlib.util.module_from_spec(spec)
module.manager = _DummyManager()
monkeypatch.setitem(sys.modules, module_name, module)
spec.loader.exec_module(module)
return module
@pytest.mark.p2
def test_login_route_branch_matrix_unit(monkeypatch):
    # Walk every early-exit branch of module.login() in sequence.
    module = _load_user_app(monkeypatch)
    # Empty request body -> unauthorized.
    _set_request_json(monkeypatch, module, {})
    res = _run(module.login())
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR
    assert "Unauthorized" in res["message"]
    # Email not present in UserService -> "not registered".
    _set_request_json(monkeypatch, module, {"email": "unknown@example.com", "password": "enc"})
    monkeypatch.setattr(module.UserService, "query", lambda **_kwargs: [])
    res = _run(module.login())
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR
    assert "not registered" in res["message"]
    # decrypt() raising -> server error about password decryption.
    _set_request_json(monkeypatch, module, {"email": "known@example.com", "password": "enc"})
    monkeypatch.setattr(module.UserService, "query", lambda **_kwargs: [SimpleNamespace(email="known@example.com")])
    def _raise_decrypt(_value):
        raise RuntimeError("decrypt explode")
    monkeypatch.setattr(module, "decrypt", _raise_decrypt)
    res = _run(module.login())
    assert res["code"] == module.RetCode.SERVER_ERROR
    assert "Fail to crypt password" in res["message"]
    # Credentials match but account is inactive -> forbidden/"disabled".
    user_inactive = _DummyUser("u-inactive", "known@example.com", is_active="0")
    monkeypatch.setattr(module, "decrypt", lambda value: value)
    monkeypatch.setattr(module.UserService, "query_user", lambda _email, _password: user_inactive)
    res = _run(module.login())
    assert res["code"] == module.RetCode.FORBIDDEN
    assert "disabled" in res["message"]
    # query_user() returning None (wrong password) -> mismatch message.
    monkeypatch.setattr(module.UserService, "query_user", lambda _email, _password: None)
    res = _run(module.login())
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR
    assert "do not match" in res["message"]
@pytest.mark.p2
def test_login_channels_and_oauth_login_matrix_unit(monkeypatch):
    # get_login_channels(): success and failure; oauth_login(): bad channel
    # and the happy-path redirect with session state.
    module = _load_user_app(monkeypatch)
    module.settings.OAUTH_CONFIG = {"github": {"display_name": "GitHub", "icon": "gh"}}
    res = _run(module.get_login_channels())
    assert res["code"] == 0
    assert res["data"][0]["channel"] == "github"
    # A config whose .items() raises -> wrapped "Load channels failure".
    class _BrokenOAuthConfig:
        @staticmethod
        def items():
            raise RuntimeError("broken oauth config")
    module.settings.OAUTH_CONFIG = _BrokenOAuthConfig()
    res = _run(module.get_login_channels())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "Load channels failure" in res["message"]
    # Unknown channel name raises ValueError out of oauth_login().
    module.settings.OAUTH_CONFIG = {"github": {"display_name": "GitHub", "icon": "gh"}}
    with pytest.raises(ValueError, match="Invalid channel name: missing"):
        _run(module.oauth_login("missing"))
    # Happy path: new state stored in session; redirect built by the client.
    module.session.clear()
    monkeypatch.setattr(module, "get_uuid", lambda: "state-123")
    class _AuthClient:
        @staticmethod
        def get_authorization_url(state):
            return f"https://oauth.example/{state}"
    monkeypatch.setattr(module, "get_auth_client", lambda _config: _AuthClient())
    res = _run(module.oauth_login("github"))
    assert res["redirect"] == "https://oauth.example/state-123"
    assert module.session["oauth_state"] == "state-123"
@pytest.mark.p2
def test_oauth_callback_matrix_unit(monkeypatch):
    # Full branch matrix for oauth_callback(), using both a blocking and a
    # coroutine-based auth-client stub. Order matters: later scenarios build
    # on monkeypatches installed by earlier ones.
    module = _load_user_app(monkeypatch)
    module.settings.OAUTH_CONFIG = {"github": {"display_name": "GitHub", "icon": "gh"}}
    class _SyncAuthClient:
        # Blocking token-exchange / user-info client stub.
        def __init__(self, token_info, user_info):
            self._token_info = token_info
            self._user_info = user_info
        def exchange_code_for_token(self, _code):
            return self._token_info
        def fetch_user_info(self, _token, id_token=None):
            _ = id_token
            return self._user_info
    class _AsyncAuthClient:
        # Coroutine variant of the same client contract.
        def __init__(self, token_info, user_info):
            self._token_info = token_info
            self._user_info = user_info
        async def async_exchange_code_for_token(self, _code):
            return self._token_info
        async def async_fetch_user_info(self, _token, id_token=None):
            _ = id_token
            return self._user_info
    # Unknown channel -> redirect carrying the error text.
    _set_request_args(monkeypatch, module, {"state": "x", "code": "c"})
    module.session.clear()
    res = _run(module.oauth_callback("missing"))
    assert "Invalid channel name: missing" in res["redirect"]
    # Session state differs from query-arg state -> invalid_state.
    sync_ok = _SyncAuthClient(
        token_info={"access_token": "token-sync", "id_token": "id-sync"},
        user_info=SimpleNamespace(email="sync@example.com", avatar_url="http://img", nickname="sync"),
    )
    monkeypatch.setattr(module, "get_auth_client", lambda _config: sync_ok)
    module.session.clear()
    module.session["oauth_state"] = "expected"
    _set_request_args(monkeypatch, module, {"state": "wrong", "code": "code"})
    res = _run(module.oauth_callback("github"))
    assert res["redirect"] == "/?error=invalid_state"
    # State matches but no "code" query arg -> missing_code.
    module.session.clear()
    module.session["oauth_state"] = "ok-state"
    _set_request_args(monkeypatch, module, {"state": "ok-state"})
    res = _run(module.oauth_callback("github"))
    assert res["redirect"] == "/?error=missing_code"
    # Token response lacking access_token -> token_failed.
    sync_missing_token = _SyncAuthClient(
        token_info={"id_token": "id-only"},
        user_info=SimpleNamespace(email="sync@example.com", avatar_url="http://img", nickname="sync"),
    )
    monkeypatch.setattr(module, "get_auth_client", lambda _config: sync_missing_token)
    module.session.clear()
    module.session["oauth_state"] = "token-state"
    _set_request_args(monkeypatch, module, {"state": "token-state", "code": "code"})
    res = _run(module.oauth_callback("github"))
    assert res["redirect"] == "/?error=token_failed"
    # User info without an email -> email_missing.
    sync_missing_email = _SyncAuthClient(
        token_info={"access_token": "token-sync", "id_token": "id-sync"},
        user_info=SimpleNamespace(email=None, avatar_url="http://img", nickname="sync"),
    )
    monkeypatch.setattr(module, "get_auth_client", lambda _config: sync_missing_email)
    module.session.clear()
    module.session["oauth_state"] = "email-state"
    _set_request_args(monkeypatch, module, {"state": "email-state", "code": "code"})
    res = _run(module.oauth_callback("github"))
    assert res["redirect"] == "/?error=email_missing"
    # New user via async client; user_register() -> None fails registration
    # and triggers rollback_user_registration with the generated uuid.
    async_new_user = _AsyncAuthClient(
        token_info={"access_token": "token-async", "id_token": "id-async"},
        user_info=SimpleNamespace(email="new@example.com", avatar_url="http://img", nickname="new-user"),
    )
    monkeypatch.setattr(module, "get_auth_client", lambda _config: async_new_user)
    monkeypatch.setattr(module.UserService, "query", lambda **_kwargs: [])
    def _raise_download(_url):
        raise RuntimeError("download explode")
    monkeypatch.setattr(module, "download_img", _raise_download)
    monkeypatch.setattr(module, "user_register", lambda _user_id, _user: None)
    rollback_calls = []
    monkeypatch.setattr(module, "rollback_user_registration", lambda user_id: rollback_calls.append(user_id))
    monkeypatch.setattr(module, "get_uuid", lambda: "new-user-id")
    module.session.clear()
    module.session["oauth_state"] = "new-user-state"
    _set_request_args(monkeypatch, module, {"state": "new-user-state", "code": "code"})
    res = _run(module.oauth_callback("github"))
    assert "Failed to register new@example.com" in res["redirect"]
    assert rollback_calls == ["new-user-id"]
    # Registration yielding duplicate users also rolls back.
    monkeypatch.setattr(module, "download_img", lambda _url: "avatar")
    monkeypatch.setattr(
        module,
        "user_register",
        lambda _user_id, _user: [_DummyUser("dup-1", "new@example.com"), _DummyUser("dup-2", "new@example.com")],
    )
    rollback_calls.clear()
    module.session.clear()
    module.session["oauth_state"] = "dup-user-state"
    _set_request_args(monkeypatch, module, {"state": "dup-user-state", "code": "code"})
    res = _run(module.oauth_callback("github"))
    assert "Same email: new@example.com exists!" in res["redirect"]
    assert rollback_calls == ["new-user-id"]
    # Successful registration logs the new user in and redirects with its id.
    new_user = _DummyUser("new-user", "new@example.com")
    login_calls = []
    monkeypatch.setattr(module, "login_user", lambda user: login_calls.append(user))
    monkeypatch.setattr(module, "user_register", lambda _user_id, _user: [new_user])
    module.session.clear()
    module.session["oauth_state"] = "create-user-state"
    _set_request_args(monkeypatch, module, {"state": "create-user-state", "code": "code"})
    res = _run(module.oauth_callback("github"))
    assert res["redirect"] == "/?auth=new-user"
    assert login_calls and login_calls[-1] is new_user
    # Existing but inactive account is rejected.
    async_existing_inactive = _AsyncAuthClient(
        token_info={"access_token": "token-existing", "id_token": "id-existing"},
        user_info=SimpleNamespace(email="existing@example.com", avatar_url="http://img", nickname="existing"),
    )
    monkeypatch.setattr(module, "get_auth_client", lambda _config: async_existing_inactive)
    inactive_user = _DummyUser("existing-user", "existing@example.com", is_active="0")
    monkeypatch.setattr(module.UserService, "query", lambda **_kwargs: [inactive_user])
    module.session.clear()
    module.session["oauth_state"] = "inactive-state"
    _set_request_args(monkeypatch, module, {"state": "inactive-state", "code": "code"})
    res = _run(module.oauth_callback("github"))
    assert res["redirect"] == "/?error=user_inactive"
    # Existing active account: fresh access token, exactly one save, login.
    async_existing_ok = _AsyncAuthClient(
        token_info={"access_token": "token-existing", "id_token": "id-existing"},
        user_info=SimpleNamespace(email="existing@example.com", avatar_url="http://img", nickname="existing"),
    )
    monkeypatch.setattr(module, "get_auth_client", lambda _config: async_existing_ok)
    existing_user = _DummyUser("existing-user", "existing@example.com")
    monkeypatch.setattr(module.UserService, "query", lambda **_kwargs: [existing_user])
    login_calls.clear()
    monkeypatch.setattr(module, "login_user", lambda user: login_calls.append(user))
    monkeypatch.setattr(module, "get_uuid", lambda: "existing-token")
    module.session.clear()
    module.session["oauth_state"] = "existing-state"
    _set_request_args(monkeypatch, module, {"state": "existing-state", "code": "code"})
    res = _run(module.oauth_callback("github"))
    assert res["redirect"] == "/?auth=existing-user"
    assert existing_user.access_token == "existing-token"
    assert existing_user.save_calls == 1
    assert login_calls and login_calls[-1] is existing_user
@pytest.mark.p2
def test_github_callback_matrix_unit(monkeypatch):
    # Branch matrix for the legacy github_callback() route.
    module = _load_user_app(monkeypatch)
    _set_request_args(monkeypatch, module, {"code": "code"})
    module.session.clear()
    # Token endpoint returns an error payload -> error_description surfaced.
    async def _request_error(_method, _url, **_kwargs):
        return _DummyHTTPResponse({"error": "bad", "error_description": "boom"})
    monkeypatch.setattr(module, "async_request", _request_error)
    res = _run(module.github_callback())
    assert res["redirect"] == "/?error=boom"
    # Token granted without the required user:email scope.
    async def _request_scope_missing(_method, _url, **_kwargs):
        return _DummyHTTPResponse({"scope": "repo", "access_token": "token-gh"})
    monkeypatch.setattr(module, "async_request", _request_scope_missing)
    res = _run(module.github_callback())
    assert res["redirect"] == "/?error=user:email not in scope"
    # Valid token + user info from here on.
    async def _request_token(_method, _url, **_kwargs):
        return _DummyHTTPResponse({"scope": "user:email,repo", "access_token": "token-gh"})
    monkeypatch.setattr(module, "async_request", _request_token)
    monkeypatch.setattr(
        module,
        "user_info_from_github",
        lambda _token: _AwaitableValue({"email": "gh@example.com", "avatar_url": "http://img", "login": "gh-user"}),
    )
    # New user whose registration fails (user_register -> None) -> rollback.
    monkeypatch.setattr(module.UserService, "query", lambda **_kwargs: [])
    rollback_calls = []
    monkeypatch.setattr(module, "rollback_user_registration", lambda user_id: rollback_calls.append(user_id))
    monkeypatch.setattr(module, "get_uuid", lambda: "gh-user-id")
    def _raise_download(_url):
        raise RuntimeError("download explode")
    monkeypatch.setattr(module, "download_img", _raise_download)
    monkeypatch.setattr(module, "user_register", lambda _user_id, _user: None)
    res = _run(module.github_callback())
    assert "Fail to register gh@example.com." in res["redirect"]
    assert rollback_calls == ["gh-user-id"]
    # Registration returning duplicate users -> rollback again.
    monkeypatch.setattr(module, "download_img", lambda _url: "avatar")
    monkeypatch.setattr(
        module,
        "user_register",
        lambda _user_id, _user: [_DummyUser("dup-1", "gh@example.com"), _DummyUser("dup-2", "gh@example.com")],
    )
    rollback_calls.clear()
    res = _run(module.github_callback())
    assert "Same email: gh@example.com exists!" in res["redirect"]
    assert rollback_calls == ["gh-user-id"]
    # Successful registration logs the new user in.
    new_user = _DummyUser("gh-new-user", "gh@example.com")
    login_calls = []
    monkeypatch.setattr(module, "login_user", lambda user: login_calls.append(user))
    monkeypatch.setattr(module, "user_register", lambda _user_id, _user: [new_user])
    res = _run(module.github_callback())
    assert res["redirect"] == "/?auth=gh-new-user"
    assert login_calls and login_calls[-1] is new_user
    # Existing but inactive account is rejected.
    inactive_user = _DummyUser("gh-existing", "gh@example.com", is_active="0")
    monkeypatch.setattr(module.UserService, "query", lambda **_kwargs: [inactive_user])
    res = _run(module.github_callback())
    assert res["redirect"] == "/?error=user_inactive"
    # Existing active account: new access token, one save, one login call.
    existing_user = _DummyUser("gh-existing", "gh@example.com")
    login_calls.clear()
    monkeypatch.setattr(module.UserService, "query", lambda **_kwargs: [existing_user])
    monkeypatch.setattr(module, "login_user", lambda user: login_calls.append(user))
    monkeypatch.setattr(module, "get_uuid", lambda: "gh-existing-token")
    res = _run(module.github_callback())
    assert res["redirect"] == "/?auth=gh-existing"
    assert existing_user.access_token == "gh-existing-token"
    assert existing_user.save_calls == 1
    assert login_calls and login_calls[-1] is existing_user
@pytest.mark.p2
def test_feishu_callback_matrix_unit(monkeypatch):
    # feishu_callback() performs two sequential HTTP calls (app token, then
    # user token); each scenario queues one payload per expected call.
    module = _load_user_app(monkeypatch)
    _set_request_args(monkeypatch, module, {"code": "code"})
    module.session.clear()
    def _patch_async_queue(payloads):
        # Replace async_request with a stub popping one canned payload per call.
        queue = list(payloads)
        async def _request(_method, _url, **_kwargs):
            return _DummyHTTPResponse(queue.pop(0))
        monkeypatch.setattr(module, "async_request", _request)
    # App-token request failing (code != 0) -> error redirect.
    _patch_async_queue([{"code": 1}])
    res = _run(module.feishu_callback())
    assert "/?error=" in res["redirect"]
    # User-token request failing -> its message surfaced.
    _patch_async_queue(
        [
            {"code": 0, "app_access_token": "app-token"},
            {"code": 1, "message": "bad token"},
        ]
    )
    res = _run(module.feishu_callback())
    assert res["redirect"] == "/?error=bad token"
    # Token granted without the email-readonly scope.
    _patch_async_queue(
        [
            {"code": 0, "app_access_token": "app-token"},
            {"code": 0, "data": {"scope": "other", "access_token": "feishu-access"}},
        ]
    )
    res = _run(module.feishu_callback())
    assert "contact:user.email:readonly not in scope" in res["redirect"]
    # New user whose registration fails (user_register -> None) -> rollback.
    _patch_async_queue(
        [
            {"code": 0, "app_access_token": "app-token"},
            {"code": 0, "data": {"scope": "contact:user.email:readonly", "access_token": "feishu-access"}},
        ]
    )
    monkeypatch.setattr(
        module,
        "user_info_from_feishu",
        lambda _token: _AwaitableValue({"email": "fs@example.com", "avatar_url": "http://img", "en_name": "fs-user"}),
    )
    monkeypatch.setattr(module.UserService, "query", lambda **_kwargs: [])
    rollback_calls = []
    monkeypatch.setattr(module, "rollback_user_registration", lambda user_id: rollback_calls.append(user_id))
    monkeypatch.setattr(module, "get_uuid", lambda: "fs-user-id")
    def _raise_download(_url):
        raise RuntimeError("download explode")
    monkeypatch.setattr(module, "download_img", _raise_download)
    monkeypatch.setattr(module, "user_register", lambda _user_id, _user: None)
    res = _run(module.feishu_callback())
    assert "Fail to register fs@example.com." in res["redirect"]
    assert rollback_calls == ["fs-user-id"]
    # Registration returning duplicate users also rolls back.
    _patch_async_queue(
        [
            {"code": 0, "app_access_token": "app-token"},
            {"code": 0, "data": {"scope": "contact:user.email:readonly", "access_token": "feishu-access"}},
        ]
    )
    monkeypatch.setattr(module, "download_img", lambda _url: "avatar")
    monkeypatch.setattr(
        module,
        "user_register",
        lambda _user_id, _user: [_DummyUser("dup-1", "fs@example.com"), _DummyUser("dup-2", "fs@example.com")],
    )
    rollback_calls.clear()
    res = _run(module.feishu_callback())
    assert "Same email: fs@example.com exists!" in res["redirect"]
    assert rollback_calls == ["fs-user-id"]
    # Successful registration logs the new user in.
    _patch_async_queue(
        [
            {"code": 0, "app_access_token": "app-token"},
            {"code": 0, "data": {"scope": "contact:user.email:readonly", "access_token": "feishu-access"}},
        ]
    )
    new_user = _DummyUser("fs-new-user", "fs@example.com")
    login_calls = []
    monkeypatch.setattr(module, "login_user", lambda user: login_calls.append(user))
    monkeypatch.setattr(module, "user_register", lambda _user_id, _user: [new_user])
    res = _run(module.feishu_callback())
    assert res["redirect"] == "/?auth=fs-new-user"
    assert login_calls and login_calls[-1] is new_user
    # Existing but inactive account is rejected.
    _patch_async_queue(
        [
            {"code": 0, "app_access_token": "app-token"},
            {"code": 0, "data": {"scope": "contact:user.email:readonly", "access_token": "feishu-access"}},
        ]
    )
    inactive_user = _DummyUser("fs-existing", "fs@example.com", is_active="0")
    monkeypatch.setattr(module.UserService, "query", lambda **_kwargs: [inactive_user])
    res = _run(module.feishu_callback())
    assert res["redirect"] == "/?error=user_inactive"
    # Existing active account: new access token, one save, one login call.
    _patch_async_queue(
        [
            {"code": 0, "app_access_token": "app-token"},
            {"code": 0, "data": {"scope": "contact:user.email:readonly", "access_token": "feishu-access"}},
        ]
    )
    existing_user = _DummyUser("fs-existing", "fs@example.com")
    login_calls.clear()
    monkeypatch.setattr(module.UserService, "query", lambda **_kwargs: [existing_user])
    monkeypatch.setattr(module, "login_user", lambda user: login_calls.append(user))
    monkeypatch.setattr(module, "get_uuid", lambda: "fs-existing-token")
    res = _run(module.feishu_callback())
    assert res["redirect"] == "/?auth=fs-existing"
    assert existing_user.access_token == "fs-existing-token"
    assert existing_user.save_calls == 1
    assert login_calls and login_calls[-1] is existing_user
@pytest.mark.p2
def test_oauth_user_info_helpers_unit(monkeypatch):
    """Feishu helper maps an empty email to None; GitHub helper picks the primary email."""
    module = _load_user_app(monkeypatch)

    async def _fake_feishu_request(_method, _url, **_kwargs):
        return _DummyHTTPResponse({"data": {"email": "", "en_name": "Feishu User"}})

    monkeypatch.setattr(module, "async_request", _fake_feishu_request)
    info = _run(module.user_info_from_feishu("token-feishu"))
    assert info["email"] is None
    assert info["en_name"] == "Feishu User"

    async def _fake_github_request(_method, url, **_kwargs):
        # The helper hits the profile endpoint and the /emails endpoint;
        # dispatch on the URL to serve the right canned payload.
        if "emails" in url:
            return _DummyHTTPResponse(
                [
                    {"email": "secondary@example.com", "primary": False},
                    {"email": "primary@example.com", "primary": True},
                ]
            )
        return _DummyHTTPResponse({"login": "gh-user"})

    monkeypatch.setattr(module, "async_request", _fake_github_request)
    profile = _run(module.user_info_from_github("token-github"))
    assert profile["login"] == "gh-user"
    assert profile["email"] == "primary@example.com"
@pytest.mark.p2
def test_logout_setting_profile_matrix_unit(monkeypatch):
    # Covers log_out(), setting_user() branches, and user_profile().
    module = _load_user_app(monkeypatch)
    current_user = _DummyUser("current-user", "current@example.com", password="stored-password")
    monkeypatch.setattr(module, "current_user", current_user)
    # log_out(): token invalidated with an "INVALID_" prefix, user saved once,
    # and logout_user() called exactly once.
    monkeypatch.setattr(module.secrets, "token_hex", lambda _n: "abcdef")
    logout_calls = []
    monkeypatch.setattr(module, "logout_user", lambda: logout_calls.append(True))
    res = _run(module.log_out())
    assert res["code"] == 0
    assert current_user.access_token == "INVALID_abcdef"
    assert current_user.save_calls == 1
    assert logout_calls == [True]
    # setting_user(): wrong current password rejected.
    _set_request_json(monkeypatch, module, {"password": "old-password", "new_password": "new-password"})
    monkeypatch.setattr(module, "decrypt", lambda value: value)
    monkeypatch.setattr(module, "check_password_hash", lambda _hashed, _plain: False)
    res = _run(module.setting_user())
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR
    assert "Password error" in res["message"]
    # Successful update: new password decrypted then re-hashed; nickname/theme
    # pass through while email/status are stripped from the payload.
    _set_request_json(
        monkeypatch,
        module,
        {
            "password": "old-password",
            "new_password": "new-password",
            "nickname": "neo",
            "email": "blocked@example.com",
            "status": "disabled",
            "theme": "dark",
        },
    )
    monkeypatch.setattr(module, "check_password_hash", lambda _hashed, _plain: True)
    monkeypatch.setattr(module, "decrypt", lambda value: f"dec:{value}")
    monkeypatch.setattr(module, "generate_password_hash", lambda value: f"hash:{value}")
    update_calls = {}
    def _update_by_id(user_id, payload):
        update_calls["user_id"] = user_id
        update_calls["payload"] = payload
        return True
    monkeypatch.setattr(module.UserService, "update_by_id", _update_by_id)
    res = _run(module.setting_user())
    assert res["code"] == 0
    assert res["data"] is True
    assert update_calls["user_id"] == "current-user"
    assert update_calls["payload"]["password"] == "hash:dec:new-password"
    assert update_calls["payload"]["nickname"] == "neo"
    assert update_calls["payload"]["theme"] == "dark"
    assert "email" not in update_calls["payload"]
    assert "status" not in update_calls["payload"]
    # update_by_id raising -> wrapped "Update failure" response.
    _set_request_json(monkeypatch, module, {"nickname": "neo"})
    def _raise_update(_user_id, _payload):
        raise RuntimeError("update explode")
    monkeypatch.setattr(module.UserService, "update_by_id", _raise_update)
    res = _run(module.setting_user())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR
    assert "Update failure" in res["message"]
    # user_profile() echoes the logged-in user's dict representation.
    res = _run(module.user_profile())
    assert res["code"] == 0
    assert res["data"] == current_user.to_dict()
@pytest.mark.p2
def test_registration_helpers_and_register_route_matrix_unit(monkeypatch):
    # Covers rollback_user_registration(), user_register(), and /user/add.
    module = _load_user_app(monkeypatch)
    # Happy-path rollback deletes the user, tenant, user-tenant link and
    # tenant LLM rows exactly once each.
    deleted = {"user": 0, "tenant": 0, "user_tenant": 0, "tenant_llm": 0}
    monkeypatch.setattr(module.UserService, "delete_by_id", lambda _user_id: deleted.__setitem__("user", deleted["user"] + 1))
    monkeypatch.setattr(module.TenantService, "delete_by_id", lambda _tenant_id: deleted.__setitem__("tenant", deleted["tenant"] + 1))
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: [SimpleNamespace(id="ut-1")])
    monkeypatch.setattr(module.UserTenantService, "delete_by_id", lambda _ut_id: deleted.__setitem__("user_tenant", deleted["user_tenant"] + 1))
    class _DeleteQuery:
        # Peewee-style chained delete().where().execute() stub.
        def where(self, *_args, **_kwargs):
            return self
        def execute(self):
            deleted["tenant_llm"] += 1
            return 1
    monkeypatch.setattr(module.TenantLLM, "delete", lambda: _DeleteQuery())
    module.rollback_user_registration("user-1")
    assert deleted == {"user": 1, "tenant": 1, "user_tenant": 1, "tenant_llm": 1}, deleted
    # Rollback must not propagate exceptions from any cleanup step — every
    # stub below raises, yet the call is expected to return normally.
    monkeypatch.setattr(module.UserService, "delete_by_id", lambda _user_id: (_ for _ in ()).throw(RuntimeError("u boom")))
    monkeypatch.setattr(module.TenantService, "delete_by_id", lambda _tenant_id: (_ for _ in ()).throw(RuntimeError("t boom")))
    monkeypatch.setattr(module.UserTenantService, "query", lambda **_kwargs: (_ for _ in ()).throw(RuntimeError("ut boom")))
    class _RaisingDeleteQuery:
        def where(self, *_args, **_kwargs):
            raise RuntimeError("llm boom")
    monkeypatch.setattr(module.TenantLLM, "delete", lambda: _RaisingDeleteQuery())
    module.rollback_user_registration("user-2")
    # user_register() returns None when the user row cannot be saved.
    monkeypatch.setattr(module.UserService, "save", lambda **_kwargs: False)
    res = module.user_register(
        "new-user",
        {
            "nickname": "new",
            "email": "new@example.com",
            "password": "pw",
            "access_token": "tk",
            "login_channel": "password",
            "last_login_time": "2024-01-01 00:00:00",
            "is_superuser": False,
        },
    )
    assert res is None
    # /user/add rejected while registration is disabled.
    monkeypatch.setattr(module.settings, "REGISTER_ENABLED", False)
    _set_request_json(monkeypatch, module, {"nickname": "neo", "email": "neo@example.com", "password": "enc"})
    res = _run(module.user_add())
    assert res["code"] == module.RetCode.OPERATING_ERROR, res
    assert "disabled" in res["message"], res
    # /user/add rejects a malformed email address.
    monkeypatch.setattr(module.settings, "REGISTER_ENABLED", True)
    _set_request_json(monkeypatch, module, {"nickname": "neo", "email": "bad-email", "password": "enc"})
    res = _run(module.user_add())
    assert res["code"] == module.RetCode.OPERATING_ERROR, res
    assert "Invalid email address" in res["message"], res
    # Registration failure (user_register -> None) triggers rollback with
    # the uuid generated for the new user.
    monkeypatch.setattr(module.UserService, "query", lambda **_kwargs: [])
    monkeypatch.setattr(module, "decrypt", lambda value: value)
    monkeypatch.setattr(module, "get_uuid", lambda: "new-user-id")
    rollback_calls = []
    monkeypatch.setattr(module, "rollback_user_registration", lambda user_id: rollback_calls.append(user_id))
    _set_request_json(monkeypatch, module, {"nickname": "neo", "email": "neo@example.com", "password": "enc"})
    monkeypatch.setattr(module, "user_register", lambda _user_id, _payload: None)
    res = _run(module.user_add())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR, res
    assert "Fail to register neo@example.com." in res["message"], res
    assert rollback_calls == ["new-user-id"], rollback_calls
    # Registration returning duplicate users also rolls back.
    rollback_calls.clear()
    monkeypatch.setattr(
        module,
        "user_register",
        lambda _user_id, _payload: [_DummyUser("dup-1", "neo@example.com"), _DummyUser("dup-2", "neo@example.com")],
    )
    _set_request_json(monkeypatch, module, {"nickname": "neo", "email": "neo@example.com", "password": "enc"})
    res = _run(module.user_add())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR, res
    assert "Same email: neo@example.com exists!" in res["message"], res
    assert rollback_calls == ["new-user-id"], rollback_calls
@pytest.mark.p2
def test_tenant_info_and_set_tenant_info_exception_matrix_unit(monkeypatch):
    """tenant_info() reports missing tenants and wraps service errors;
    set_tenant_info() wraps update failures the same way."""
    module = _load_user_app(monkeypatch)

    # No tenant rows for the user -> data-error response.
    monkeypatch.setattr(module.TenantService, "get_info_by", lambda _uid: [])
    outcome = _run(module.tenant_info())
    assert outcome["code"] == module.RetCode.DATA_ERROR, outcome
    assert "Tenant not found" in outcome["message"], outcome

    # Service raising -> exception-error response carrying the message.
    def _boom_get_info(_uid):
        raise RuntimeError("tenant info boom")

    monkeypatch.setattr(module.TenantService, "get_info_by", _boom_get_info)
    outcome = _run(module.tenant_info())
    assert outcome["code"] == module.RetCode.EXCEPTION_ERROR, outcome
    assert "tenant info boom" in outcome["message"], outcome

    # set_tenant_info(): update_by_id raising is surfaced identically.
    tenant_payload = {"tenant_id": "tenant-1", "llm_id": "l", "embd_id": "e", "asr_id": "a", "img2txt_id": "i"}
    _set_request_json(monkeypatch, module, tenant_payload)

    def _boom_update(_tenant_id, _payload):
        raise RuntimeError("tenant update boom")

    monkeypatch.setattr(module.TenantService, "update_by_id", _boom_update)
    outcome = _run(module.set_tenant_info())
    assert outcome["code"] == module.RetCode.EXCEPTION_ERROR, outcome
    assert "tenant update boom" in outcome["message"], outcome
@pytest.mark.p2
def test_forget_captcha_and_send_otp_matrix_unit(monkeypatch):
    """Walk forget_get_captcha() and forget_send_otp() through their branches."""
    module = _load_user_app(monkeypatch)

    class _Headers(dict):
        # Mimics a response-headers object: `set` stores like a dict entry.
        def set(self, key, value):
            self[key] = value

    async def _make_response(data):
        return SimpleNamespace(data=data, headers=_Headers())

    monkeypatch.setattr(module, "make_response", _make_response)

    # Stub the third-party `captcha` package so no real image is rendered;
    # the fake image payload is "img:<text>" bytes.
    captcha_pkg = ModuleType("captcha")
    captcha_image_mod = ModuleType("captcha.image")

    class _ImageCaptcha:
        def __init__(self, **_kwargs):
            pass

        def generate(self, text):
            return SimpleNamespace(read=lambda: f"img:{text}".encode())

    captcha_image_mod.ImageCaptcha = _ImageCaptcha
    monkeypatch.setitem(sys.modules, "captcha", captcha_pkg)
    monkeypatch.setitem(sys.modules, "captcha.image", captcha_image_mod)

    # forget_get_captcha: empty email -> ARGUMENT_ERROR.
    _set_request_args(monkeypatch, module, {"email": ""})
    res = _run(module.forget_get_captcha())
    assert res["code"] == module.RetCode.ARGUMENT_ERROR, res

    # forget_get_captcha: unknown email -> DATA_ERROR.
    monkeypatch.setattr(module.UserService, "query", lambda **_kwargs: [])
    _set_request_args(monkeypatch, module, {"email": "nobody@example.com"})
    res = _run(module.forget_get_captcha())
    assert res["code"] == module.RetCode.DATA_ERROR, res

    # forget_get_captcha success: image bytes returned with an image
    # content type and the captcha text cached under the redis key.
    monkeypatch.setattr(module.UserService, "query", lambda **_kwargs: [_DummyUser("u1", "ok@example.com")])
    monkeypatch.setattr(module.secrets, "choice", lambda _allowed: "A")
    _set_request_args(monkeypatch, module, {"email": "ok@example.com"})
    res = _run(module.forget_get_captcha())
    assert res.data.startswith(b"img:"), res
    assert res.headers["Content-Type"] == "image/JPEG", res.headers
    assert module.REDIS_CONN.get(module.captcha_key("ok@example.com")), module.REDIS_CONN.store

    # forget_send_otp: missing fields -> ARGUMENT_ERROR.
    _set_request_json(monkeypatch, module, {"email": "", "captcha": ""})
    res = _run(module.forget_send_otp())
    assert res["code"] == module.RetCode.ARGUMENT_ERROR, res

    # forget_send_otp: unknown email -> DATA_ERROR.
    monkeypatch.setattr(module.UserService, "query", lambda **_kwargs: [])
    _set_request_json(monkeypatch, module, {"email": "none@example.com", "captcha": "AAAA"})
    res = _run(module.forget_send_otp())
    assert res["code"] == module.RetCode.DATA_ERROR, res

    # forget_send_otp: captcha absent from redis (expired) -> NOT_EFFECTIVE.
    monkeypatch.setattr(module.UserService, "query", lambda **_kwargs: [_DummyUser("u1", "ok@example.com")])
    _set_request_json(monkeypatch, module, {"email": "ok@example.com", "captcha": "AAAA"})
    module.REDIS_CONN.store.pop(module.captcha_key("ok@example.com"), None)
    res = _run(module.forget_send_otp())
    assert res["code"] == module.RetCode.NOT_EFFECTIVE, res

    # forget_send_otp: wrong captcha -> AUTHENTICATION_ERROR.
    module.REDIS_CONN.store[module.captcha_key("ok@example.com")] = "ABCD"
    _set_request_json(monkeypatch, module, {"email": "ok@example.com", "captcha": "ZZZZ"})
    res = _run(module.forget_send_otp())
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res

    # forget_send_otp: last send only 10s ago (990 vs now=1000) ->
    # throttled with NOT_EFFECTIVE and a "wait" message.
    monkeypatch.setattr(module.time, "time", lambda: 1000)
    k_code, k_attempts, k_last, k_lock = module.otp_keys("ok@example.com")
    module.REDIS_CONN.store[module.captcha_key("ok@example.com")] = "ABCD"
    module.REDIS_CONN.store[k_last] = "990"
    _set_request_json(monkeypatch, module, {"email": "ok@example.com", "captcha": "ABCD"})
    res = _run(module.forget_send_otp())
    assert res["code"] == module.RetCode.NOT_EFFECTIVE, res
    assert "wait" in res["message"], res

    # forget_send_otp: email delivery failure -> SERVER_ERROR; a corrupt
    # last-send timestamp must be tolerated rather than crash.
    module.REDIS_CONN.store[module.captcha_key("ok@example.com")] = "ABCD"
    module.REDIS_CONN.store[k_last] = "bad-timestamp"
    monkeypatch.setattr(module.secrets, "choice", lambda _allowed: "B")
    monkeypatch.setattr(module.os, "urandom", lambda _n: b"\x00" * 16)
    monkeypatch.setattr(module, "hash_code", lambda code, _salt: f"HASH_{code}")

    async def _raise_send_email(*_args, **_kwargs):
        raise RuntimeError("send email boom")

    monkeypatch.setattr(module, "send_email_html", _raise_send_email)
    _set_request_json(monkeypatch, module, {"email": "ok@example.com", "captcha": "ABCD"})
    res = _run(module.forget_send_otp())
    assert res["code"] == module.RetCode.SERVER_ERROR, res
    assert "failed to send email" in res["message"], res

    # forget_send_otp success: OTP hash stored, attempts reset to 0, no lock.
    async def _ok_send_email(*_args, **_kwargs):
        return True

    module.REDIS_CONN.store[module.captcha_key("ok@example.com")] = "ABCD"
    module.REDIS_CONN.store.pop(k_last, None)
    monkeypatch.setattr(module, "send_email_html", _ok_send_email)
    _set_request_json(monkeypatch, module, {"email": "ok@example.com", "captcha": "ABCD"})
    res = _run(module.forget_send_otp())
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["data"] is True, res
    assert module.REDIS_CONN.get(k_code), module.REDIS_CONN.store
    assert module.REDIS_CONN.get(k_attempts) == 0, module.REDIS_CONN.store
    assert module.REDIS_CONN.get(k_lock) is None, module.REDIS_CONN.store
@pytest.mark.p2
def test_forget_verify_otp_matrix_unit(monkeypatch):
    """Walk forget_verify_otp() through its full error/success matrix."""
    module = _load_user_app(monkeypatch)
    email = "ok@example.com"
    k_code, k_attempts, k_last, k_lock = module.otp_keys(email)
    salt = b"\x01" * 16
    # Deterministic hash so stored codes are predictable "HASH_<code>" strings.
    monkeypatch.setattr(module, "hash_code", lambda code, _salt: f"HASH_{code}")

    # Missing email/otp -> ARGUMENT_ERROR.
    _set_request_json(monkeypatch, module, {})
    res = _run(module.forget_verify_otp())
    assert res["code"] == module.RetCode.ARGUMENT_ERROR, res

    # Unknown user -> DATA_ERROR.
    monkeypatch.setattr(module.UserService, "query", lambda **_kwargs: [])
    _set_request_json(monkeypatch, module, {"email": email, "otp": "ABCDEF"})
    res = _run(module.forget_verify_otp())
    assert res["code"] == module.RetCode.DATA_ERROR, res

    # Account locked out -> NOT_EFFECTIVE.
    monkeypatch.setattr(module.UserService, "query", lambda **_kwargs: [_DummyUser("u1", email)])
    module.REDIS_CONN.store[k_lock] = "1"
    _set_request_json(monkeypatch, module, {"email": email, "otp": "ABCDEF"})
    res = _run(module.forget_verify_otp())
    assert res["code"] == module.RetCode.NOT_EFFECTIVE, res

    # No stored OTP -> NOT_EFFECTIVE.
    module.REDIS_CONN.store.pop(k_lock, None)
    module.REDIS_CONN.store.pop(k_code, None)
    _set_request_json(monkeypatch, module, {"email": email, "otp": "ABCDEF"})
    res = _run(module.forget_verify_otp())
    assert res["code"] == module.RetCode.NOT_EFFECTIVE, res

    # Malformed stored record (not "hash:salt") -> EXCEPTION_ERROR.
    module.REDIS_CONN.store[k_code] = "broken"
    _set_request_json(monkeypatch, module, {"email": email, "otp": "ABCDEF"})
    res = _run(module.forget_verify_otp())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR, res

    # Wrong OTP while the attempts counter is corrupt: counter becomes 1,
    # i.e. corruption is treated as zero prior attempts.
    module.REDIS_CONN.store[k_code] = f"HASH_CORRECT:{salt.hex()}"
    module.REDIS_CONN.store[k_attempts] = "bad-int"
    _set_request_json(monkeypatch, module, {"email": email, "otp": "wrong"})
    res = _run(module.forget_verify_otp())
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res
    assert module.REDIS_CONN.get(k_attempts) == 1, module.REDIS_CONN.store

    # Final failed attempt (ATTEMPT_LIMIT reached) triggers the lockout key.
    module.REDIS_CONN.store[k_code] = f"HASH_CORRECT:{salt.hex()}"
    module.REDIS_CONN.store[k_attempts] = str(module.ATTEMPT_LIMIT - 1)
    _set_request_json(monkeypatch, module, {"email": email, "otp": "wrong"})
    res = _run(module.forget_verify_otp())
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res
    assert module.REDIS_CONN.get(k_lock) is not None, module.REDIS_CONN.store

    # Correct OTP but persisting the "verified" flag fails -> SERVER_ERROR.
    module.REDIS_CONN.store.pop(k_lock, None)
    module.REDIS_CONN.store[k_code] = f"HASH_ABCDEF:{salt.hex()}"
    module.REDIS_CONN.store[k_attempts] = "0"
    module.REDIS_CONN.store[k_last] = "1000"

    def _set_with_verified_fail(key, value, _ttl=None):
        if key == module._verified_key(email):
            raise RuntimeError("verified set boom")
        module.REDIS_CONN.store[key] = value

    monkeypatch.setattr(module.REDIS_CONN, "set", _set_with_verified_fail)
    _set_request_json(monkeypatch, module, {"email": email, "otp": "abcdef"})
    res = _run(module.forget_verify_otp())
    assert res["code"] == module.RetCode.SERVER_ERROR, res

    # Success: all OTP bookkeeping keys cleared and the verified flag set.
    # NOTE(review): the lowercase "abcdef" matching the stored HASH_ABCDEF
    # suggests the handler upper-cases the OTP before hashing — confirm.
    monkeypatch.setattr(module.REDIS_CONN, "set", lambda key, value, _ttl=None: module.REDIS_CONN.store.__setitem__(key, value))
    module.REDIS_CONN.store[k_code] = f"HASH_ABCDEF:{salt.hex()}"
    module.REDIS_CONN.store[k_attempts] = "0"
    module.REDIS_CONN.store[k_last] = "1000"
    _set_request_json(monkeypatch, module, {"email": email, "otp": "abcdef"})
    res = _run(module.forget_verify_otp())
    assert res["code"] == module.RetCode.SUCCESS, res
    assert module.REDIS_CONN.get(k_code) is None, module.REDIS_CONN.store
    assert module.REDIS_CONN.get(k_attempts) is None, module.REDIS_CONN.store
    assert module.REDIS_CONN.get(k_last) is None, module.REDIS_CONN.store
    assert module.REDIS_CONN.get(k_lock) is None, module.REDIS_CONN.store
    assert module.REDIS_CONN.get(module._verified_key(email)) == "1", module.REDIS_CONN.store
@pytest.mark.p2
def test_forget_reset_password_matrix_unit(monkeypatch):
    """Cover validation, failure, and success branches of forget_reset_password()."""
    module = _load_user_app(monkeypatch)
    email = "reset@example.com"
    v_key = module._verified_key(email)
    user = _DummyUser("u-reset", email, nickname="reset-user")
    # Passwords are sent base64-encoded; decrypt is stubbed to identity below.
    pwd_a = base64.b64encode(b"new-password").decode()
    pwd_b = base64.b64encode(b"confirm-password").decode()
    pwd_same = base64.b64encode(b"same-password").decode()
    monkeypatch.setattr(module, "decrypt", lambda value: value)

    # OTP-verified flag absent -> AUTHENTICATION_ERROR.
    _set_request_json(monkeypatch, module, {"email": email, "new_password": pwd_same, "confirm_new_password": pwd_same})
    module.REDIS_CONN.store.pop(v_key, None)
    res = _run(module.forget_reset_password())
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR, res

    # Decrypted password empty -> ARGUMENT_ERROR.
    module.REDIS_CONN.store[v_key] = "1"
    monkeypatch.setattr(module, "decrypt", lambda _value: "")
    _set_request_json(monkeypatch, module, {"email": email, "new_password": "", "confirm_new_password": ""})
    res = _run(module.forget_reset_password())
    assert res["code"] == module.RetCode.ARGUMENT_ERROR, res

    # Mismatched confirmation -> ARGUMENT_ERROR with explanation.
    monkeypatch.setattr(module, "decrypt", lambda value: value)
    module.REDIS_CONN.store[v_key] = "1"
    _set_request_json(monkeypatch, module, {"email": email, "new_password": pwd_a, "confirm_new_password": pwd_b})
    res = _run(module.forget_reset_password())
    assert res["code"] == module.RetCode.ARGUMENT_ERROR, res
    assert "do not match" in res["message"], res

    # Unknown account -> DATA_ERROR.
    module.REDIS_CONN.store[v_key] = "1"
    monkeypatch.setattr(module.UserService, "query_user_by_email", lambda **_kwargs: [])
    _set_request_json(monkeypatch, module, {"email": email, "new_password": pwd_same, "confirm_new_password": pwd_same})
    res = _run(module.forget_reset_password())
    assert res["code"] == module.RetCode.DATA_ERROR, res

    # Password update raising -> EXCEPTION_ERROR.
    module.REDIS_CONN.store[v_key] = "1"
    monkeypatch.setattr(module.UserService, "query_user_by_email", lambda **_kwargs: [user])

    def _raise_update_password(_user_id, _new_pwd):
        raise RuntimeError("reset boom")

    monkeypatch.setattr(module.UserService, "update_user_password", _raise_update_password)
    _set_request_json(monkeypatch, module, {"email": email, "new_password": pwd_same, "confirm_new_password": pwd_same})
    res = _run(module.forget_reset_password())
    assert res["code"] == module.RetCode.EXCEPTION_ERROR, res

    # Redis delete failure after a successful update is tolerated:
    # the reset still succeeds and the auth token is returned.
    module.REDIS_CONN.store[v_key] = "1"
    monkeypatch.setattr(module.UserService, "update_user_password", lambda _user_id, _new_pwd: True)
    monkeypatch.setattr(module.REDIS_CONN, "delete", lambda _key: (_ for _ in ()).throw(RuntimeError("delete boom")))
    _set_request_json(monkeypatch, module, {"email": email, "new_password": pwd_same, "confirm_new_password": pwd_same})
    res = _run(module.forget_reset_password())
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["auth"] == user.get_id(), res

    # Clean success: the verified flag is removed from redis.
    monkeypatch.setattr(module.REDIS_CONN, "delete", lambda key: module.REDIS_CONN.store.pop(key, None))
    module.REDIS_CONN.store[v_key] = "1"
    _set_request_json(monkeypatch, module, {"email": email, "new_password": pwd_same, "confirm_new_password": pwd_same})
    res = _run(module.forget_reset_password())
    assert res["code"] == module.RetCode.SUCCESS, res
    assert res["auth"] == user.get_id(), res
    assert module.REDIS_CONN.get(v_key) is None, module.REDIS_CONN.store
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_web_api/test_user_app/test_user_app_unit.py",
"license": "Apache License 2.0",
"lines": 1061,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_sdk_api/test_dataset_mangement/test_auto_metadata.py | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
@pytest.mark.usefixtures("clear_datasets")
class TestAutoMetadataOnCreate:
    @pytest.mark.p1
    def test_create_dataset_with_auto_metadata(self, client):
        """Create a dataset carrying an auto-metadata config and read it back."""
        field_specs = [
            {
                "name": "author",
                "type": "string",
                "description": "The author of the document",
                "examples": ["John Doe", "Jane Smith"],
                "restrict_values": False,
            },
            {
                "name": "category",
                "type": "list",
                "description": "Document category",
                "examples": ["Technical", "Business"],
                "restrict_values": True,
            },
        ]
        dataset = client.create_dataset(
            name="auto_metadata_create",
            auto_metadata_config={"enabled": True, "fields": field_specs},
        )
        # The SDK should expose parser_config via internal properties or metadata;
        # we rely on the HTTP API for verification via get_auto_metadata.
        stored = client.get_auto_metadata(dataset_id=dataset.id)
        assert stored["enabled"] is True
        assert len(stored["fields"]) == 2
        assert {f["name"] for f in stored["fields"]} == {"author", "category"}
@pytest.mark.usefixtures("clear_datasets")
class TestAutoMetadataOnUpdate:
    @pytest.mark.p1
    def test_update_auto_metadata_via_dataset_update(self, client, add_dataset_func):
        """Set auto-metadata via dataset.update, then replace it via the
        dedicated update_auto_metadata endpoint, verifying each round-trip."""
        dataset = add_dataset_func
        # Initially set auto-metadata via dataset.update
        payload = {
            "auto_metadata_config": {
                "enabled": True,
                "fields": [
                    {
                        "name": "tags",
                        "type": "list",
                        "description": "Document tags",
                        "examples": ["AI", "ML", "RAG"],
                        "restrict_values": False,
                    }
                ],
            }
        }
        dataset.update(payload)
        # First round-trip: config persisted as sent.
        cfg = client.get_auto_metadata(dataset_id=dataset.id)
        assert cfg["enabled"] is True
        assert len(cfg["fields"]) == 1
        assert cfg["fields"][0]["name"] == "tags"
        assert cfg["fields"][0]["type"] == "list"
        # Disable auto-metadata and replace fields
        update_cfg = {
            "enabled": False,
            "fields": [
                {
                    "name": "year",
                    "type": "time",
                    "description": "Publication year",
                    "examples": None,
                    "restrict_values": False,
                }
            ],
        }
        client.update_auto_metadata(dataset_id=dataset.id, **update_cfg)
        # Second round-trip: the old "tags" field is fully replaced, not merged.
        cfg2 = client.get_auto_metadata(dataset_id=dataset.id)
        assert cfg2["enabled"] is False
        assert len(cfg2["fields"]) == 1
        assert cfg2["fields"][0]["name"] == "year"
        assert cfg2["fields"][0]["type"] == "time"
@pytest.mark.usefixtures("clear_datasets")
class TestAutoMetadataValidation:
    @pytest.mark.p2
    def test_invalid_field_type_rejected(self, client):
        """An unsupported field `type` literal must be rejected at creation."""
        bad_payload = {
            "name": "auto_metadata_invalid_type",
            "auto_metadata_config": {
                "enabled": True,
                "fields": [
                    {
                        "name": "invalid_type",
                        "type": "unknown",  # invalid literal
                    }
                ],
            },
        }
        with pytest.raises(Exception) as exc_info:
            client.create_dataset(**bad_payload)
        # Pydantic literal_error message should appear
        error_text = str(exc_info.value)
        assert "Input should be" in error_text or "literal_error" in error_text
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_sdk_api/test_dataset_mangement/test_auto_metadata.py",
"license": "Apache License 2.0",
"lines": 114,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
infiniflow/ragflow:test/testcases/test_http_api/test_chat_assistant_management/test_chat_sdk_routes_unit.py | #
# Copyright 2026 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import importlib.util
import sys
from copy import deepcopy
from pathlib import Path
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager:
def route(self, *_args, **_kwargs):
def decorator(func):
return func
return decorator
class _AwaitableValue:
def __init__(self, value):
self._value = value
def __await__(self):
async def _co():
return self._value
return _co().__await__()
class _DummyKB:
def __init__(self, embd_id="embd@factory", chunk_num=1):
self.embd_id = embd_id
self.chunk_num = chunk_num
def to_json(self):
return {"id": "kb-1"}
class _DummyDialogRecord:
def __init__(self):
self._data = {
"id": "chat-1",
"name": "chat-name",
"prompt_config": {
"system": "Answer with {knowledge}",
"parameters": [{"key": "knowledge", "optional": False}],
"prologue": "hello",
"quote": True,
},
"llm_setting": {"temperature": 0.1},
"llm_id": "glm-4",
"similarity_threshold": 0.2,
"vector_similarity_weight": 0.3,
"top_n": 6,
"rerank_id": "",
"top_k": 1024,
"kb_ids": ["kb-1"],
"icon": "icon.png",
}
def to_json(self):
return deepcopy(self._data)
def _run(coro):
return asyncio.run(coro)
def _load_chat_module(monkeypatch):
    """Import api/apps/sdk/chat.py in isolation with heavy deps stubbed.

    Registers lightweight fake packages (deepdoc, xgboost) and a path-only
    `common` package in sys.modules so importing the route module does not
    pull in document parsers or ML libraries, then executes the module with
    a no-op route manager so decorators register nothing.
    """
    repo_root = Path(__file__).resolve().parents[4]

    # Real `common` package, resolved relative to the repository root.
    common_pkg = ModuleType("common")
    common_pkg.__path__ = [str(repo_root / "common")]
    monkeypatch.setitem(sys.modules, "common", common_pkg)

    # Stub `deepdoc` and its parser classes entirely.
    deepdoc_pkg = ModuleType("deepdoc")
    deepdoc_parser_pkg = ModuleType("deepdoc.parser")
    deepdoc_parser_pkg.__path__ = []

    class _StubPdfParser:
        pass

    class _StubExcelParser:
        pass

    class _StubDocxParser:
        pass

    deepdoc_parser_pkg.PdfParser = _StubPdfParser
    deepdoc_parser_pkg.ExcelParser = _StubExcelParser
    deepdoc_parser_pkg.DocxParser = _StubDocxParser
    deepdoc_pkg.parser = deepdoc_parser_pkg
    monkeypatch.setitem(sys.modules, "deepdoc", deepdoc_pkg)
    monkeypatch.setitem(sys.modules, "deepdoc.parser", deepdoc_parser_pkg)

    deepdoc_excel_module = ModuleType("deepdoc.parser.excel_parser")
    deepdoc_excel_module.RAGFlowExcelParser = _StubExcelParser
    monkeypatch.setitem(sys.modules, "deepdoc.parser.excel_parser", deepdoc_excel_module)

    deepdoc_parser_utils = ModuleType("deepdoc.parser.utils")
    deepdoc_parser_utils.get_text = lambda *_args, **_kwargs: ""
    monkeypatch.setitem(sys.modules, "deepdoc.parser.utils", deepdoc_parser_utils)

    # xgboost only needs to be importable.
    monkeypatch.setitem(sys.modules, "xgboost", ModuleType("xgboost"))

    # Load chat.py under a private module name; inject the dummy manager
    # BEFORE exec so the module-level route decorators become no-ops.
    module_name = "test_chat_sdk_routes_unit_module"
    module_path = repo_root / "api" / "apps" / "sdk" / "chat.py"
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    module.manager = _DummyManager()
    monkeypatch.setitem(sys.modules, module_name, module)
    spec.loader.exec_module(module)
    return module
def _set_request_json(monkeypatch, module, payload):
    """Patch the module's JSON-body reader so awaiting it yields *payload*."""

    def _fake_reader():
        # Deep-copy per call so one handler invocation cannot leak
        # mutations into the next.
        return _AwaitableValue(deepcopy(payload))

    monkeypatch.setattr(module, "get_request_json", _fake_reader)
@pytest.mark.p2
def test_create_internal_failure_paths(monkeypatch):
    """Drive create() through its validation and failure branches."""
    module = _load_chat_module(monkeypatch)

    # Datasets bound to different embedding models -> AUTHENTICATION_ERROR.
    _set_request_json(monkeypatch, module, {"name": "chat-a", "dataset_ids": ["kb-1", "kb-2"]})
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: [SimpleNamespace(id="kb")])
    monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: [_DummyKB(chunk_num=1)])
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_ids", lambda _ids: [_DummyKB(embd_id="embd-a@x"), _DummyKB(embd_id="embd-b@y")])
    monkeypatch.setattr(module.TenantLLMService, "split_model_name_and_factory", lambda model: (model.split("@")[0], "factory"))
    res = _run(module.create.__wrapped__("tenant-1"))
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR
    assert "different embedding models" in res["message"]

    # Tenant lookup failure.
    _set_request_json(monkeypatch, module, {"name": "chat-a", "dataset_ids": []})
    monkeypatch.setattr(module.TenantService, "get_by_id", lambda _tid: (False, None))
    res = _run(module.create.__wrapped__("tenant-1"))
    assert res["message"] == "Tenant not found!"

    # DialogService.save returning False -> creation failure.
    monkeypatch.setattr(module.TenantService, "get_by_id", lambda _tid: (True, SimpleNamespace(llm_id="glm-4")))
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [])
    monkeypatch.setattr(module.DialogService, "save", lambda **_kwargs: False)
    res = _run(module.create.__wrapped__("tenant-1"))
    assert res["message"] == "Fail to new a chat!"

    # Saved, but the re-fetch of the new record fails -> same failure message.
    monkeypatch.setattr(module.DialogService, "save", lambda **_kwargs: True)
    monkeypatch.setattr(module.DialogService, "get_by_id", lambda _id: (False, None))
    res = _run(module.create.__wrapped__("tenant-1"))
    assert res["message"] == "Fail to new a chat!"

    # Unknown rerank model: the route must query TenantLLMService with
    # model_type="rerank" and the requested model name.
    _set_request_json(
        monkeypatch,
        module,
        {"name": "chat-rerank", "dataset_ids": [], "prompt": {"rerank_model": "unknown-rerank-model"}},
    )
    monkeypatch.setattr(module.TenantService, "get_by_id", lambda _tid: (True, SimpleNamespace(llm_id="glm-4")))
    rerank_query_calls = []

    def _mock_tenant_llm_query(**kwargs):
        rerank_query_calls.append(kwargs)
        return False

    monkeypatch.setattr(module.TenantLLMService, "query", _mock_tenant_llm_query)
    res = _run(module.create.__wrapped__("tenant-1"))
    assert "`rerank_model` unknown-rerank-model doesn't exist" in res["message"]
    assert rerank_query_calls[-1]["model_type"] == "rerank"
    assert rerank_query_calls[-1]["llm_name"] == "unknown-rerank-model"

    # A client-supplied tenant_id must be rejected outright.
    _set_request_json(monkeypatch, module, {"name": "chat-tenant", "dataset_ids": [], "tenant_id": "tenant-forbidden"})
    res = _run(module.create.__wrapped__("tenant-1"))
    assert res["message"] == "`tenant_id` must not be provided."
@pytest.mark.p2
def test_update_internal_failure_paths(monkeypatch):
    """Drive update() through ownership, validation, and success branches."""
    module = _load_chat_module(monkeypatch)

    # Caller does not own the chat.
    _set_request_json(monkeypatch, module, {"name": "anything"})
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [])
    res = _run(module.update.__wrapped__("tenant-1", "chat-1"))
    assert res["message"] == "You do not own the chat"

    # Tenant lookup failure.
    _set_request_json(monkeypatch, module, {"name": "chat-name"})
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [SimpleNamespace(id="chat-1")])
    monkeypatch.setattr(module.TenantService, "get_by_id", lambda _tid: (False, None))
    res = _run(module.update.__wrapped__("tenant-1", "chat-1"))
    assert res["message"] == "Tenant not found!"

    # Mixed embedding models across the requested datasets.
    _set_request_json(monkeypatch, module, {"dataset_ids": ["kb-1", "kb-2"]})
    monkeypatch.setattr(module.TenantService, "get_by_id", lambda _tid: (True, SimpleNamespace(id="tenant-1")))
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: [SimpleNamespace(id="kb")])
    monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: [_DummyKB(chunk_num=1)])
    monkeypatch.setattr(module.KnowledgebaseService, "get_by_ids", lambda _ids: [_DummyKB(embd_id="embd-a@x"), _DummyKB(embd_id="embd-b@y")])
    monkeypatch.setattr(module.TenantLLMService, "split_model_name_and_factory", lambda model: (model.split("@")[0], "factory"))
    res = _run(module.update.__wrapped__("tenant-1", "chat-1"))
    assert res["code"] == module.RetCode.AUTHENTICATION_ERROR
    assert "different embedding models" in res["message"]

    # update_by_id reporting no rows -> "Chat not found!".
    _set_request_json(monkeypatch, module, {"avatar": "new-avatar"})
    monkeypatch.setattr(module.DialogService, "get_by_id", lambda _id: (True, _DummyDialogRecord()))
    monkeypatch.setattr(module.DialogService, "update_by_id", lambda *_args, **_kwargs: False)
    res = _run(module.update.__wrapped__("tenant-1", "chat-1"))
    assert res["message"] == "Chat not found!"

    # Scaffolding for the remaining cases: updates succeed; DialogService.query
    # resolves "chat-1" to the chat under test and "dup-name" to a different
    # chat; only "glm-4" and "allowed-rerank" exist as tenant models.
    monkeypatch.setattr(module.TenantService, "get_by_id", lambda _tid: (True, SimpleNamespace(id="tenant-1")))
    monkeypatch.setattr(module.DialogService, "get_by_id", lambda _id: (True, _DummyDialogRecord()))
    monkeypatch.setattr(module.DialogService, "update_by_id", lambda *_args, **_kwargs: True)
    monkeypatch.setattr(
        module.DialogService,
        "query",
        lambda **kwargs: (
            [SimpleNamespace(id="chat-1")]
            if kwargs.get("id") == "chat-1"
            else ([SimpleNamespace(id="dup")] if kwargs.get("name") == "dup-name" else [])
        ),
    )
    monkeypatch.setattr(
        module.TenantLLMService,
        "split_model_name_and_factory",
        lambda model: (model.split("@")[0], "factory"),
    )
    monkeypatch.setattr(
        module.TenantLLMService,
        "query",
        lambda **kwargs: kwargs.get("llm_name") in {"glm-4", "allowed-rerank"},
    )

    # Plain flag update succeeds.
    _set_request_json(monkeypatch, module, {"show_quotation": True})
    res = _run(module.update.__wrapped__("tenant-1", "chat-1"))
    assert res["code"] == 0

    # Dataset not accessible to the caller.
    _set_request_json(monkeypatch, module, {"dataset_ids": ["kb-no-owner"]})
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: [])
    res = _run(module.update.__wrapped__("tenant-1", "chat-1"))
    assert "You don't own the dataset kb-no-owner" in res["message"]

    # Dataset accessible but holds no parsed chunks.
    _set_request_json(monkeypatch, module, {"dataset_ids": ["kb-unparsed"]})
    monkeypatch.setattr(module.KnowledgebaseService, "accessible", lambda **_kwargs: [SimpleNamespace(id="kb-unparsed")])
    monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: [_DummyKB(chunk_num=0)])
    res = _run(module.update.__wrapped__("tenant-1", "chat-1"))
    assert "doesn't own parsed file" in res["message"]

    # LLM model unknown to the tenant.
    _set_request_json(monkeypatch, module, {"llm": {"model_name": "unknown-model", "model_type": "unsupported"}})
    res = _run(module.update.__wrapped__("tenant-1", "chat-1"))
    assert "`model_name` unknown-model doesn't exist" in res["message"]

    # Unknown rerank model in the prompt section.
    _set_request_json(
        monkeypatch,
        module,
        {"prompt": {"prompt": "No placeholder", "variables": [{"key": "knowledge", "optional": False}], "rerank_model": "unknown-rerank"}},
    )
    res = _run(module.update.__wrapped__("tenant-1", "chat-1"))
    assert "`rerank_model` unknown-rerank doesn't exist" in res["message"]

    # A required variable absent from the prompt text is rejected.
    _set_request_json(
        monkeypatch,
        module,
        {"prompt": {"prompt": "No placeholder", "variables": [{"key": "knowledge", "optional": False}]}},
    )
    res = _run(module.update.__wrapped__("tenant-1", "chat-1"))
    assert "Parameter 'knowledge' is not used" in res["message"]

    # Optional variables may be omitted from the prompt text.
    _set_request_json(
        monkeypatch,
        module,
        {"prompt": {"prompt": "Optional-only prompt", "variables": [{"key": "maybe", "optional": True}]}},
    )
    res = _run(module.update.__wrapped__("tenant-1", "chat-1"))
    assert res["code"] == 0

    # Empty name rejected.
    _set_request_json(monkeypatch, module, {"name": ""})
    res = _run(module.update.__wrapped__("tenant-1", "chat-1"))
    assert res["message"] == "`name` cannot be empty."

    # Renaming onto another chat's name rejected.
    _set_request_json(monkeypatch, module, {"name": "dup-name"})
    res = _run(module.update.__wrapped__("tenant-1", "chat-1"))
    assert res["message"] == "Duplicated chat name in updating chat."

    # Valid LLM update succeeds.
    _set_request_json(monkeypatch, module, {"llm": {"model_name": "glm-4", "temperature": 0.9}})
    res = _run(module.update.__wrapped__("tenant-1", "chat-1"))
    assert res["code"] == 0
@pytest.mark.p2
def test_delete_duplicate_no_success_path(monkeypatch):
    """delete_chats(): duplicate ids, missing ids, and dedup-success cases."""
    module = _load_chat_module(monkeypatch)

    # Duplicate ids where the delete update reports 0 rows -> DATA_ERROR
    # with the duplicate ids called out.
    _set_request_json(monkeypatch, module, {"ids": ["chat-1", "chat-1"]})
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [SimpleNamespace(id="chat-1")])
    monkeypatch.setattr(module.DialogService, "update_by_id", lambda *_args, **_kwargs: 0)
    res = _run(module.delete_chats.__wrapped__("tenant-1"))
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "Duplicate assistant ids: chat-1" in res["message"]

    # Unknown assistant id -> DATA_ERROR.
    _set_request_json(monkeypatch, module, {"ids": ["missing-chat"]})
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [])
    res = _run(module.delete_chats.__wrapped__("tenant-1"))
    assert res["code"] == module.RetCode.DATA_ERROR
    assert "Assistant(missing-chat) not found." in res["message"]

    # Duplicates are deduplicated: exactly one successful delete is counted.
    _set_request_json(monkeypatch, module, {"ids": ["chat-1", "chat-1"]})
    monkeypatch.setattr(module.DialogService, "query", lambda **_kwargs: [SimpleNamespace(id="chat-1")])
    monkeypatch.setattr(module.DialogService, "update_by_id", lambda *_args, **_kwargs: 1)
    res = _run(module.delete_chats.__wrapped__("tenant-1"))
    assert res["code"] == 0
    assert res["data"]["success_count"] == 1
@pytest.mark.p2
def test_list_missing_kb_warning_and_desc_false(monkeypatch, caplog):
    """list_chat(): a dialog referencing a missing KB logs a warning and
    yields an empty dataset list; also covers the desc="False" query arg."""
    module = _load_chat_module(monkeypatch)
    monkeypatch.setattr(module, "request", SimpleNamespace(args={"desc": "False"}))
    monkeypatch.setattr(module.DialogService, "get_list", lambda *_args, **_kwargs: [
        {
            "id": "chat-1",
            "name": "chat-name",
            "prompt_config": {"system": "Answer with {knowledge}", "parameters": [{"key": "knowledge", "optional": False}], "do_refer": True},
            "similarity_threshold": 0.2,
            "vector_similarity_weight": 0.3,
            "top_n": 6,
            "rerank_id": "",
            "llm_setting": {"temperature": 0.1},
            "llm_id": "glm-4",
            "kb_ids": ["missing-kb"],
            "icon": "icon.png",
        }
    ])
    # Knowledgebase lookup returns nothing, so "missing-kb" cannot resolve.
    monkeypatch.setattr(module.KnowledgebaseService, "query", lambda **_kwargs: [])
    with caplog.at_level("WARNING"):
        res = module.list_chat.__wrapped__("tenant-1")
    assert res["code"] == 0
    assert res["data"][0]["datasets"] == []
    assert res["data"][0]["avatar"] == "icon.png"
    assert "does not exist" in caplog.text
| {
"repo_id": "infiniflow/ragflow",
"file_path": "test/testcases/test_http_api/test_chat_assistant_management/test_chat_sdk_routes_unit.py",
"license": "Apache License 2.0",
"lines": 283,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.