sample_id
stringlengths
21
196
text
stringlengths
105
936k
metadata
dict
category
stringclasses
6 values
huggingface/transformers:tests/models/video_llama_3/test_image_processing_video_llama_3.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import json import tempfile import unittest import numpy as np import requests from transformers.image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD from transformers.models.video_llama_3.image_processing_video_llama_3 import smart_resize from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VideoLlama3ImageProcessor if is_torchvision_available(): from transformers import VideoLlama3ImageProcessorFast class VideoLlama3ImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, num_frames=10, min_resolution=56, max_resolution=1024, min_pixels=14 * 14 * 16, max_pixels=14 * 14 * 16384, do_normalize=True, image_mean=IMAGENET_STANDARD_MEAN, image_std=IMAGENET_STANDARD_STD, do_resize=True, patch_size=14, merge_size=1, do_convert_rgb=True, ): self.parent = parent self.batch_size = batch_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.num_channels = num_channels self.num_frames = num_frames self.image_mean = image_mean self.image_std = image_std self.min_pixels = 
min_pixels self.max_pixels = max_pixels self.patch_size = patch_size self.merge_size = merge_size self.do_resize = do_resize self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "image_mean": self.image_mean, "image_std": self.image_std, "min_pixels": self.min_pixels, "max_pixels": self.max_pixels, "patch_size": self.patch_size, "merge_size": self.merge_size, } def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): images = prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) return [[image] for image in images] def prepare_video_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_video_inputs( batch_size=self.batch_size, num_channels=self.num_channels, num_frames=self.num_frames, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class VideoLlama3ImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = VideoLlama3ImageProcessor if is_vision_available() else None fast_image_processing_class = VideoLlama3ImageProcessorFast if is_torchvision_available() else None def setUp(self): super().setUp() self.image_processor_tester = VideoLlama3ImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_normalize")) 
self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) self.assertTrue(hasattr(image_processing, "patch_size")) self.assertTrue(hasattr(image_processing, "merge_size")) def test_image_processor_to_json_string(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class(**self.image_processor_dict) obj = json.loads(image_processor.to_json_string()) for key, value in self.image_processor_dict.items(): if key not in ["min_pixels", "max_pixels"]: self.assertEqual(obj[key], value) def test_select_best_resolution(self): # Test with a final resize resolution best_resolution = smart_resize(561, 278, factor=28) self.assertEqual(best_resolution, (560, 280)) def test_call_pil(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) for image in image_inputs: self.assertIsInstance(image[0], Image.Image) # Test not batched input process_out = image_processing(image_inputs[0], return_tensors="pt") encoded_images = process_out.pixel_values image_grid_thws = process_out.image_grid_thw expected_output_image_shape = (5329, 588) expected_image_grid_thws = torch.Tensor([[1, 73, 73]]) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) # Test batched process_out = image_processing(image_inputs, return_tensors="pt") encoded_images = process_out.pixel_values image_grid_thws = process_out.image_grid_thw expected_output_image_shape = (37303, 588) expected_image_grid_thws = torch.Tensor([[1, 73, 73]] * 7) self.assertEqual(tuple(encoded_images.shape), 
expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) def test_call_numpy(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True) for image in image_inputs: self.assertIsInstance(image[0], np.ndarray) # Test not batched input process_out = image_processing(image_inputs[0], return_tensors="pt") encoded_images = process_out.pixel_values image_grid_thws = process_out.image_grid_thw expected_output_image_shape = (5329, 588) expected_image_grid_thws = torch.Tensor([[1, 73, 73]]) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) # Test batched process_out = image_processing(image_inputs, return_tensors="pt") encoded_images = process_out.pixel_values image_grid_thws = process_out.image_grid_thw expected_output_image_shape = (37303, 588) expected_image_grid_thws = torch.Tensor([[1, 73, 73]] * 7) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) def test_call_pytorch(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True) for image in image_inputs: self.assertIsInstance(image[0], torch.Tensor) # Test not batched input process_out = image_processing(image_inputs[0], return_tensors="pt") encoded_images = process_out.pixel_values image_grid_thws = process_out.image_grid_thw expected_output_image_shape = (5329, 588) expected_image_grid_thws = torch.Tensor([[1, 73, 73]]) 
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) # Test batched process_out = image_processing(image_inputs, return_tensors="pt") encoded_images = process_out.pixel_values image_grid_thws = process_out.image_grid_thw expected_output_image_shape = (37303, 588) expected_image_grid_thws = torch.Tensor([[1, 73, 73]] * 7) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) @unittest.skip(reason="VideoLlama3ImageProcessor doesn't treat 4 channel PIL and numpy consistently yet") def test_call_numpy_4_channels(self): pass def test_nested_input(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) # Test batched as a list of images process_out = image_processing(image_inputs, return_tensors="pt") encoded_images = process_out.pixel_values image_grid_thws = process_out.image_grid_thw expected_output_image_shape = (37303, 588) expected_image_grid_thws = torch.Tensor([[1, 73, 73]] * 7) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) # Test batched as a nested list of images, where each sublist is one batch image_inputs_nested = image_inputs[:3] + image_inputs[3:] process_out = image_processing(image_inputs_nested, return_tensors="pt") encoded_images_nested = process_out.pixel_values image_grid_thws_nested = process_out.image_grid_thw expected_output_image_shape = (37303, 588) expected_image_grid_thws = torch.Tensor([[1, 73, 73]] * 7) self.assertEqual(tuple(encoded_images_nested.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) # Image processor should return same pixel values, 
independently of ipnut format self.assertTrue((encoded_images_nested == encoded_images).all()) self.assertTrue((image_grid_thws_nested == expected_image_grid_thws).all()) @unittest.skip( reason="`VideoLlama3ImageProcessor` works only with image inputs and doesn't process videos anymore." ) def test_video_inputs(self): pass def test_custom_image_size(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) with tempfile.TemporaryDirectory() as tmpdirname: image_processing.save_pretrained(tmpdirname) image_processor_loaded = image_processing_class.from_pretrained( tmpdirname, max_pixels=56 * 56, min_pixels=28 * 28 ) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) process_out = image_processor_loaded(image_inputs, return_tensors="pt") expected_output_video_shape = [112, 588] self.assertListEqual(list(process_out.pixel_values.shape), expected_output_video_shape) def test_custom_pixels(self): pixel_choices = frozenset(itertools.product((100, 150, 200, 20000), (100, 150, 200, 20000))) for image_processing_class in self.image_processor_list: image_processor_dict = self.image_processor_dict.copy() for a_pixels, b_pixels in pixel_choices: image_processor_dict["min_pixels"] = min(a_pixels, b_pixels) image_processor_dict["max_pixels"] = max(a_pixels, b_pixels) image_processor = image_processing_class(**image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs() # Just checking that it doesn't raise an error image_processor(image_inputs, return_tensors="pt") @require_vision @require_torch def test_slow_fast_equivalence(self): dummy_image = Image.open( requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw ) if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or 
self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) encoding_slow = image_processor_slow(dummy_image, return_tensors="pt") encoding_fast = image_processor_fast(dummy_image, return_tensors="pt") self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values) self.assertEqual(encoding_slow.image_grid_thw.dtype, encoding_fast.image_grid_thw.dtype) self._assert_slow_fast_tensors_equivalence( encoding_slow.image_grid_thw.float(), encoding_fast.image_grid_thw.float() ) @require_vision @require_torch def test_slow_fast_equivalence_batched(self): if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop: self.skipTest( reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors" ) dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) encoding_slow = image_processor_slow(dummy_images, return_tensors="pt") encoding_fast = image_processor_fast(dummy_images, return_tensors="pt") self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values) self.assertEqual(encoding_slow.image_grid_thw.dtype, 
encoding_fast.image_grid_thw.dtype) self._assert_slow_fast_tensors_equivalence( encoding_slow.image_grid_thw.float(), encoding_fast.image_grid_thw.float() ) def test_get_num_patches_without_images(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) num_patches = image_processing.get_number_of_image_patches(height=100, width=100, images_kwargs={}) self.assertEqual(num_patches, 49) num_patches = image_processing.get_number_of_image_patches(height=200, width=50, images_kwargs={}) self.assertEqual(num_patches, 56) num_patches = image_processing.get_number_of_image_patches( height=100, width=100, images_kwargs={"patch_size": 28} ) self.assertEqual(num_patches, 16)
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/video_llama_3/test_image_processing_video_llama_3.py", "license": "Apache License 2.0", "lines": 309, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:tests/models/video_llama_3/test_modeling_video_llama_3.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch VideoLLaMA3 model.""" import copy import gc import inspect import tempfile import unittest import numpy as np import pytest import requests import torch.nn as nn from parameterized import parameterized from PIL import Image from transformers import ( AutoProcessor, VideoLlama3Config, VideoLlama3ForConditionalGeneration, VideoLlama3Model, VideoLlama3VisionConfig, VideoLlama3VisionModel, is_torch_available, ) from transformers.testing_utils import ( Expectations, backend_empty_cache, require_flash_attn, require_torch, require_torch_accelerator, set_config_for_less_flaky_test, set_model_for_less_flaky_test, slow, torch_device, ) from transformers.utils import ( is_torch_bf16_available_on_device, is_torch_fp16_available_on_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION, ModelTesterMixin, floats_tensor, ids_tensor, sdpa_kernel, ) if is_torch_available(): import torch def _test_encoder_eager_matches_sdpa_inference( self, dtype, output_attentions, enable_kernels, atols=None, rtols=None, ): """ This test is written as a regular function to be able to overload it easily with different tolerances. 
Otherwise, `parameterize.expand` prevents it as it removes the original function from the namespace. """ if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not self.all_model_classes[0]._supports_sdpa: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") # convert shorthand name to torch.dtype if dtype == "fp16": dtype = torch.float16 elif dtype == "bf16": dtype = torch.bfloat16 elif dtype == "fp32": dtype = torch.float32 if not is_torch_fp16_available_on_device(torch_device) and dtype == torch.float16: self.skipTest(f"float16 not supported on {torch_device} (on the specific device currently used)") if not is_torch_bf16_available_on_device(torch_device) and dtype == torch.bfloat16: self.skipTest( f"bfloat16 not supported on {torch_device} (on the specific device currently used, e.g. Nvidia T4 GPU)" ) # Dictionary of tolerances for eager <> sdpa tests. Key = (device, sdpa_kernels_enabled, dtype) if atols is None: atols = { ("cpu", False, torch.float32): 1e-6, ("cpu", False, torch.float16): 5e-3, ("cpu", False, torch.bfloat16): 1e-2, ("cpu", True, torch.float32): 1e-6, ("cpu", True, torch.float16): 5e-3, ("cpu", True, torch.bfloat16): 1e-2, ("cuda", False, torch.float32): 1e-6, ("cuda", False, torch.bfloat16): 1e-2, ("cuda", False, torch.float16): 5e-3, ("cuda", True, torch.float32): 1e-6, ("cuda", True, torch.bfloat16): 1e-2, ("cuda", True, torch.float16): 5e-3, } if rtols is None: rtols = { ("cpu", False, torch.float32): 1e-4, ("cpu", False, torch.float16): 5e-3, ("cpu", False, torch.bfloat16): 1e-2, ("cpu", True, torch.float32): 1e-4, ("cpu", True, torch.float16): 5e-3, ("cpu", True, torch.bfloat16): 1e-2, ("cuda", False, torch.float32): 1e-4, ("cuda", False, torch.bfloat16): 1e-2, ("cuda", False, torch.float16): 5e-3, ("cuda", True, torch.float32): 1e-4, ("cuda", True, torch.bfloat16): 3e-2, # (different from others) ("cuda", True, torch.float16): 5e-3, } for model_class in 
self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() set_config_for_less_flaky_test(config) model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_from_pretrained_kwargs = { "pretrained_model_name_or_path": tmpdirname, "dtype": dtype, } if hasattr(config, "use_mask_token") or "use_mask_token" in inspect.signature(model.__init__).parameters: model_from_pretrained_kwargs["use_mask_token"] = True # TODO: remove this try/except, models should have a shared API try: model_sdpa = model_class.from_pretrained(**model_from_pretrained_kwargs, attn_implementation="sdpa") except ValueError: model_sdpa = model_class.from_pretrained(**model_from_pretrained_kwargs) model_sdpa = model_sdpa.eval().to(torch_device) model_eager = model_class.from_pretrained(**model_from_pretrained_kwargs, attn_implementation="eager") model_eager = model_eager.eval().to(torch_device) set_model_for_less_flaky_test(model_eager) set_model_for_less_flaky_test(model_sdpa) # TODO: if we can also check with `batch_size=1` without being flaky? 
for batch_size in [7]: input_data_batch_size = batch_size processed_inputs = {} processed_inputs[model.main_input_name] = inputs_dict[model.main_input_name] for key in getattr(self, "additional_model_inputs", []): # Some models don't have all `additional_model_inputs`, especially when we # craft cases to test model in different settings if key in inputs_dict: processed_inputs[key] = inputs_dict[key] for key, value in processed_inputs.items(): if torch.is_floating_point(value): value = value.to(dtype) if key == "pixel_values": continue # extend value to have at least `input_data_batch_size` elements if value.shape[0] < input_data_batch_size: size = (input_data_batch_size - value.shape[0], *value.shape[1:]) if key == "grid_thw": extension = torch.randint(high=5, size=size, dtype=value.dtype, device=torch_device) elif key == "merge_sizes": extension = torch.ones(size=size, dtype=value.dtype, device=torch_device) value = torch.cat((value, extension), dim=0).to(torch_device) processed_inputs[key] = value[:input_data_batch_size] pixel_values = processed_inputs["pixel_values"] target_len = torch.sum(processed_inputs["grid_thw"].prod(dim=1) // (processed_inputs["merge_sizes"] ** 2)) if pixel_values.size(0) < target_len: size = (input_data_batch_size - value.shape[0], *value.shape[1:]) extension = torch.randn( size=(target_len - pixel_values.size(0)), dtype=pixel_values.dtype, device=torch_device ) elif pixel_values.size(0) > target_len: pixel_values = pixel_values[:target_len] processed_inputs["pixel_values"] = pixel_values processed_inputs.update( { "output_hidden_states": True, "output_attentions": output_attentions, } ) # TODO: test gradients as well (& for FA2 as well!) 
with torch.no_grad(): with sdpa_kernel( enable_flash=enable_kernels, enable_math=True, enable_mem_efficient=enable_kernels, ): prepared_inputs = self._prepare_for_class(processed_inputs, model_class) prepared_inputs = { k: v.to(torch_device) if isinstance(v, torch.Tensor) else v for k, v in prepared_inputs.items() } outputs_eager = model_eager(**prepared_inputs) outputs_sdpa = model_sdpa(**prepared_inputs) key = "hidden_states" # TODO: rename logits -> hidden_states logits_eager = outputs_eager[key][-1] logits_sdpa = outputs_sdpa[key][-1] if torch_device in ["cpu", "cuda"]: atol = atols[torch_device, enable_kernels, dtype] rtol = rtols[torch_device, enable_kernels, dtype] elif torch_device == "hpu": atol = atols["cuda", enable_kernels, dtype] rtol = rtols["cuda", enable_kernels, dtype] elif torch_device == "xpu": # As of PyTorch 2.5 XPU backend supports only torch.nn.attention.SDPBackend.MATH # which is implemented on PyTorch level using aten operators and is # device agnostic with respect to implementation of each aten operator. atol = atols["cuda", False, dtype] rtol = rtols["cuda", False, dtype] else: atol = 1e-7 rtol = 1e-4 # Avoid test flakiness with bf16! # bf16 is not good at precision when the magnitude is larger. We have some models like `SiglipVision` with # this test passing all the time for fp32/fp16 but flaky with bf16. Furthermore, `llama` and `clip` have # this test passing all the time for bf16: it turns out their outputs are of smaller size (0.1 and 1.0) # while `siglip` has outputs with maximal values around 3.0/4.0. outputs_magnitude = float( (torch.max(logits_sdpa.abs().amax(), logits_eager.abs().amax())).detach().to("cpu") ) # The choice of `3e-2` in `outputs_magnitude * 1e-2` might not work if a model has even more larger outputs. # (we can try to analyze the `rtol` more closely element-wise in the future and adjust the `rtol` instead of `atol`). 
computed_atol = outputs_magnitude * 3e-2 if dtype == torch.bfloat16: atol = max(atol, computed_atol) results = [ torch.allclose(_logits_sdpa, _logits_eager, atol=atol, rtol=rtol) for (_logits_sdpa, _logits_eager) in zip(logits_sdpa, logits_eager) ] # If 80% batch elements have matched results, it's fine if np.mean(results) < 0.8: mean_relative_diff = ((logits_sdpa - logits_eager).abs() / (logits_eager.abs() + 1e-12)).mean() raise ValueError( f"mean relative difference for {key}: {mean_relative_diff:.3e}, torch atol = {atol}, torch rtol = " f"{rtol}" ) class VideoLlama3VisionModelTester: def __init__( self, parent, batch_size=12, patch_size=2, num_channels=3, image_size=14, is_training=True, hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.patch_size = patch_size self.num_channels = num_channels self.image_size = image_size self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope self.seq_length = (self.image_size // self.patch_size) ** 2 def get_config(self): return VideoLlama3VisionConfig( patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def prepare_config_and_inputs(self): config = self.get_config() patch_size = config.patch_size pixel_values = floats_tensor( [ self.batch_size * (self.image_size**2) // (patch_size**2), self.num_channels * (patch_size**2), ] ) return config, pixel_values def 
prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs num_patches = self.image_size // config.patch_size inputs_dict = { "pixel_values": pixel_values, "grid_thw": torch.tensor([[1, num_patches, num_patches]] * self.batch_size, device=torch_device), "merge_sizes": torch.tensor([1] * self.batch_size, device=torch_device), } return config, inputs_dict @require_torch class VideoLlama3VisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as SIGLIP does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (VideoLlama3VisionModel,) if is_torch_available() else () additional_model_inputs = ["grid_thw", "merge_sizes"] test_resize_embeddings = False test_cpu_offload = False test_disk_offload_safetensors = False test_disk_offload_bin = False def setUp(self): self.model_tester = VideoLlama3VisionModelTester(self) self.config_tester = ConfigTester(self, config_class=VideoLlama3VisionConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION) def test_eager_matches_sdpa_inference( self, name, dtype, padding_side, use_attention_mask, output_attentions, enable_kernels ): if use_attention_mask: self.skipTest(reason="VideoLlama3VisionModel does not use attention mask") _test_encoder_eager_matches_sdpa_inference(self, dtype, output_attentions, enable_kernels) def test_attention_outputs(self): config, inputs_dict = 
self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True # force eager attention to support output attentions config._attn_implementation = "eager" seq_len = getattr(self.model_tester, "seq_length", None) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True for k in config.sub_configs: getattr(config, k).output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0][0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) 
self.assertListEqual( list(self_attentions[0][0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(copy.deepcopy(config)) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) seq_length = torch.sum(inputs_dict["grid_thw"].prod(dim=1) // (inputs_dict["merge_sizes"] ** 2)) self.assertListEqual( list(hidden_states[0].shape), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True for k in config.sub_configs: getattr(config, k).output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for k in config.sub_configs: getattr(config, k).output_hidden_states = True config.output_hidden_states = True config.output_attentions = self.has_attentions for k in config.sub_configs: getattr(config, k).output_attentions = self.has_attentions # force eager attention to support output attentions config._attn_implementation = "eager" # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = 
model_class._from_config(config, attn_implementation="eager") model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: attentions = outputs.attentions[0][0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) if self.has_attentions: self.assertIsNotNone(attentions.grad) @unittest.skip("Vision model requires additional positional inputs (grid_thw and merge_sizes)") def test_flash_attn_2_inference_equivalence(self): pass @unittest.skip("Vision model requires additional positional inputs (grid_thw and merge_sizes)") def test_flash_attn_2_inference_equivalence_right_padding(self): pass @unittest.skip("Vision model requires additional positional inputs (grid_thw and merge_sizes)") def test_flash_attn_kernels_inference_equivalence(self): pass class VideoLlama3VisionText2TextModelTester: def __init__( self, parent, batch_size=3, seq_length=7, num_channels=3, image_size=14, is_training=True, text_config={ "attention_dropout": 0.0, "bos_token_id": 0, "eos_token_id": 1, "pad_token_id": 2, "hidden_act": "silu", "hidden_size": 32, "intermediate_size": 37, "max_position_embeddings": 512, "max_window_layers": 3, "model_type": "qwen2", "num_attention_heads": 4, "num_hidden_layers": 2, "num_key_value_heads": 2, "rms_norm_eps": 1e-06, "rope_scaling": None, "rope_theta": 1000000.0, "sliding_window": None, "tie_word_embeddings": True, "vocab_size": 99, }, vision_config={ "attention_dropout": 0.0, "hidden_act": "gelu_pytorch_tanh", "hidden_size": 32, "intermediate_size": 64, "layer_norm_eps": 1e-06, "model_type": "video_llama_3_vision", "num_attention_heads": 4, "num_channels": 3, "num_hidden_layers": 2, "patch_size": 14, }, use_token_compression=True, image_token_id=3, video_token_id=4, ): self.parent = parent self.hidden_size = 
text_config["hidden_size"] self.num_hidden_layers = text_config["num_hidden_layers"] self.num_attention_heads = text_config["num_attention_heads"] self.patch_size = vision_config["patch_size"] self.batch_size = batch_size self.seq_length = seq_length self.num_channels = num_channels self.image_size = image_size self.is_training = is_training self.text_config = text_config self.vision_config = vision_config self.use_token_compression = use_token_compression self.image_token_id = image_token_id self.video_token_id = video_token_id self.num_image_tokens = 32 self.seq_length = seq_length + self.num_image_tokens def get_config(self): return VideoLlama3Config( text_config=self.text_config, vision_config=self.vision_config, use_token_compression=self.use_token_compression, image_token_id=self.image_token_id, video_token_id=self.video_token_id, ) def prepare_config_and_inputs(self): config = self.get_config() patch_size = config.vision_config.patch_size pixel_values = floats_tensor( [ self.batch_size * (self.image_size**2) // (patch_size**2), self.num_channels * (patch_size**2), ] ) return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size) attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) input_ids[:, -1] = config.text_config.pad_token_id attention_mask[:, -1] = 0 input_ids[input_ids == self.video_token_id] = config.text_config.pad_token_id input_ids[input_ids == self.image_token_id] = config.text_config.pad_token_id input_ids[:, self.num_image_tokens] = self.image_token_id inputs_dict = { "pixel_values": pixel_values, "image_grid_thw": torch.tensor([[1, 1, 1]] * self.batch_size, device=torch_device), "image_merge_sizes": torch.tensor([1] * self.batch_size, device=torch_device), "input_ids": input_ids, "attention_mask": attention_mask, } 
return config, inputs_dict @require_torch class VideoLlama3ModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): """ Model tester for `VideoLlama3ForConditionalGeneration`. """ all_model_classes = ( ( VideoLlama3Model, VideoLlama3ForConditionalGeneration, ) if is_torch_available() else () ) pipeline_model_mapping = {"image-text-to-text": VideoLlama3ForConditionalGeneration} _is_composite = True def setUp(self): self.model_tester = VideoLlama3VisionText2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=VideoLlama3Config, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_mismatching_num_image_tokens(self): """ Tests that VLMs through an error with explicit message saying what is wrong when number of images don't match number of image tokens in the text. Also we need to test multi-image cases when one prompt has multiple image tokens. """ config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config).to(torch_device) curr_input_dict = copy.deepcopy(input_dict) _ = model(**curr_input_dict) # successfull forward with no modifications # remove one image but leave the image token in text patch_size = config.vision_config.patch_size one_img_length = (self.model_tester.image_size**2) // (patch_size**2) curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-one_img_length:, ...] curr_input_dict["image_grid_thw"] = curr_input_dict["image_grid_thw"][-1:, ...] curr_input_dict["image_merge_sizes"] = curr_input_dict["image_merge_sizes"][-1:, ...] 
with self.assertRaisesRegex(ValueError, "Image features and image tokens do not match"): _ = model(**curr_input_dict) # simulate multi-image case by concatenating inputs where each has exactly one image/image-token input_ids = curr_input_dict["input_ids"][:1] pixel_values = curr_input_dict["pixel_values"][:one_img_length] image_grid_thw = curr_input_dict["image_grid_thw"][:1] image_merge_sizes = curr_input_dict["image_merge_sizes"][:1] input_ids = torch.cat([input_ids, input_ids], dim=0) # one image and two image tokens raise an error with self.assertRaisesRegex(ValueError, "Image features and image tokens do not match"): _ = model( input_ids=input_ids, pixel_values=pixel_values, image_grid_thw=image_grid_thw, image_merge_sizes=image_merge_sizes, ) # two images and two image tokens don't raise an error pixel_values = torch.cat([pixel_values, pixel_values], dim=0) image_grid_thw = torch.cat([image_grid_thw, image_grid_thw], dim=0) image_merge_sizes = torch.cat([image_merge_sizes, image_merge_sizes], dim=0) _ = model( input_ids=input_ids, pixel_values=pixel_values, image_grid_thw=image_grid_thw, image_merge_sizes=image_merge_sizes, ) def attention_mask_padding_matches_padding_free_with_position_ids( self, attn_implementation: str, fa_kwargs: bool = False ): max_new_tokens = 30 for model_class in self.all_generative_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() dummy_input = inputs_dict[model_class.main_input_name] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) # make sure that all models have enough positions for generation if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1 model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) if 0 in inputs_dict["attention_mask"][:, -1]: inputs_dict["attention_mask"] = inputs_dict["attention_mask"].flip(1) 
dummy_attention_mask = inputs_dict["attention_mask"] inputs_dict["input_ids"][~dummy_attention_mask.bool()] = config.get_text_config().pad_token_id model = ( model_class.from_pretrained( tmpdirname, dtype=torch.bfloat16, attn_implementation=attn_implementation, ) .to(torch_device) .eval() ) # flatten padfree_positions = torch.cat( [torch.arange(length) for length in dummy_attention_mask.sum(1).tolist()] ) padfree_positions = padfree_positions.long().unsqueeze(0).to(torch_device) padfree_inputs_dict = { "pixel_values": inputs_dict["pixel_values"], "image_grid_thw": inputs_dict["image_grid_thw"], "image_merge_sizes": inputs_dict["image_merge_sizes"], "input_ids": inputs_dict["input_ids"][dummy_attention_mask.bool()].unsqueeze(0), "position_ids": padfree_positions, } if fa_kwargs: cu_seq_lens = [0] + dummy_attention_mask.sum(1).tolist() cu_seq_lens = torch.tensor(cu_seq_lens, device=torch_device) max_length = cu_seq_lens.diff().max().item() padfree_inputs_dict.update( { "cu_seq_lens_q": cu_seq_lens.cumsum(-1).to(dtype=torch.int32), "cu_seq_lens_k": cu_seq_lens.cumsum(-1).to(dtype=torch.int32), "max_length_q": max_length, "max_length_k": max_length, } ) # We need to do simple forward without cache in roder to trigger packed SDPA/FLEX/EAGER path res_padded = model(**inputs_dict, use_cache=False) res_padfree = model(**padfree_inputs_dict, use_cache=False) logits_padded = res_padded.logits[inputs_dict["attention_mask"].bool()] logits_padfree = res_padfree.logits[0] # acceptable numerical instability tol = torch.finfo(torch.bfloat16).eps torch.testing.assert_close(logits_padded, logits_padfree, rtol=tol, atol=tol) @require_torch @slow class VideoLlama3IntegrationTest(unittest.TestCase): def setUp(self): self.processor = AutoProcessor.from_pretrained("lkhl/VideoLLaMA3-2B-Image-HF") self.messages = [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": "Describe the image."}, ], } ] url = 
"https://github.com/DAMO-NLP-SG/VideoLLaMA3/raw/refs/heads/main/assets/sora.png" self.image = Image.open(requests.get(url, stream=True).raw) def tearDown(self): gc.collect() backend_empty_cache(torch_device) def test_small_model_integration_test(self): model = VideoLlama3ForConditionalGeneration.from_pretrained( "lkhl/VideoLLaMA3-2B-Image-HF", dtype=torch.bfloat16, device_map=torch_device ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text], images=[self.image], return_tensors="pt").to(torch_device) expected_input_ids = [151644, 872, 198] + [151655] * 10549 + [198, 74785, 279, 2168, 13, 151645, 198, 151644, 77091, 198] # fmt: skip self.assertEqual(expected_input_ids, inputs.input_ids[0].tolist()) expected_pixel_slice = torch.tensor( [ [-0.8588, -0.9216, -0.9608], [-0.9922, -0.9922, -0.9922], [-0.9686, -0.9686, -0.9294], [-0.9294, -0.9765, -0.9765], [-0.9922, -0.9922, -0.9843], [-0.6000, -0.4118, -0.3647], ], dtype=torch.float32, device=torch_device, ) torch.testing.assert_close(expected_pixel_slice, inputs.pixel_values[:6, :3], atol=1e-4, rtol=1e-4) output = model.generate(**inputs, max_new_tokens=20, do_sample=False, repetition_penalty=None) # fmt: off EXPECTED_DECODED_TEXT = Expectations( { ("cuda", None): "user\n\nDescribe the image.\nassistant\nThe image captures a vibrant nighttime scene on a bustling city street. A woman in a striking red dress", ("xpu", None): "user\n\nDescribe the image.\nassistant\nThe image captures a vibrant night scene in a bustling Japanese city. 
A woman in a striking red dress", } ).get_expectation() # fmt: on self.assertEqual( self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) def test_small_model_integration_test_batch(self): model = VideoLlama3ForConditionalGeneration.from_pretrained( "lkhl/VideoLLaMA3-2B-Image-HF", dtype=torch.bfloat16, device_map=torch_device ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text, text], images=[self.image, self.image], return_tensors="pt").to( torch_device ) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=20, do_sample=False, repetition_penalty=None) EXPECTED_DECODED_TEXT = [ "user\n\nDescribe the image.\nassistant\nThe image captures a vibrant nighttime scene on a bustling city street. A woman in a striking red dress", "user\n\nDescribe the image.\nassistant\nThe image captures a vibrant nighttime scene on a bustling city street. 
A woman in a striking red dress", ] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) def test_small_model_integration_test_batch_wo_image(self): model = VideoLlama3ForConditionalGeneration.from_pretrained( "lkhl/VideoLLaMA3-2B-Image-HF", dtype=torch.bfloat16, device_map=torch_device ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) messages2 = [ {"role": "user", "content": [{"type": "text", "text": "What is relativity?"}]}, ] text2 = self.processor.apply_chat_template(messages2, tokenize=False, add_generation_prompt=True) inputs = self.processor( text=[text, text2], images=[self.image], padding=True, padding_side="left", return_tensors="pt" ).to(torch_device) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=20, do_sample=False, repetition_penalty=None) # fmt: off EXPECTED_DECODED_TEXT = Expectations( { ("cuda", None): [ "user\n\nDescribe the image.\nassistant\nThe image captures a vibrant nighttime scene on a bustling city street. A woman in a striking red dress", "user\nWhat is relativity?\nassistant\nRelativity is a scientific theory that describes the relationship between space and time. It was first proposed by", ], ("xpu", None): [ "user\n\nDescribe the image.\nassistant\nThe image captures a vibrant night scene in a bustling Japanese city. A woman in a striking red dress", "user\nWhat is relativity?\nassistant\nRelativity is a scientific theory that describes the relationship between space and time. 
It was first proposed by", ], } ).get_expectation() # fmt: on self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) def test_small_model_integration_test_batch_different_resolutions(self): model = VideoLlama3ForConditionalGeneration.from_pretrained( "lkhl/VideoLLaMA3-2B-Image-HF", dtype=torch.bfloat16, device_map=torch_device ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) text2 = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) image2 = self.image.resize((224, 224)) inputs = self.processor( text=[text, text2], images=[self.image, image2], padding=True, padding_side="left", return_tensors="pt" ).to(torch_device) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=20, do_sample=False, repetition_penalty=None) DECODED_TEXT = self.processor.batch_decode(output, skip_special_tokens=True) EXPECTED_DECODED_TEXT = [ "user\n\nDescribe the image.\nassistant\nThe image captures a vibrant nighttime scene on a bustling city street. A woman in a striking red dress", "user\n\nDescribe the image.\nassistant\nThe image depicts a striking urban scene at night. 
A person is standing in the center of a wet", ] # fmt: skip self.assertEqual(DECODED_TEXT, EXPECTED_DECODED_TEXT) @require_flash_attn @require_torch_accelerator @pytest.mark.flash_attn_test def test_small_model_integration_test_batch_flashatt2(self): model = VideoLlama3ForConditionalGeneration.from_pretrained( "lkhl/VideoLLaMA3-2B-Image-HF", dtype=torch.bfloat16, attn_implementation="flash_attention_2", device_map=torch_device, ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text, text], images=[self.image, self.image], return_tensors="pt").to( torch_device ) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=20, do_sample=False, repetition_penalty=None) # fmt: off EXPECTED_DECODED_TEXTS = Expectations( { (None, None): ['user\n\nDescribe the image.\nassistant\nThe image captures a vibrant nighttime scene on a bustling city street. A woman in a striking red dress', 'user\n\nDescribe the image.\nassistant\nThe image captures a vibrant nighttime scene on a bustling city street. A woman in a striking red dress', ], ("xpu", 3): ['user\n\nDescribe the image.\nassistant\nThe image captures a vibrant nighttime scene on a bustling city street. A woman in a striking red dress', 'user\n\nDescribe the image.\nassistant\nThe image depicts a vibrant nighttime scene on a bustling city street. 
A woman in a striking red dress', ], } ) # fmt: on EXPECTED_DECODED_TEXT = EXPECTED_DECODED_TEXTS.get_expectation() DECODED_TEXT = self.processor.batch_decode(output, skip_special_tokens=True) self.assertEqual(DECODED_TEXT, EXPECTED_DECODED_TEXT) @require_flash_attn @require_torch_accelerator @pytest.mark.flash_attn_test def test_small_model_integration_test_batch_wo_image_flashatt2(self): model = VideoLlama3ForConditionalGeneration.from_pretrained( "lkhl/VideoLLaMA3-2B-Image-HF", dtype=torch.bfloat16, attn_implementation="flash_attention_2", device_map=torch_device, ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) messages2 = [ {"role": "user", "content": [{"type": "text", "text": "What is relativity?"}]}, ] text2 = self.processor.apply_chat_template(messages2, tokenize=False, add_generation_prompt=True) inputs = self.processor( text=[text, text2], images=[self.image], padding=True, padding_side="left", return_tensors="pt" ).to(torch_device) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=20, do_sample=False, repetition_penalty=None) EXPECTED_DECODED_TEXT = [ 'user\n\nDescribe the image.\nassistant\nThe image captures a vibrant nighttime scene on a bustling city street. A woman in a striking red dress', 'user\nWhat is relativity?\nassistant\nRelativity is a scientific theory that describes the relationship between space and time. It was first proposed by' ] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, )
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/video_llama_3/test_modeling_video_llama_3.py", "license": "Apache License 2.0", "lines": 848, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:tests/models/video_llama_3/test_processing_video_llama_3.py
# Copyright 2025 the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest import numpy as np from PIL import Image from transformers.testing_utils import require_av, require_torch, require_torchvision, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from transformers import VideoLlama3Processor if is_torch_available(): import torch def prepare_image_inputs(): """This function prepares a list of PIL images""" image_inputs = [np.random.randint(255, size=(3, 15, 50), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs @require_vision @require_torch @require_torchvision class VideoLlama3ProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = VideoLlama3Processor model_id = "lkhl/VideoLLaMA3-2B-Image-HF" @classmethod def _setup_from_pretrained(cls, model_id, **kwargs): return super()._setup_from_pretrained(model_id, patch_size=4, max_pixels=56 * 56, min_pixels=28 * 28, **kwargs) @classmethod def _setup_test_attributes(cls, processor): cls.image_token = processor.image_token def prepare_image_inputs(self, batch_size: int | None = None): """This function prepares a list of PIL images for testing""" if batch_size is None: return prepare_image_inputs()[0] if batch_size < 1: raise ValueError("batch_size must be greater than 
0") return prepare_image_inputs() * batch_size # Copied from tests.models.llava.test_processing_llava.LlavaProcessorTest.test_get_num_vision_tokens def test_get_num_vision_tokens(self): "Tests general functionality of the helper used internally in vLLM" processor = self.get_processor() output = processor._get_num_multimodal_tokens(image_sizes=[(100, 100), (300, 100), (500, 30)]) self.assertTrue("num_image_tokens" in output) self.assertEqual(len(output["num_image_tokens"]), 3) self.assertTrue("num_image_patches" in output) self.assertEqual(len(output["num_image_patches"]), 3) @require_torch @require_av def _test_apply_chat_template( self, modality: str, batch_size: int, return_tensors: str, input_name: str, processor_name: str, input_data: list[str], ): processor = self.get_processor() if processor.chat_template is None: self.skipTest("Processor has no chat template") if processor_name not in self.processor_class.get_attributes(): self.skipTest(f"{processor_name} attribute not present in {self.processor_class}") batch_messages = [ [ { "role": "user", "content": [{"type": "text", "text": "Describe this."}], }, ] ] * batch_size # Test that jinja can be applied formatted_prompt = processor.apply_chat_template(batch_messages, add_generation_prompt=True, tokenize=False) self.assertEqual(len(formatted_prompt), batch_size) # Test that tokenizing with template and directly with `self.tokenizer` gives same output formatted_prompt_tokenized = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, return_tensors=return_tensors ) add_special_tokens = True if processor.tokenizer.bos_token is not None and formatted_prompt[0].startswith(processor.tokenizer.bos_token): add_special_tokens = False tok_output = processor.tokenizer( formatted_prompt, return_tensors=return_tensors, add_special_tokens=add_special_tokens ) expected_output = tok_output.input_ids self.assertListEqual(expected_output.tolist(), formatted_prompt_tokenized.tolist()) # Test 
that kwargs passed to processor's `__call__` are actually used tokenized_prompt_100 = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, padding="max_length", truncation=True, return_tensors=return_tensors, max_length=100, ) self.assertEqual(len(tokenized_prompt_100[0]), 100) # Test that `return_dict=True` returns text related inputs in the dict out_dict_text = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors=return_tensors, ) self.assertTrue(all(key in out_dict_text for key in ["input_ids", "attention_mask"])) self.assertEqual(len(out_dict_text["input_ids"]), batch_size) self.assertEqual(len(out_dict_text["attention_mask"]), batch_size) # Test that with modality URLs and `return_dict=True`, we get modality inputs in the dict for idx, url in enumerate(input_data[:batch_size]): batch_messages[idx][0]["content"] = [batch_messages[idx][0]["content"][0], {"type": modality, "url": url}] out_dict = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors=return_tensors, num_frames=2, # by default no more than 2 frames, otherwise too slow ) input_name = getattr(self, input_name) self.assertTrue(input_name in out_dict) self.assertEqual(len(out_dict["input_ids"]), batch_size) self.assertEqual(len(out_dict["attention_mask"]), batch_size) if modality == "video": # qwen pixels don't scale with bs same way as other models, calculate expected video token count based on video_grid_thw expected_video_token_count = 0 for thw in out_dict["video_grid_thw"]: expected_video_token_count += thw[0] * thw[1] * thw[2] mm_len = expected_video_token_count else: mm_len = batch_size * 192 self.assertEqual(len(out_dict[input_name]), mm_len) return_tensor_to_type = {"pt": torch.Tensor, "np": np.ndarray, None: list} for k in out_dict: self.assertIsInstance(out_dict[k], return_tensor_to_type[return_tensors]) @require_av 
def test_apply_chat_template_video_frame_sampling(self): processor = self.get_processor() if processor.chat_template is None: self.skipTest("Processor has no chat template") signature = inspect.signature(processor.__call__) if "videos" not in {*signature.parameters.keys()} or ( signature.parameters.get("videos") is not None and signature.parameters["videos"].annotation == inspect._empty ): self.skipTest("Processor doesn't accept videos at input") messages = [ [ { "role": "user", "content": [ {"type": "video"}, {"type": "text", "text": "What is shown in this video?"}, ], }, ] ] formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False) self.assertEqual(len(formatted_prompt), 1) formatted_prompt_tokenized = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True) expected_output = processor.tokenizer(formatted_prompt, return_tensors=None).input_ids self.assertListEqual(expected_output, formatted_prompt_tokenized) out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True) self.assertListEqual(list(out_dict.keys()), ["input_ids", "attention_mask"]) # Add video URL for return dict and load with `num_frames` arg messages[0][0]["content"][0] = { "type": "video", "url": "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/Big_Buck_Bunny_720_10s_10MB.mp4", } num_frames = 3 out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, num_frames=num_frames, ) self.assertTrue(self.videos_input_name in out_dict_with_video) self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 180) # Load with `fps` arg fps = 1 out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, fps=fps, ) self.assertTrue(self.videos_input_name in out_dict_with_video) 
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 80) # Load with `fps` and `num_frames` args, should raise an error with self.assertRaises(ValueError): out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, fps=fps, num_frames=num_frames, ) # Load without any arg should load the whole video out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, ) self.assertTrue(self.videos_input_name in out_dict_with_video) self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 1200) # Load video as a list of frames (i.e. images). NOTE: each frame should have same size # because we assume they come from one video messages[0][0]["content"][0] = { "type": "video", "url": [ "https://www.ilankelman.org/stopsigns/australia.jpg", "https://www.ilankelman.org/stopsigns/australia.jpg", ], } out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, ) self.assertTrue(self.videos_input_name in out_dict_with_video) self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 192) # When the inputs are frame URLs/paths we expect that those are already # sampled and will raise an error is asked to sample again. with self.assertRaisesRegex( ValueError, "Sampling frames from a list of images is not supported! 
Set `do_sample_frames=False`" ): out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, do_sample_frames=True, ) def test_kwargs_overrides_custom_image_processor_kwargs(self): processor = self.get_processor() self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs() image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input, return_tensors="pt") self.assertEqual(inputs[self.images_input_name].shape[0], 52) inputs = processor(text=input_str, images=image_input, max_pixels=56 * 56 * 4, return_tensors="pt") self.assertEqual(inputs[self.images_input_name].shape[0], 52) def test_special_mm_token_truncation(self): """Tests that special vision tokens do not get truncated when `truncation=True` is set.""" processor = self.get_processor() input_str = self.prepare_text_inputs(batch_size=2, modalities="image") image_input = self.prepare_image_inputs(batch_size=2) _ = processor( text=input_str, images=image_input, return_tensors="pt", truncation=None, padding=True, ) with self.assertRaises(ValueError): _ = processor( text=input_str, images=image_input, return_tensors="pt", truncation=True, padding=True, max_length=20, )
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/video_llama_3/test_processing_video_llama_3.py", "license": "Apache License 2.0", "lines": 278, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:tests/models/video_llama_3/test_video_processing_video_llama_3.py
# Copyright 2025 the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import unittest import numpy as np from transformers.image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers.image_utils import get_image_size from transformers.models.video_llama_3.video_processing_video_llama_3 import smart_resize if is_torchvision_available(): from transformers import VideoLlama3VideoProcessor class VideoLlama3VideoProcessingTester: def __init__( self, parent, batch_size=5, num_frames=8, num_channels=3, min_resolution=30, max_resolution=80, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=IMAGENET_STANDARD_MEAN, image_std=IMAGENET_STANDARD_STD, do_convert_rgb=True, temporal_patch_size=2, patch_size=14, min_pixels=20 * 20, max_pixels=100 * 100 * 8, merge_size=2, ): size = size if size is not None else {"shortest_edge": 400, "longest_edge": 80000} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_frames = num_frames self.num_channels = num_channels 
self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb self.temporal_patch_size = temporal_patch_size self.patch_size = patch_size self.min_pixels = min_pixels self.max_pixels = max_pixels self.merge_size = merge_size def prepare_video_processor_dict(self): return { "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, "temporal_patch_size": self.temporal_patch_size, "patch_size": self.patch_size, "min_pixels": self.min_pixels, "max_pixels": self.max_pixels, "merge_size": self.merge_size, } @require_vision def expected_output_video_shape(self, videos, num_frames=None): num_frames = num_frames if num_frames is not None else self.num_frames grid_t = num_frames // self.temporal_patch_size hidden_dim = self.num_channels * self.temporal_patch_size * self.patch_size * self.patch_size seq_len = 0 for video in videos: if isinstance(video[0], Image.Image): video = np.stack([np.array(frame) for frame in video]) height, width = get_image_size(video) resized_height, resized_width = smart_resize( height, width, factor=self.patch_size * self.merge_size, min_pixels=self.min_pixels, max_pixels=self.max_pixels, ) grid_h, grid_w = resized_height // self.patch_size, resized_width // self.patch_size seq_len += grid_t * grid_h * grid_w return [seq_len, hidden_dim] def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"): videos = prepare_video_inputs( batch_size=self.batch_size, num_frames=self.num_frames, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, 
return_tensors=return_tensors, ) return videos @require_torch @require_vision class VideoLlama3VideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase): fast_video_processing_class = VideoLlama3VideoProcessor if is_torchvision_available() else None def setUp(self): super().setUp() self.video_processor_tester = VideoLlama3VideoProcessingTester(self) @property def video_processor_dict(self): return self.video_processor_tester.prepare_video_processor_dict() def test_video_processor_properties(self): video_processing = self.fast_video_processing_class(**self.video_processor_dict) self.assertTrue(hasattr(video_processing, "do_resize")) self.assertTrue(hasattr(video_processing, "size")) self.assertTrue(hasattr(video_processing, "do_center_crop")) self.assertTrue(hasattr(video_processing, "center_crop")) self.assertTrue(hasattr(video_processing, "do_normalize")) self.assertTrue(hasattr(video_processing, "image_mean")) self.assertTrue(hasattr(video_processing, "image_std")) self.assertTrue(hasattr(video_processing, "do_convert_rgb")) def test_video_processor_from_dict_with_kwargs(self): video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict) self.assertEqual(video_processor.size, {"shortest_edge": 400, "longest_edge": 80000}) self.assertEqual(video_processor.crop_size, {"height": 18, "width": 18}) video_processor = self.fast_video_processing_class.from_dict( self.video_processor_dict, size={"shortest_edge": 100, "longest_edge": 200} ) self.assertEqual(video_processor.size, {"shortest_edge": 100, "longest_edge": 200}) def test_video_processor_to_json_string(self): for video_processing_class in self.video_processor_list: video_processor = video_processing_class(**self.video_processor_dict) obj = json.loads(video_processor.to_json_string()) for key, value in self.video_processor_dict.items(): if key not in ["min_pixels", "max_pixels"]: self.assertEqual(obj[key], value) def test_call_pil(self): for video_processing_class in 
self.video_processor_list: # Initialize video_processing video_processing = video_processing_class(**self.video_processor_dict) video_inputs = self.video_processor_tester.prepare_video_inputs( equal_resolution=False, return_tensors="pil" ) # Each video is a list of PIL Images for video in video_inputs: self.assertIsInstance(video[0], Image.Image) # Test not batched input encoded_videos = video_processing(video_inputs[0], return_tensors="pt")[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]]) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) # Test batched encoded_videos = video_processing(video_inputs, return_tensors="pt")[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) def test_call_numpy(self): for video_processing_class in self.video_processor_list: # Initialize video_processing video_processing = video_processing_class(**self.video_processor_dict) # create random numpy tensors video_inputs = self.video_processor_tester.prepare_video_inputs( equal_resolution=False, return_tensors="np" ) for video in video_inputs: self.assertIsInstance(video, np.ndarray) # Test not batched input encoded_videos = video_processing(video_inputs[0], return_tensors="pt")[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]]) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) # Test batched encoded_videos = video_processing(video_inputs, return_tensors="pt")[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) def test_call_pytorch(self): for video_processing_class in self.video_processor_list: # Initialize video_processing 
video_processing = video_processing_class(**self.video_processor_dict) # create random PyTorch tensors video_inputs = self.video_processor_tester.prepare_video_inputs( equal_resolution=False, return_tensors="torch" ) for video in video_inputs: self.assertIsInstance(video, torch.Tensor) # Test not batched input encoded_videos = video_processing(video_inputs[0], return_tensors="pt")[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]]) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) # Test batched expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs) encoded_videos = video_processing(video_inputs, return_tensors="pt")[self.input_name] self.assertEqual( list(encoded_videos.shape), expected_output_video_shape, ) def test_nested_input(self): """Tests that the processor can work with nested list where each video is a list of arrays""" for video_processing_class in self.video_processor_list: video_processing = video_processing_class(**self.video_processor_dict) video_inputs = self.video_processor_tester.prepare_video_inputs( equal_resolution=False, return_tensors="np" ) # Test not batched input video_inputs_nested = [list(video) for video in video_inputs] encoded_videos = video_processing(video_inputs_nested[0], return_tensors="pt")[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]]) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) # Test batched expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs) encoded_videos = video_processing(video_inputs_nested, return_tensors="pt")[self.input_name] self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) @unittest.skip("Skip for now, the test needs adjustment fo Qwen2VL") def test_call_numpy_4_channels(self): for video_processing_class 
in self.video_processor_list: # Test that can process videos which have an arbitrary number of channels # Initialize video_processing video_processor = video_processing_class(**self.video_processor_dict) # create random numpy tensors self.video_processor_tester.num_channels = 4 video_inputs = self.video_processor_tester.prepare_video_inputs( equal_resolution=False, return_tensors="np" ) # Test not batched input encoded_videos = video_processor( video_inputs[0], return_tensors="pt", input_data_format="channels_last", image_mean=0, image_std=1, )[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]]) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) # Test batched encoded_videos = video_processor( video_inputs, return_tensors="pt", input_data_format="channels_last", image_mean=0, image_std=1, )[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) def test_call_sample_frames(self): for video_processing_class in self.video_processor_list: video_processing = video_processing_class(**self.video_processor_dict) prev_num_frames = self.video_processor_tester.num_frames self.video_processor_tester.num_frames = 8 video_inputs = self.video_processor_tester.prepare_video_inputs( equal_resolution=False, return_tensors="torch", ) # Force set sampling to False. 
No sampling is expected even when `num_frames` exists video_processing.do_sample_frames = False encoded_videos = video_processing(video_inputs[0], return_tensors="pt", num_frames=3)[self.input_name] encoded_videos_batched = video_processing(video_inputs, return_tensors="pt", num_frames=3)[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]]) expected_output_video_shape_batched = self.video_processor_tester.expected_output_video_shape(video_inputs) self.assertListEqual(list(encoded_videos.shape), expected_output_video_shape) self.assertListEqual(list(encoded_videos_batched.shape), expected_output_video_shape_batched) # Set sampling to True. Video frames should be sampled with `num_frames` in the output video_processing.do_sample_frames = True encoded_videos = video_processing(video_inputs[0], return_tensors="pt", num_frames=4)[self.input_name] encoded_videos_batched = video_processing(video_inputs, return_tensors="pt", num_frames=4)[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape( [video_inputs[0]], num_frames=4 ) expected_output_video_shape_batched = self.video_processor_tester.expected_output_video_shape( video_inputs, num_frames=4 ) self.assertListEqual(list(encoded_videos.shape), expected_output_video_shape) self.assertListEqual(list(encoded_videos_batched.shape), expected_output_video_shape_batched) metadata = [[{"duration": 2.0, "total_num_frames": 8, "fps": 4}]] batched_metadata = metadata * len(video_inputs) encoded_videos = video_processing(video_inputs[0], return_tensors="pt", fps=3, video_metadata=metadata)[ self.input_name ] encoded_videos_batched = video_processing( video_inputs, return_tensors="pt", fps=3, video_metadata=batched_metadata )[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape( [video_inputs[0]], num_frames=6 ) expected_output_video_shape_batched = 
self.video_processor_tester.expected_output_video_shape( video_inputs, num_frames=6 ) self.assertListEqual(list(encoded_videos.shape), expected_output_video_shape) self.assertListEqual(list(encoded_videos_batched.shape), expected_output_video_shape_batched) # We should raise error when asked to sample more frames than there are in input video with self.assertRaises(ValueError): encoded_videos = video_processing(video_inputs[0], return_tensors="pt", num_frames=10)[self.input_name] encoded_videos_batched = video_processing(video_inputs, return_tensors="pt", num_frames=10)[ self.input_name ] # Assign back the actual num frames in tester self.video_processor_tester.num_frames = prev_num_frames
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/video_llama_3/test_video_processing_video_llama_3.py", "license": "Apache License 2.0", "lines": 310, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:src/transformers/models/videomae/video_processing_videomae.py
# Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Video processor class for VideoMAE.""" from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, PILImageResampling from ...video_processing_utils import BaseVideoProcessor class VideoMAEVideoProcessor(BaseVideoProcessor): resample = PILImageResampling.BILINEAR image_mean = IMAGENET_STANDARD_MEAN image_std = IMAGENET_STANDARD_STD size = {"shortest_edge": 224} default_to_square = False crop_size = {"height": 224, "width": 224} do_resize = True do_center_crop = True do_rescale = True rescale_factor = 1 / 255 do_normalize = True do_convert_rgb = True do_sample_frames = False # Set to False for backward compatibility with image processor workflows. model_input_names = ["pixel_values"] def preprocess(self, videos, **kwargs): batch = super().preprocess(videos, **kwargs) batch["pixel_values"] = batch.pop("pixel_values_videos") return batch __all__ = ["VideoMAEVideoProcessor"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/videomae/video_processing_videomae.py", "license": "Apache License 2.0", "lines": 36, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:tests/models/videomae/test_video_processing_videomae.py
# Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from PIL import Image from transformers.image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD from transformers.testing_utils import require_torch, require_torchvision, require_vision from transformers.utils import is_torchvision_available, is_vision_available from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs if is_vision_available(): if is_torchvision_available(): from transformers import VideoMAEImageProcessor, VideoMAEVideoProcessor class VideoMAEVideoProcessingTester: def __init__( self, parent, batch_size=5, num_frames=8, num_channels=3, image_size=18, min_resolution=30, max_resolution=80, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=IMAGENET_STANDARD_MEAN, image_std=IMAGENET_STANDARD_STD, do_convert_rgb=True, ): super().__init__() size = size if size is not None else {"shortest_edge": 20} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_frames = num_frames self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale 
self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb def prepare_video_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def expected_output_video_shape(self, videos): return self.num_frames, self.num_channels, self.crop_size["height"], self.crop_size["width"] def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"): videos = prepare_video_inputs( batch_size=self.batch_size, num_frames=self.num_frames, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, return_tensors=return_tensors, ) return videos @require_torch @require_vision @require_torchvision class VideoMAEVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase): fast_video_processing_class = VideoMAEVideoProcessor if is_torchvision_available() else None input_name = "pixel_values" def setUp(self): super().setUp() self.video_processor_tester = VideoMAEVideoProcessingTester(self) @property def video_processor_dict(self): return self.video_processor_tester.prepare_video_processor_dict() def test_video_processor_properties(self): video_processing = self.fast_video_processing_class(**self.video_processor_dict) self.assertTrue(hasattr(video_processing, "do_resize")) self.assertTrue(hasattr(video_processing, "size")) self.assertTrue(hasattr(video_processing, "do_center_crop")) self.assertTrue(hasattr(video_processing, "center_crop")) self.assertTrue(hasattr(video_processing, "do_normalize")) self.assertTrue(hasattr(video_processing, "image_mean")) self.assertTrue(hasattr(video_processing, 
"image_std")) self.assertTrue(hasattr(video_processing, "do_convert_rgb")) self.assertTrue(hasattr(video_processing, "model_input_names")) self.assertIn("pixel_values", video_processing.model_input_names) def test_pixel_value_identity(self): """ Verify that VideoMAEVideoProcessor (TorchCodec-based) produces pixel tensors numerically similar to those from VideoMAEImageProcessor (PIL-based). Minor (<1%) differences are expected due to color conversion and interpolation. """ video = self.video_processor_tester.prepare_video_inputs(return_tensors="np") video_processor = VideoMAEVideoProcessor(**self.video_processor_dict) image_processor = VideoMAEImageProcessor(**self.video_processor_dict) video_frames_np = video[0] video_frames_pil = [Image.fromarray(frame.astype("uint8")) for frame in video_frames_np] video_out = video_processor(video_frames_pil, return_tensors="pt") image_out = image_processor(video_frames_pil, return_tensors="pt") torch.testing.assert_close( video_out["pixel_values"], image_out["pixel_values"], rtol=5e-2, atol=1e-2, msg=( "Pixel values differ slightly between VideoMAEVideoProcessor " "and VideoMAEImageProcessor. " "Differences ≤1% are expected due to YUV→RGB conversion and " "interpolation behavior in different decoders." ), )
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/videomae/test_video_processing_videomae.py", "license": "Apache License 2.0", "lines": 139, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:utils/modular_integrations.py
import os import libcst as cst # Files from external libraries that should not be tracked # E.g. for habana, we don't want to track the dependencies from `modeling_all_models.py` as it is not part of the transformers library EXCLUDED_EXTERNAL_FILES = { "habana": [{"name": "modeling_all_models", "type": "modeling"}], } def convert_relative_import_to_absolute( import_node: cst.ImportFrom, file_path: str, package_name: str | None = "transformers", ) -> cst.ImportFrom: """ Convert a relative libcst.ImportFrom node into an absolute one, using the file path and package name. Args: import_node: A relative import node (e.g. `from ..utils import helper`) file_path: Path to the file containing the import (can be absolute or relative) package_name: The top-level package name (e.g. 'myproject') Returns: A new ImportFrom node with the absolute import path """ if not (import_node.relative and len(import_node.relative) > 0): return import_node # Already absolute file_path = os.path.abspath(file_path) rel_level = len(import_node.relative) # Strip file extension and split into parts file_path_no_ext = file_path.removesuffix(".py") file_parts = file_path_no_ext.split(os.path.sep) # Ensure the file path includes the package name if package_name not in file_parts: raise ValueError(f"Package name '{package_name}' not found in file path '{file_path}'") # Slice file_parts starting from the package name pkg_index = file_parts.index(package_name) module_parts = file_parts[pkg_index + 1 :] # e.g. 
['module', 'submodule', 'foo'] if len(module_parts) < rel_level: raise ValueError(f"Relative import level ({rel_level}) goes beyond package root.") base_parts = module_parts[:-rel_level] # Flatten the module being imported (if any) def flatten_module(module: cst.BaseExpression | None) -> list[str]: if not module: return [] if isinstance(module, cst.Name): return [module.value] elif isinstance(module, cst.Attribute): parts = [] while isinstance(module, cst.Attribute): parts.insert(0, module.attr.value) module = module.value if isinstance(module, cst.Name): parts.insert(0, module.value) return parts return [] import_parts = flatten_module(import_node.module) # Combine to get the full absolute import path full_parts = [package_name] + base_parts + import_parts # Handle special case where the import comes from a namespace package (e.g. optimum with `optimum.habana`, `optimum.intel` instead of `src.optimum`) if package_name != "transformers" and file_parts[pkg_index - 1] != "src": full_parts = [file_parts[pkg_index - 1]] + full_parts # Build the dotted module path dotted_module: cst.BaseExpression | None = None for part in full_parts: name = cst.Name(part) dotted_module = name if dotted_module is None else cst.Attribute(value=dotted_module, attr=name) # Return a new ImportFrom node with absolute import return import_node.with_changes(module=dotted_module, relative=[]) def convert_to_relative_import(import_node: cst.ImportFrom, file_path: str, package_name: str) -> cst.ImportFrom: """ Convert an absolute import to a relative one if it belongs to `package_name`. Parameters: - node: The ImportFrom node to possibly transform. - file_path: Absolute path to the file containing the import (e.g., '/path/to/mypackage/foo/bar.py'). - package_name: The top-level package name (e.g., 'mypackage'). Returns: - A possibly modified ImportFrom node. 
""" if import_node.relative: return import_node # Already relative import # Extract module name string from ImportFrom def get_module_name(module): if isinstance(module, cst.Name): return module.value, [module.value] elif isinstance(module, cst.Attribute): parts = [] while isinstance(module, cst.Attribute): parts.append(module.attr.value) module = module.value if isinstance(module, cst.Name): parts.append(module.value) parts.reverse() return ".".join(parts), parts return "", None module_name, submodule_list = get_module_name(import_node.module) # Check if it's from the target package if ( not (module_name.startswith(package_name + ".") or module_name.startswith("optimum." + package_name + ".")) and module_name != package_name ): return import_node # Not from target package # Locate the package root inside the file path norm_file_path = os.path.normpath(file_path) parts = norm_file_path.split(os.sep) try: pkg_index = parts.index(package_name) except ValueError: # Package name not found in path — assume we can't resolve relative depth return import_node # Depth is how many directories after the package name before the current file depth = len(parts) - pkg_index - 1 # exclude the .py file itself for i, submodule in enumerate(parts[pkg_index + 1 :]): if submodule == submodule_list[2 + i]: depth -= 1 else: break # Create the correct number of dots relative = [cst.Dot()] * depth if depth > 0 else [cst.Dot()] # Strip package prefix from import module path if module_name.startswith("optimum." + package_name + "."): stripped_name = module_name[len("optimum." 
+ package_name) :].lstrip(".") else: stripped_name = module_name[len(package_name) :].lstrip(".") # Build new module node if stripped_name == "": new_module = None else: name_parts = stripped_name.split(".")[i:] new_module = cst.Name(name_parts[0]) for part in name_parts[1:]: new_module = cst.Attribute(value=new_module, attr=cst.Name(part)) return import_node.with_changes(module=new_module, relative=relative) class AbsoluteImportTransformer(cst.CSTTransformer): def __init__(self, relative_path: str, source_library: str): super().__init__() self.relative_path = relative_path self.source_library = source_library def leave_ImportFrom(self, original_node: cst.ImportFrom, updated_node: cst.ImportFrom) -> cst.ImportFrom: return convert_relative_import_to_absolute( import_node=updated_node, file_path=self.relative_path, package_name=self.source_library ) class RelativeImportTransformer(cst.CSTTransformer): def __init__(self, relative_path: str, source_library: str): super().__init__() self.relative_path = relative_path self.source_library = source_library def leave_ImportFrom(self, original_node: cst.ImportFrom, updated_node: cst.ImportFrom) -> cst.ImportFrom: return convert_to_relative_import(updated_node, self.relative_path, self.source_library)
{ "repo_id": "huggingface/transformers", "file_path": "utils/modular_integrations.py", "license": "Apache License 2.0", "lines": 146, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
huggingface/transformers:src/transformers/models/mllama/image_processing_mllama_fast.py
# Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Union import torch import torchvision.transforms.v2.functional as tvF from PIL import Image from ...image_processing_utils_fast import ( BaseImageProcessorFast, BatchFeature, ImageInput, SizeDict, TensorType, Unpack, group_images_by_shape, reorder_images, ) from ...image_transforms import split_to_tiles from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, PILImageResampling, make_nested_list_of_images, ) from ...utils import auto_docstring from .image_processing_mllama import ( MllamaImageProcessorKwargs, get_all_supported_aspect_ratios, get_image_size_fit_to_canvas, get_optimal_tiled_canvas, ) def _validate_size(size: SizeDict) -> None: if not (size.height and size.width): raise ValueError(f"Argument `size` must be a dictionary with keys 'height' and 'width'. 
Got: {size}") if size.height != size.width: raise ValueError(f"Argument `size` must have the same height and width, got {size}") def _validate_mllama_preprocess_arguments(do_resize, size, do_pad, max_image_tiles): if not do_pad: raise ValueError("MllamaImageProcessor doesn't support `do_pad=False` mode.") if not do_resize: raise ValueError("MllamaImageProcessor doesn't support `do_resize=False` mode.") if max_image_tiles is None or max_image_tiles <= 0: raise ValueError(f"MllamaImageProcessor `max_image_tiles` must be a positive integer, got {max_image_tiles}.") _validate_size(size) def build_aspect_ratio_mask(aspect_ratios: list[tuple[int, int]], max_image_tiles: int) -> "torch.Tensor": """ Builds a mask for the aspect ratios of the images. Args: aspect_ratios (`List[List[Tuple[int, int]]]`): A list of lists containing aspect ratios for each image in the batch. Each aspect ratio is represented as a tuple of (width, height) in terms of number of tiles. max_image_tiles (`int`): The maximum number of tiles any image can be split into. Returns: `torch.Tensor`: A 3D torch.Tensor of shape (batch_size, max_num_images, max_image_tiles). The mask contains 1s for valid tiles and 0s for padding. 
""" batch_size = len(aspect_ratios) max_num_images = max(len(row) for row in aspect_ratios) aspect_ratio_mask = torch.zeros((batch_size, max_num_images, max_image_tiles), dtype=torch.long) # Set the first tile to 1 for all aspect ratios # because in original implementation aspect ratios are padded with (1, 1), # but original code examples are not built to handle batches, so we might remove it later aspect_ratio_mask[:, :, 0] = 1 # Set the aspect ratio mask for the rest of the tiles for i, sample_aspect_ratios in enumerate(aspect_ratios): for j, (num_tiles_w, num_tiles_h) in enumerate(sample_aspect_ratios): aspect_ratio_mask[i, j, : num_tiles_w * num_tiles_h] = 1 return aspect_ratio_mask def pad_batches_and_tiles( batch_images: list[list["torch.Tensor"]], max_image_tiles: int, ) -> tuple["torch.Tensor", list[list[int]]]: """ Stack a list of lists of images with variable lengths into a torch.Tensor, applying zero padding as needed. Each list in the input represents a batch sample, and each image within a list is expected to be pre-split into tiles. The resulting array will have a shape of (batch_size, max_num_images, max_image_tiles, channels, tile_height, tile_width). Args: batch_images (`List[List[torch.Tensor]]`): A list of lists of image tiles. Each inner list represents a batch sample containing multiple images, where each image is pre-split into tiles. The shape of each tile array is (num_tiles, channels, tile_height, tile_width). max_image_tiles (int): The maximum number of tiles any image was potantially split. Returns: `Tuple[torch.Tensor, List[List[int]]]`: A tuple containing: - stacked_images (`torch.Tensor`): A numpy array of stacked images with shape (batch_size, max_num_images, max_image_tiles, channels, tile_height, tile_width). - all_num_tiles (`List[List[int]]`): A list of lists containing the number of tiles for each image in each batch sample. 
""" # Determine output shape batch_size = len(batch_images) max_num_images = max(len(images) for images in batch_images) shapes = [image.shape for images in batch_images for image in images] _, channels, tile_height, tile_width = shapes[0] # Initialize the stacked images array with zeros stacked_images = torch.zeros( (batch_size, max_num_images, max_image_tiles, channels, tile_height, tile_width), dtype=torch.float32, ) # Fill the stacked images array with the tiled images from the batch all_num_tiles = [] for i, images in enumerate(batch_images): num_sample_tiles = [] for j, image in enumerate(images): num_tiles = image.shape[0] stacked_images[i, j, :num_tiles] = image num_sample_tiles.append(num_tiles) all_num_tiles.append(num_sample_tiles) return stacked_images, all_num_tiles def convert_aspect_ratios_to_ids(aspect_ratios: list[list[tuple[int, int]]], max_image_tiles: int) -> "torch.Tensor": """ Convert aspect ratio tuples to unique ids. For batch padding we use 0, because there might be different number of images in each batch. The aspect ratio ids start from 1, with 1 corresponding to the first supported aspect ratio. Args: aspect_ratios (`List[List[Tuple[int, int]]]`): A list of aspect ratios for each image in the batch. max_image_tiles (`int`): The maximum number of tiles any image can be split into. Returns: `torch.Tensor`: The aspect ratios ids as a numpy array with shape (batch_size, max_num_images). Each id corresponds to the index of the aspect ratio in the list of supported aspect ratios, offset by 1 (so 0 can be used for padding). 
""" batch_size = len(aspect_ratios) max_num_images = max(len(row) for row in aspect_ratios) supported_aspect_ratios = get_all_supported_aspect_ratios(max_image_tiles) aspect_ratios_ids = torch.zeros((batch_size, max_num_images), dtype=torch.long) for i, sample_aspect_ratios in enumerate(aspect_ratios): for j, (num_tiles_h, num_tiles_w) in enumerate(sample_aspect_ratios): aspect_ratios_ids[i, j] = supported_aspect_ratios.index((num_tiles_h, num_tiles_w)) + 1 return aspect_ratios_ids # Copied from transformers.models.idefics2.image_processing_idefics2.convert_to_rgb def convert_to_rgb(image: ImageInput) -> ImageInput: """ Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image as is. Args: image (Image): The image to convert. """ if not isinstance(image, Image.Image): return image # `image.convert("RGB")` would only work for .jpg images, as it creates a wrong background # for transparent images. The call to `alpha_composite` handles this case if image.mode == "RGB": return image image_rgba = image.convert("RGBA") background = Image.new("RGBA", image_rgba.size, (255, 255, 255)) alpha_composite = Image.alpha_composite(background, image_rgba) alpha_composite = alpha_composite.convert("RGB") return alpha_composite @auto_docstring class MllamaImageProcessorFast(BaseImageProcessorFast): resample = PILImageResampling.BILINEAR image_mean = IMAGENET_STANDARD_MEAN image_std = IMAGENET_STANDARD_STD size = {"height": 224, "width": 224} do_resize = True do_rescale = True do_normalize = True do_convert_rgb = True do_pad = True max_image_tiles = 4 valid_kwargs = MllamaImageProcessorKwargs model_input_names = ["pixel_values", "num_tiles", "aspect_ratio_ids", "aspect_ratio_mask"] def __init__(self, **kwargs: Unpack[MllamaImageProcessorKwargs]): super().__init__(**kwargs) @auto_docstring def preprocess(self, images: ImageInput, **kwargs: Unpack[MllamaImageProcessorKwargs]) -> BatchFeature: return super().preprocess(images, 
**kwargs) def _prepare_images_structure(self, images: ImageInput, expected_ndims: int = 3) -> ImageInput: """ Prepare a nested images structure for processing. """ images = self.fetch_images(images) return make_nested_list_of_images(images, expected_ndims=expected_ndims) def convert_to_rgb( self, image: ImageInput, ) -> ImageInput: """ Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image as is. Args: image (ImageInput): The image to convert. Returns: ImageInput: The converted image. """ return convert_to_rgb(image) def pad( self, image: "torch.Tensor", size: dict[str, int], aspect_ratio: tuple[int, int], ) -> "torch.Tensor": """ Pad an image to the `size` x `aspect_ratio`. For example, if size is {height: 224, width: 224} and aspect ratio is (1, 2), the image will be padded to 224x448. Args: image (`torch.Tensor`): Image to resize. size (`Dict[str, int]`): Size of the output image. aspect_ratio (`Tuple[int, int]`): The aspect ratio of the image. Returns: `torch.Tensor`: The padded image. """ image_height, image_width = image.shape[-2:] num_tiles_height, num_tiles_width = aspect_ratio padded_height = num_tiles_height * size.height padded_width = num_tiles_width * size.width pad_size = (0, 0, padded_width - image_width, padded_height - image_height) image = tvF.pad( image, pad_size, fill=0, ) return image def resize( self, image: "torch.Tensor", size: SizeDict, max_image_tiles: int, interpolation: "tvF.InterpolationMode" = None, antialias: bool = True, ) -> Union["torch.Tensor", tuple[int, int]]: """ Resizes an image to fit within a tiled canvas while maintaining its aspect ratio. The optimal canvas size is calculated based on the maximum number of tiles and the tile size. The function first determines the best tile arrangement for the image, then resizes the image to fit within this canvas. The resized image and the number of tiles along the height and width dimensions are returned. 
Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image. max_image_tiles (`int`): The maximum number of tiles to split the image into. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resizing the image. Returns: `Union[np.ndarray, Tuple[int, int]]`: The resized image and a tuple containing the number of tiles along the height and width dimensions. """ image_height, image_width = image.shape[-2:] tile_size = size.height canvas_height, canvas_width = get_optimal_tiled_canvas( image_height=image_height, image_width=image_width, max_image_tiles=max_image_tiles, tile_size=tile_size, ) num_tiles_height = canvas_height // tile_size num_tiles_width = canvas_width // tile_size new_height, new_width = get_image_size_fit_to_canvas( image_height=image_height, image_width=image_width, canvas_height=canvas_height, canvas_width=canvas_width, tile_size=tile_size, ) image = tvF.resize(image, (new_height, new_width), interpolation=interpolation, antialias=antialias) return image, (num_tiles_height, num_tiles_width) def _preprocess( self, images: list["torch.Tensor"], size: SizeDict, interpolation: Optional["tvF.InterpolationMode"], do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: float | list[float] | None, image_std: float | list[float] | None, max_image_tiles: int | None, return_tensors: str | TensorType | None, disable_grouping: bool | None, **kwargs, ) -> BatchFeature: # Group images by size for batched resizing grouped_images, grouped_images_index = group_images_by_shape( images, is_nested=True, disable_grouping=disable_grouping ) split_images_grouped = {} aspect_ratio_grouped = {} for shape, stacked_images in grouped_images.items(): stacked_images, aspect_ratio = self.resize( image=stacked_images, size=size, interpolation=interpolation, max_image_tiles=max_image_tiles ) stacked_images = self.pad( image=stacked_images, size=size, 
aspect_ratio=aspect_ratio, ) num_tiles_height, num_tiles_width = aspect_ratio aspect_ratio_grouped[shape] = [aspect_ratio] * len(stacked_images) # same aspect ratio for all images in the batch split_images = split_to_tiles(stacked_images, num_tiles_height, num_tiles_width) # Fused rescale and normalize split_images = self.rescale_and_normalize( split_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) split_images_grouped[shape] = split_images split_images = reorder_images(split_images_grouped, grouped_images_index, is_nested=True) aspect_ratios = reorder_images(aspect_ratio_grouped, grouped_images_index, is_nested=True) split_images, num_tiles = pad_batches_and_tiles(split_images, max_image_tiles) aspect_ratio_ids = convert_aspect_ratios_to_ids(aspect_ratios, max_image_tiles=max_image_tiles) aspect_ratio_mask = build_aspect_ratio_mask(aspect_ratios, max_image_tiles=max_image_tiles) encoded_inputs = BatchFeature( data={ "pixel_values": split_images, "aspect_ratio_ids": aspect_ratio_ids, "aspect_ratio_mask": aspect_ratio_mask, }, tensor_type=return_tensors, ) encoded_inputs["num_tiles"] = num_tiles return encoded_inputs __all__ = ["MllamaImageProcessorFast"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/mllama/image_processing_mllama_fast.py", "license": "Apache License 2.0", "lines": 338, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/cwm/modular_cwm.py
# Copyright 2025 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ...cache_utils import Cache, DynamicCache from ...configuration_utils import layer_type_validation from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask from ...modeling_outputs import BaseModelOutputWithPast from ...processing_utils import Unpack from ...utils import TransformersKwargs, logging from ..llama.configuration_llama import LlamaConfig from ..llama.modeling_llama import ( LlamaDecoderLayer, LlamaForCausalLM, LlamaModel, LlamaPreTrainedModel, ) from ..qwen2.modeling_qwen2 import Qwen2Attention, Qwen2RotaryEmbedding logger = logging.get_logger(__name__) class CwmConfig(LlamaConfig): """ Configuration for Code World Model (CWM). This is an inherited Llama3-compatible configuration with layer-interleaved sliding-window attention. Configures a `CwmModel`. Designed to yield a configuration mirroring the model in the [facebook/cwm](https://huggingface.co/facebook/cwm) architecture by default. Other models include: - [facebook/cwm-sft](https://huggingface.co/facebook/cwm-sft) - [facebook/cwm-pretrain](https://huggingface.co/facebook/cwm-pretrain) Args: vocab_size (`int`, *optional*, defaults to 128256): Vocabulary size of the CWM model. 
Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`CwmModel`] hidden_size (`int`, *optional*, defaults to 6144): Dimension of the hidden representations intermediate_size (`int`, *optional*, defaults to 21504): Dimension of the MLP representations num_hidden_layers (`int`, *optional*, defaults to 64): Number of hidden layers in the Transformer decoder num_attention_heads (`int`, *optional*, defaults to 48): Number of attention heads for each attention layer in the Transformer decoder num_key_value_heads (`int`, *optional*, defaults to 8): This is the number of key_value heads that should be used to implement Grouped Query Attention (GQA). If it is not specified, will default to `num_attention_heads`. head_dim (`int`, *optional*, defaults to 128): The attention head dimension. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 131072): The maximum sequence length that this model might ever be used with. CWM's attention allows sequence lengths up to 131072 tokens. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*): Padding token id. eos_token_id (`int` or `list[int]`, *optional*, defaults to `[128001, 128008, 128009]`): The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. bos_token_id (`int`, *optional*, defaults to 128000): The id of the *beginning-of-sequence* token. 
tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. pretraining_tp (`int`, *optional*, defaults to 1): Tensor parallelism degree used during pretraining. See [this document](https://huggingface.co/docs/transformers/parallelism) and [this issue](https://github.com/pytorch/pytorch/issues/76232). mlp_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers. rope_parameters (`RopeParameters`, *optional*): Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with longer `max_position_embeddings`. sliding_window (`int`, *optional*, defaults to 8192): Sliding window attention window size. layer_types (`List[str]`, *optional*): List of layer types for each layer. Each element should be either "full_attention" or "sliding_attention". If not specified, will default to alternating pattern based on the provided window pattern. 
""" model_type = "cwm" default_theta = 1_000_000.0 def __init__( self, vocab_size: int = 128256, hidden_size: int = 6144, intermediate_size: int = 21504, num_hidden_layers: int = 64, num_attention_heads: int = 48, num_key_value_heads: int = 8, head_dim: int = 128, hidden_act: str = "silu", max_position_embeddings: int = 131072, initializer_range: float = 0.02, rms_norm_eps: float = 1e-5, use_cache: bool = True, pad_token_id: int | None = None, eos_token_id=[128001, 128008, 128009], bos_token_id: int = 128000, tie_word_embeddings: bool = False, attention_dropout: float = 0.0, pretraining_tp: int = 1, mlp_bias: bool = False, rope_parameters: dict | None = None, # CWM interleaved sliding window fields sliding_window: int = 8192, layer_types: list[str] | None = None, # ["full_attention"|"sliding_attention"] per layer **kwargs, ): if rope_parameters is None: rope_parameters = { "rope_theta": 1_000_000.0, "factor": 16.0, "high_freq_factor": 4.0, "low_freq_factor": 1.0, "original_max_position_embeddings": 8192, "rope_type": "llama3", } if layer_types is None: # Default pattern: every 4th layer uses full attention, others use sliding attention window_pattern = 4 layer_types = [ ("full_attention" if (i % window_pattern == 0) else "sliding_attention") for i in range(num_hidden_layers) ] else: layer_type_validation(layer_types, num_hidden_layers) self.sliding_window = int(sliding_window) if sliding_window else None self.layer_types = list(layer_types) super().__init__( vocab_size=vocab_size, hidden_size=hidden_size, intermediate_size=intermediate_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, num_key_value_heads=num_key_value_heads, head_dim=head_dim, hidden_act=hidden_act, max_position_embeddings=max_position_embeddings, initializer_range=initializer_range, rms_norm_eps=rms_norm_eps, use_cache=use_cache, pad_token_id=pad_token_id, eos_token_id=list(eos_token_id), bos_token_id=bos_token_id, tie_word_embeddings=tie_word_embeddings, 
attention_bias=False, attention_dropout=attention_dropout, rope_parameters=rope_parameters, pretraining_tp=pretraining_tp, mlp_bias=mlp_bias, **kwargs, ) # CWM models don't use attention bias, remove it from config del self.attention_bias class CwmRotaryEmbedding(Qwen2RotaryEmbedding): pass class CwmAttention(Qwen2Attention): def __init__(self, config: CwmConfig, layer_idx: int): super().__init__(config=config, layer_idx=layer_idx) self.q_proj = torch.nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False) self.k_proj = torch.nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False) self.v_proj = torch.nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False) class CwmDecoderLayer(LlamaDecoderLayer): def __init__(self, config: CwmConfig, layer_idx: int): super().__init__(config=config, layer_idx=layer_idx) self.attention_type = config.layer_types[layer_idx] self.self_attn = CwmAttention(config=config, layer_idx=layer_idx) class CwmPreTrainedModel(LlamaPreTrainedModel): pass class CwmModelOutputWithPast(BaseModelOutputWithPast): pass class CwmModel(LlamaModel): config_class = CwmConfig def __init__(self, config: CwmConfig): super().__init__(config) self.layers = torch.nn.ModuleList( [CwmDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: Cache | None = None, inputs_embeds: torch.FloatTensor | None = None, cache_position: torch.LongTensor | None = None, use_cache: bool | None = None, **kwargs: Unpack[TransformersKwargs], ) -> CwmModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds: torch.Tensor = self.embed_tokens(input_ids) if use_cache and 
past_key_values is None: past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position: torch.Tensor = ( torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device) + past_seen_tokens ) if position_ids is None: position_ids = cache_position.unsqueeze(0) if not isinstance(causal_mask_mapping := attention_mask, dict): mask_kwargs = { "config": self.config, "inputs_embeds": inputs_embeds, "attention_mask": attention_mask, "cache_position": cache_position, "past_key_values": past_key_values, "position_ids": position_ids, } sliding_mask_kwargs = mask_kwargs.copy() causal_mask_mapping = { "full_attention": create_causal_mask(**mask_kwargs), "sliding_attention": create_sliding_window_causal_mask(**sliding_mask_kwargs), } hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids) for decoder_layer in self.layers[: self.config.num_hidden_layers]: hidden_states = decoder_layer( hidden_states, attention_mask=causal_mask_mapping[decoder_layer.attention_type], position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.norm(hidden_states) return CwmModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, ) class CwmForCausalLM(LlamaForCausalLM): pass __all__ = [ "CwmConfig", "CwmPreTrainedModel", "CwmModel", "CwmForCausalLM", ]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/cwm/modular_cwm.py", "license": "Apache License 2.0", "lines": 254, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:tests/models/cwm/test_configuration_cwm.py
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers.models.cwm import CwmConfig from transformers.testing_utils import require_torch from ...test_configuration_common import ConfigTester class CwmConfigTest(unittest.TestCase): def test_default_config(self): """Test default CWM configuration""" config = CwmConfig() # CWM defaults self.assertEqual(config.sliding_window, 8192) self.assertIsInstance(config.layer_types, list) # Llama3 defaults self.assertEqual(config.vocab_size, 128256) self.assertIsNotNone(config.rope_parameters) self.assertEqual(config.rope_parameters["rope_type"], "llama3") def test_custom_sliding_window_config(self): config = CwmConfig(sliding_window=4096) self.assertEqual(config.sliding_window, 4096) def test_custom_layer_types_config(self): layer_types = ["full_attention", "sliding_attention", "sliding_attention", "full_attention"] config = CwmConfig(num_hidden_layers=4, layer_types=layer_types) self.assertEqual(config.layer_types, layer_types) self.assertEqual(len(config.layer_types), config.num_hidden_layers) def test_invalid_layer_types_length(self): with self.assertRaises(ValueError): CwmConfig( num_hidden_layers=4, layer_types=["full_attention", "sliding_attention"], # Only 2 types for 4 layers ) def test_invalid_layer_type_value(self): with self.assertRaises(ValueError): CwmConfig(num_hidden_layers=2, layer_types=["full_attention", "invalid_attention"]) def 
test_automatic_layer_types_generation(self): # Test default pattern (every 4th layer uses full attention) config = CwmConfig(num_hidden_layers=8) expected_types = [ "full_attention", # layer 0: 0 % 4 == 0 "sliding_attention", # layer 1: 1 % 4 != 0 "sliding_attention", # layer 2: 2 % 4 != 0 "sliding_attention", # layer 3: 3 % 4 != 0 "full_attention", # layer 4: 4 % 4 == 0 "sliding_attention", # layer 5: 5 % 4 != 0 "sliding_attention", # layer 6: 6 % 4 != 0 "sliding_attention", # layer 7: 7 % 4 != 0 ] self.assertEqual(config.layer_types, expected_types) def test_rope_parameters_config(self): custom_rope_parameters = { "factor": 8.0, "high_freq_factor": 2.0, "low_freq_factor": 0.5, "original_max_position_embeddings": 4096, "rope_type": "llama3", "rope_theta": 1_000_000.0, } config = CwmConfig(rope_parameters=custom_rope_parameters) self.assertEqual(config.rope_parameters, custom_rope_parameters) def test_config_serialization(self): config = CwmConfig( sliding_window=4096, layer_types=["full_attention", "sliding_attention"] * 3, num_hidden_layers=6, ) config_dict = config.to_dict() self.assertIn("sliding_window", config_dict) self.assertIn("layer_types", config_dict) new_config = CwmConfig.from_dict(config_dict) self.assertEqual(new_config.sliding_window, config.sliding_window) self.assertEqual(new_config.layer_types, config.layer_types) def test_config_inheritance_from_llama(self): config = CwmConfig() # Llama config attributes self.assertTrue(hasattr(config, "hidden_size")) self.assertTrue(hasattr(config, "num_attention_heads")) self.assertTrue(hasattr(config, "num_key_value_heads")) self.assertTrue(hasattr(config, "intermediate_size")) self.assertTrue(hasattr(config, "rope_parameters")) self.assertTrue(hasattr(config, "attention_dropout")) @require_torch class CwmConfigTester(ConfigTester): def __init__(self, parent, config_class=None, **kwargs): super().__init__(parent, config_class=config_class, **kwargs) def test_config(self): config_class = CwmConfig 
self.config_tester = ConfigTester(self, config_class=config_class) self.config_tester.run_common_tests()
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/cwm/test_configuration_cwm.py", "license": "Apache License 2.0", "lines": 99, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:tests/models/cwm/test_modeling_cwm.py
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import ( Expectations, cleanup, require_deterministic_for_xpu, require_torch, require_torch_accelerator, slow, torch_device, ) from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester if is_torch_available(): import torch from transformers.models.cwm import ( CwmConfig, CwmForCausalLM, CwmModel, ) class CwmModelTester(CausalLMModelTester): if is_torch_available(): config_class = CwmConfig base_model_class = CwmModel causal_lm_class = CwmForCausalLM def get_config(self): config = super().get_config() config.sliding_window = 8192 config.rope_parameters = { "factor": 16.0, "high_freq_factor": 4.0, "low_freq_factor": 1.0, "original_max_position_embeddings": 8192, "rope_type": "llama3", "rope_theta": 1000000.0, } return config @require_torch class CwmModelTest(CausalLMModelTest, unittest.TestCase): all_model_classes = ( ( CwmModel, CwmForCausalLM, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": CwmModel, "text-generation": CwmForCausalLM, } if is_torch_available() else {} ) model_tester_class = CwmModelTester model_split_percents = [0.5, 0.7, 0.8] _torch_compile_train_cls = CwmForCausalLM if is_torch_available() else None @require_torch_accelerator @slow class CwmIntegrationTest(unittest.TestCase): def setUp(self): 
cleanup(torch_device, gc_collect=True) def tearDown(self): cleanup(torch_device, gc_collect=True) @slow @require_deterministic_for_xpu def test_cwm_integration(self): from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("facebook/cwm") model = CwmForCausalLM.from_pretrained("facebook/cwm", device_map="auto", dtype=torch.bfloat16) self.assertIsNotNone(model.config.sliding_window) self.assertIsNotNone(model.config.layer_types) self.assertIn("full_attention", model.config.layer_types) self.assertIn("sliding_attention", model.config.layer_types) for i, layer in enumerate(model.model.layers): expected_type = model.config.layer_types[i] self.assertEqual(layer.attention_type, expected_type) if expected_type == "sliding_attention": self.assertEqual(layer.self_attn.sliding_window, model.config.sliding_window) prompt = "def quicksort(arr):" inputs = tokenizer(prompt, return_tensors="pt").to(model.device) with torch.no_grad(): out = model(**inputs) # fmt: off expected_logits = Expectations( { ("cuda", None): torch.tensor( [0.5625, 2.9531, 9.1875, 0.5039, -0.3262, 2.2344, 3.0312, 1.5312, 0.5664, 1.5625, 2.7656, 3.4219, 2.0312, 2.1719, 1.5391, 2.5469, 2.8281, 1.8125, 1.7109, 1.3906, 1.0391, 0.1621, 0.4277, 0.1455, -0.1230, 0.8477, 2.2344, 5.2188, 1.2969, 1.5547, 0.8516, 0.7148] ), ("xpu", None): torch.Tensor( [0.5625, 2.9688, 9.1875, 0.4766, -0.3574, 2.2344, 3.0156, 1.4922, 0.5625, 1.5547, 2.7656, 3.4062, 2.0156, 2.1719, 1.5469, 2.5156, 2.8125, 1.7891, 1.7031, 1.3828, 1.0312, 0.1602, 0.4277, 0.1328, -0.1348, 0.8281, 2.2188, 5.2812, 1.2734, 1.5312, 0.8398, 0.7070] ), } ) # fmt: on expected_logits = expected_logits.get_expectation().to(model.device, torch.bfloat16) torch.testing.assert_close(out.logits[0, -1, :32], expected_logits, atol=1e-2, rtol=1e-2) self.assertEqual(out.logits.shape[1], inputs.input_ids.shape[1]) self.assertEqual(out.logits.shape[2], model.config.vocab_size) self.assertFalse(torch.isnan(out.logits).any()) 
self.assertFalse(torch.isinf(out.logits).any()) @slow @require_deterministic_for_xpu def test_cwm_sliding_window_long_sequence(self): from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("facebook/cwm") # original `sliding_window` is `8192`, but it causes GPU OOM on A10 model = CwmForCausalLM.from_pretrained( "facebook/cwm", device_map="auto", dtype=torch.bfloat16, sliding_window=4096 ) sliding_window = model.config.sliding_window long_text = "for i in range(1000):\n print(f'iteration {i}')\n" * 270 inputs = tokenizer(long_text, return_tensors="pt").to(model.device) seq_len = inputs.input_ids.shape[1] # create a sequence longer than sliding window self.assertGreater( seq_len, sliding_window, f"Test sequence length {seq_len} should be > sliding window {sliding_window}" ) with torch.no_grad(): out = model(**inputs) # fmt: off expected_logits = Expectations( { ("cuda", None): torch.tensor( [5.2812, 6.4688, 12.8125, 4.6875, 5.2500, 4.2500, 6.9688, 4.9375, 2.7656, 6.5938, 4.9688, 1.1016, 5.9375, 3.7500, 3.1094, 5.5312, 6.1250, 4.7500, 4.5312, 2.8281, 4.0625, 3.3125, 3.9219, 3.3906, 3.1406, 3.6719, 3.2031, 7.0938, 4.8750, 6.0000, 2.7188, 6.2500] ), ("xpu", None): torch.Tensor( [5.2500, 6.4688, 12.8125, 4.6562, 5.2812, 4.2812, 7.0000, 4.9062, 2.7344, 6.5938, 4.9062, 1.1094, 5.9375, 3.7188, 3.0469, 5.5000, 6.0938, 4.7188, 4.5000, 2.7344, 4.0312, 3.2812, 3.8750, 3.3438, 3.1094, 3.6406, 3.2031, 7.1250, 4.8750, 6.0000, 2.7031, 6.2188] ), } ) # fmt: on expected_logits = expected_logits.get_expectation().to(model.device, torch.bfloat16) torch.testing.assert_close(out.logits[0, -1, :32], expected_logits, atol=1e-2, rtol=1e-2) logits = out.logits.to("cpu") self.assertEqual(logits.shape[1], seq_len) self.assertEqual(logits.shape[2], model.config.vocab_size) self.assertFalse(torch.isnan(logits).any()) self.assertFalse(torch.isinf(logits).any()) for i, layer in enumerate(model.model.layers): if model.config.layer_types[i] == "sliding_attention": 
self.assertEqual(layer.self_attn.sliding_window, sliding_window) @slow def test_cwm_generation_20_tokens(self): from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("facebook/cwm") model = CwmForCausalLM.from_pretrained("facebook/cwm", device_map="auto", dtype=torch.bfloat16) system_prompt = "You are a helpful AI assistant. You always reason before responding, using the following format:\n\n<think>\nyour internal reasoning\n</think>\nyour external response" messages = [ {"role": "system", "content": system_prompt}, {"role": "user", "content": "Write a simple Python function to add two numbers."}, ] text = tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True, enable_thinking=True, preserve_previous_think=True, ) model_inputs = tokenizer([text], return_tensors="pt").to(model.device) with torch.no_grad(): generated_ids = model.generate( **model_inputs, max_new_tokens=20, do_sample=False, temperature=1.0, top_p=1.0, pad_token_id=tokenizer.eos_token_id, ) output_ids = generated_ids[0][len(model_inputs.input_ids[0]) :].tolist() generated_text = tokenizer.decode(output_ids, skip_special_tokens=False) self.assertEqual(len(output_ids), 20, "Should generate exactly 20 tokens") expected_token_ids = [ 33413, 11, 358, 1205, 311, 3350, 264, 13325, 734, 430, 11621, 1403, 5219, 13, 6914, 596, 1212, 555, 89746, 1268, ] expected_text = "Okay, I need to write a Python function that adds two numbers. Let's start by recalling how" self.assertEqual(output_ids, expected_token_ids, "Generated tokens should match ground truth") self.assertEqual(generated_text, expected_text, "Generated text should match ground truth")
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/cwm/test_modeling_cwm.py", "license": "Apache License 2.0", "lines": 210, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:src/transformers/models/glm4v_moe/convert_glm4v_moe_mgt_weights_to_hf.py
# Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import os import pickle import re from pathlib import Path import torch from safetensors.torch import save_file # Avoid Using Megatron Lib class UnpicklerWrapper(pickle.Unpickler): def find_class(self, mod_name, name): class DummyClass: def __init__(self, *args, **kwargs): pass if mod_name.startswith("megatron") or mod_name.startswith("glm") or mod_name.startswith("__main__"): return DummyClass return super().find_class(mod_name, name) pickle.Unpickler = UnpicklerWrapper def dict_access_multi(a_dict, keys): if len(keys) == 0: return a_dict return dict_access_multi(a_dict[keys[0]], keys[1:]) def merge_qkv( sd_list, original_tp, num_attention_heads, multi_query_group_num, attention_dim, interleaved_qkv, ): group_size = (num_attention_heads // multi_query_group_num + 2) * attention_dim q, k, v = [], [], [] for sd in sd_list: if interleaved_qkv: shape = sd.shape q_, k_, v_ = sd.view((multi_query_group_num // original_tp, group_size) + (shape[1:])).split( [ (num_attention_heads // multi_query_group_num * attention_dim), attention_dim, attention_dim, ], dim=1, ) q_ = q_.reshape((-1,) + (shape[1:])) k_ = k_.reshape((-1,) + (shape[1:])) v_ = v_.reshape((-1,) + (shape[1:])) else: q_, k_, v_ = sd.split( [ num_attention_heads * attention_dim // original_tp, multi_query_group_num * attention_dim // original_tp, multi_query_group_num * attention_dim // original_tp, ], dim=0, 
) q.append(q_.clone()) k.append(k_.clone()) v.append(v_.clone()) q = torch.cat(q, dim=0) k = torch.cat(k, dim=0) v = torch.cat(v, dim=0) return q, k, v def merge_glu(sd_list): return torch.cat( [sd.chunk(dim=0, chunks=2)[0].clone() for sd in sd_list] + [sd.chunk(dim=0, chunks=2)[1].clone() for sd in sd_list], dim=0, ) def merge_glu_vit(sd_list, original_tp=None): if not isinstance(sd_list, list): sd_list = [sd_list] gate_proj = torch.cat([sd.chunk(dim=0, chunks=2)[0].clone() for sd in sd_list], dim=0) up_proj = torch.cat([sd.chunk(dim=0, chunks=2)[1].clone() for sd in sd_list], dim=0) return gate_proj, up_proj def split_glu(sd, cnt, idx): return torch.cat( ( sd.chunk(dim=0, chunks=2)[0].chunk(cnt, dim=0)[idx].clone(), sd.chunk(dim=0, chunks=2)[1].chunk(cnt, dim=0)[idx].clone(), ), dim=0, ) def find_expert_weight(input_dict, layer_num, fc1=True): if fc1: pattern = re.compile(rf"^decoder\.layers\.{layer_num}\.mlp\.experts\.linear_fc1\.weight(\d+)$") else: pattern = re.compile(rf"^decoder\.layers\.{layer_num}\.mlp\.experts\.linear_fc2\.weight(\d+)$") matched = [] for key in input_dict: match = pattern.match(key) if match: weight_num = int(match.group(1)) matched.append((weight_num, key)) matched.sort(key=lambda x: x[0]) weights = [None for _ in range(len(matched) * len(input_dict[matched[0][1]]))] for idx, key in matched: for i, weight in enumerate(input_dict[key]): weights[i * len(matched) + idx] = weight return weights def merge_tensors( tp_sd, keys, original_tp, target_tp, current_tp, slice_dim=None, merge_fn=None, ): cnt = original_tp // target_tp offset = cnt * current_tp sd_list = [dict_access_multi(tp_sd[i + offset], keys) for i in range(cnt)] if slice_dim is not None: return torch.cat(sd_list, dim=slice_dim) assert merge_fn is not None return merge_fn(sd_list) def save_sharded_model(state_dict, output_path, max_shard_size_gb=5, num_layers=46, vision_num_layers=24): os.makedirs(output_path, exist_ok=True) layered_dict = {} for layer_idx in range(num_layers): 
layer_key = f"layer_{layer_idx}" layered_dict[layer_key] = {} for key, value in state_dict.items(): if f"model.language_model.layers.{layer_idx}." in key: if isinstance(value, list): assert len(value) == 1, f"{key} {value}" value = value[0] layered_dict[layer_key][key] = value for layer_idx in range(vision_num_layers): layer_key = f"visual_layer_{layer_idx}" layered_dict[layer_key] = {} for key, value in state_dict.items(): if f"model.visual.blocks.{layer_idx}." in key: layered_dict[layer_key][key] = value layered_dict["others"] = {} for key, value in state_dict.items(): if not any(f"model.language_model.layers.{i}." in key for i in range(num_layers)) and not any( f"model.visual.blocks.{i}." in key for i in range(vision_num_layers) ): layered_dict["others"][key] = value # Determine layer ordering layer_order = [] for i in range(num_layers): layer_order.append(f"layer_{i}") for i in range(vision_num_layers): layer_order.append(f"visual_layer_{i}") layer_order.append("others") # Calculate sizes and create shards by layer param_sizes = {} shards = [] current_shard = {} current_shard_size = 0 max_shard_size_bytes = max_shard_size_gb * 1024 * 1024 * 1024 for layer_key in layer_order: layer_weights = layered_dict[layer_key] layer_size = sum(param.numel() * param.element_size() for param in layer_weights.values()) if current_shard_size + layer_size > max_shard_size_bytes and current_shard: shards.append(current_shard) current_shard = {} current_shard_size = 0 for param_name, param in layer_weights.items(): current_shard[param_name] = param current_shard_size += param.numel() * param.element_size() param_sizes[param_name] = param.numel() * param.element_size() if current_shard: shards.append(current_shard) index_dict = {"metadata": {"total_size": sum(param_sizes.values())}, "weight_map": {}} for i, shard in enumerate(shards): shard_filename = f"model-{i + 1:05d}-of-{len(shards):05d}.safetensors" shard_path = os.path.join(output_path, shard_filename) for param_name in 
shard: index_dict["weight_map"][param_name] = shard_filename save_file(shard, shard_path, metadata={"format": "pt"}) print(f"Saved shard {i + 1}/{len(shards)}: {shard_filename}") print(f" Shard size: {sum(p.numel() * p.element_size() for p in shard.values()) / (1024**3):.2f} GB") print(f" Keys in shard: {len(shard)}") index_path = os.path.join(output_path, "model.safetensors.index.json") with open(index_path, "w") as f: json.dump(index_dict, f, indent=2) return len(shards) def merge_tp_weights(model_path, output_path, vllm_config_path=None): origin_tp, origin_ep, origin_pp = -1, -1, -1 check_ep_or_pp_later = False for item in Path(model_path).iterdir(): if item.is_dir(): match = re.match(r"mp_rank_(\d{2})(?:_(\d{3}))?(?:_(\d{3}))?", item.name) if match: groups = match.groups() tp = int(groups[0]) origin_tp = max(origin_tp, tp + 1) # maybe TP-EP or TP-PP, need check later if groups[1] is not None and groups[2] is None: pp = int(groups[1]) origin_pp = max(origin_pp, pp + 1) origin_ep = 1 check_ep_or_pp_later = True elif groups[1] is not None and groups[2] is not None: pp = int(groups[1]) ep = int(groups[2]) origin_pp = max(origin_pp, pp + 1) origin_ep = max(origin_ep, ep + 1) else: origin_ep = 1 origin_pp = 1 tensor_names_by_file = {} mgt_sd = {} for item in Path(model_path).iterdir(): if item.is_dir(): match = re.match(r"mp_rank_(\d{2})(?:_(\d{3}))?(?:_(\d{3}))?$", item.name) if match: groups = match.groups() tp = int(groups[0]) pp = int(groups[1]) if groups[1] is not None else 0 ep = int(groups[2]) if groups[2] is not None else 0 file_path = item / "model_optim_rng.pt" assert file_path.exists(), f"model_optim_rng.pt not found in {item}" file_sd = torch.load(file_path, map_location="cpu", weights_only=False) for k in list(file_sd.keys()): if "_extra_state" in k or "dummy_parameter" in k: file_sd.pop(k) mgt_sd[(tp, pp, ep)] = file_sd tensor_names = set() if "model" in file_sd: for key in file_sd["model"].keys(): tensor_names.add(key) tensor_names_by_file[(tp, pp, 
ep)] = tensor_names change_pp_to_ep = False if check_ep_or_pp_later: prefix_distribution = {} for (tp, pp, ep), prefixes in tensor_names_by_file.items(): for prefix in prefixes: if prefix not in prefix_distribution: prefix_distribution[prefix] = set() prefix_distribution[prefix].add((tp, pp, ep)) for prefix, locations in prefix_distribution.items(): if len(locations) > 1: pp_values = {loc[1] for loc in locations} if len(pp_values) > 1: print(f"find '{prefix}' in multi ranks {pp_values} the parallelism should be TP-EP") origin_ep = origin_pp origin_pp = 1 change_pp_to_ep = True break else: print(f"find '{prefix}' only in one ep, parallelism should be TP-PP") break print(f"Detected tensor parallel degree TP={origin_tp} EP={origin_ep} PP={origin_pp}") if origin_tp <= 1 and origin_ep <= 1 and origin_pp <= 1: print("Model is already at TP=1 EP=1 PP=1, no need to merge") return assert max(origin_tp, origin_ep) * origin_pp == len(tensor_names_by_file), "maybe some problem in origin weight" organized_sd = {} for (tp, pp, ep), file_sd in mgt_sd.items(): if change_pp_to_ep: pp, ep = ep, pp organized_sd.setdefault(pp, {}) organized_sd[pp][(ep, tp)] = file_sd find_vpp = "model0" in file_sd # support VPP, if each pp rank has n vpp blocks, we will treat the original model # was parallel as pp n * origin_pp if find_vpp: organized_sd_vpp = {} for i in range(origin_pp): for (ep, tp), file_sd in organized_sd[i].items(): model_keys = sorted( [key for key in file_sd.keys() if key.startswith("model") and key[5:].isdigit()], key=lambda x: int(x[5:]), ) vp_blocks = len(model_keys) for idx, key in enumerate(model_keys): assert key in file_sd, f"model {key} not found" organized_sd_vpp.setdefault(idx * origin_pp + i, {}) organized_sd_vpp[idx * origin_pp + i][(ep, tp)] = {"model": file_sd[key]} origin_pp = origin_pp * vp_blocks organized_sd = organized_sd_vpp ignore_list = ["_extra_state", "dummy_parameter"] layer_share_list = [ "norm", "conv3d", "downsample", "router", 
"mlp.linear_fc2.bias", "self_attention.linear_proj.bias", "position_embeddings", ] full_weights = {} vit_layer_offset = 0 llm_layer_offset = 0 llm_layer_pattern = re.compile(r"^(decoder\.layers\.)(\d+)(\..*)$") vit_layer_pattern = re.compile(r"^(vision_model\.transformer\.layers\.)(\d+)(\..*)$") for pp in sorted(organized_sd.keys()): pp_dict = organized_sd[pp] next_llm_layer_offset = llm_layer_offset next_vit_layer_offset = vit_layer_offset ep_map = {} tp_map = {} tp_seen = set() for (ep, tp), item in pp_dict.items(): if tp not in tp_seen: tp_seen.add(tp) tp_map[tp] = item ep_map[ep] = item for tp in sorted(tp_map.keys()): sd = tp_map[tp] for full_name, tensor in sd["model"].items(): if any(x in full_name for x in ignore_list): continue llm_name_match = llm_layer_pattern.match(full_name) if llm_name_match: # Use a closure to avoid global variable issues def offset_layer(x, offset=llm_layer_offset): nonlocal next_llm_layer_offset _real_layer = int(x.group(2)) + offset next_llm_layer_offset = max(next_llm_layer_offset, _real_layer + 1) return f"{x.group(1)}{_real_layer}{x.group(3)}" full_name = llm_layer_pattern.sub(offset_layer, full_name) vit_name_match = vit_layer_pattern.match(full_name) if vit_name_match: # Use a closure to avoid global variable issues def offset_layer(x, offset=vit_layer_offset): nonlocal next_vit_layer_offset _real_layer = int(x.group(2)) + offset next_vit_layer_offset = max(next_vit_layer_offset, _real_layer + 1) return f"{x.group(1)}{_real_layer}{x.group(3)}" full_name = vit_layer_pattern.sub(offset_layer, full_name) if layer_share_list and any(x in full_name for x in layer_share_list): if full_name not in full_weights: full_weights[full_name] = tensor else: assert torch.equal(tensor, full_weights[full_name]), ( f"detect diff param in tp named: {full_name}" ) elif not re.search(r"\.experts\.", full_name): full_weights.setdefault(full_name, [None for _ in range(origin_tp)]) full_weights[full_name][tp] = tensor for ep in sorted(ep_map.keys()): 
sd = ep_map[ep] for full_name, tensor in sd["model"].items(): if any(x in full_name for x in ignore_list): continue name_match = llm_layer_pattern.match(full_name) if name_match: # Use a closure to avoid global variable issues def offset_layer(x, offset=llm_layer_offset): nonlocal next_llm_layer_offset _real_layer = int(x.group(2)) + offset next_llm_layer_offset = max(next_llm_layer_offset, _real_layer + 1) return f"{x.group(1)}{_real_layer}{x.group(3)}" full_name = llm_layer_pattern.sub(offset_layer, full_name) if re.search(r"\.experts\.", full_name): full_weights.setdefault(full_name, [None for _ in range(origin_ep)]) full_weights[full_name][ep] = tensor llm_layer_offset = next_llm_layer_offset vit_layer_offset = next_vit_layer_offset for k in sorted(full_weights.keys()): item = full_weights[k] if isinstance(item, list): print(f"{k} {len(item)} {item[0].shape} {item[0].dtype}", flush=True) else: print(f"{k} {item.shape} {item.dtype}", flush=True) print(f"Loading vLLM configuration file: {vllm_config_path}") with open(vllm_config_path, "r") as f: model_config = json.load(f) print(model_config) text_config = model_config.get("text_config", {}) vision_config = model_config.get("vision_config", {}) num_layers = text_config.get("num_hidden_layers", 46) llm_num_heads = text_config.get("num_attention_heads", 96) num_kv_heads = text_config.get("num_key_value_heads", 8) llm_attn_query_size = text_config.get("llm_attn_query_size", 12288) head_dim = text_config.get("attention_dim", llm_attn_query_size // llm_num_heads) vision_num_layers = vision_config.get("depth", 24) vit_n_head = vision_config.get("num_heads", 12) print( f"Model parameters: num_layers={num_layers}, vision_num_layers={vision_num_layers}, " f"num_heads={llm_num_heads}, multi_query_group_num={num_kv_heads}, llm_attn_query_size={llm_attn_query_size}" ) print("Merging tensor parallel weights...") interleaved_qkv = True num_attention_heads = llm_num_heads multi_query_group_num = num_kv_heads attention_dim = 
head_dim complete_state_dict = {} # LLM layer_i = 0 while f"decoder.layers.{layer_i}.self_attention.linear_qkv.layer_norm_weight" in full_weights: if f"decoder.layers.{layer_i}.self_attention.linear_qkv.layer_norm_weight" in full_weights: complete_state_dict[f"model.language_model.layers.{layer_i}.input_layernorm.weight"] = full_weights[ f"decoder.layers.{layer_i}.self_attention.linear_qkv.layer_norm_weight" ] if f"decoder.layers.{layer_i}.pre_mlp_layernorm.weight" in full_weights: complete_state_dict[f"model.language_model.layers.{layer_i}.post_attention_layernorm.weight"] = ( full_weights[f"decoder.layers.{layer_i}.pre_mlp_layernorm.weight"] ) elif f"decoder.layers.{layer_i}.mlp.linear_fc1.layer_norm_weight" in full_weights: complete_state_dict[f"model.language_model.layers.{layer_i}.post_attention_layernorm.weight"] = ( full_weights[f"decoder.layers.{layer_i}.mlp.linear_fc1.layer_norm_weight"] ) q, k, v = merge_qkv( sd_list=full_weights[f"decoder.layers.{layer_i}.self_attention.linear_qkv.weight"], original_tp=origin_tp, num_attention_heads=num_attention_heads, multi_query_group_num=multi_query_group_num, attention_dim=attention_dim, interleaved_qkv=interleaved_qkv, ) complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.q_proj.weight"] = q.clone() complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.k_proj.weight"] = k.clone() complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.v_proj.weight"] = v.clone() if f"decoder.layers.{layer_i}.self_attention.linear_qkv.bias" in full_weights: q_bias, k_bias, v_bias = merge_qkv( sd_list=full_weights[f"decoder.layers.{layer_i}.self_attention.linear_qkv.bias"], original_tp=origin_tp, num_attention_heads=num_attention_heads, multi_query_group_num=multi_query_group_num, attention_dim=attention_dim, interleaved_qkv=interleaved_qkv, ) complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.q_proj.bias"] = q_bias.clone() 
complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.k_proj.bias"] = k_bias.clone() complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.v_proj.bias"] = v_bias.clone() o_proj = torch.cat(full_weights[f"decoder.layers.{layer_i}.self_attention.linear_proj.weight"], dim=1) complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.o_proj.weight"] = o_proj.clone() if f"decoder.layers.{layer_i}.mlp.shared_experts.linear_fc1.weight" in full_weights: routed_expert_fc1_weights = find_expert_weight(full_weights, layer_i, fc1=True) for idx, weight in enumerate(routed_expert_fc1_weights): gate_proj_weight, up_proj_weight = merge_glu_vit([weight]) complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.experts.{idx}.gate_proj.weight"] = ( gate_proj_weight.clone() ) complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.experts.{idx}.up_proj.weight"] = ( up_proj_weight.clone() ) routed_expert_fc2_weights = find_expert_weight(full_weights, layer_i, fc1=False) for idx, weight in enumerate(routed_expert_fc2_weights): complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.experts.{idx}.down_proj.weight"] = ( weight.clone() ) complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.gate.e_score_correction_bias"] = ( full_weights[f"decoder.layers.{layer_i}.mlp.router.expert_bias"] ) complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.gate.weight"] = full_weights[ f"decoder.layers.{layer_i}.mlp.router.weight" ] gate_proj_weight, up_proj_weight = merge_glu_vit( full_weights[f"decoder.layers.{layer_i}.mlp.shared_experts.linear_fc1.weight"] ) complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.shared_experts.gate_proj.weight"] = ( gate_proj_weight.clone() ) complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.shared_experts.up_proj.weight"] = ( up_proj_weight.clone() ) complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.shared_experts.down_proj.weight"] = ( 
full_weights[f"decoder.layers.{layer_i}.mlp.shared_experts.linear_fc2.weight"] ) else: # MLP - Use gate_up_proj gate_proj_weight, up_proj_weight = merge_glu_vit( full_weights[f"decoder.layers.{layer_i}.mlp.linear_fc1.weight"] ) complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.gate_proj.weight"] = ( gate_proj_weight.clone() ) complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.up_proj.weight"] = up_proj_weight.clone() complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat( full_weights[f"decoder.layers.{layer_i}.mlp.linear_fc2.weight"], dim=1 ) layer_i += 1 # Embedd Model, LM Head, and Norm embed_tokens = torch.cat(full_weights["embedding.word_embeddings.weight"], dim=0) complete_state_dict["model.language_model.embed_tokens.weight"] = embed_tokens.clone() lm_head = torch.cat(full_weights["output_layer.weight"], dim=0) complete_state_dict["lm_head.weight"] = lm_head.clone() complete_state_dict["model.language_model.norm.weight"] = full_weights["decoder.final_layernorm.weight"].clone() # VLM for layer_i in range(vision_num_layers): complete_state_dict[f"model.visual.blocks.{layer_i}.norm1.weight"] = full_weights[ f"vision_model.transformer.layers.{layer_i}.self_attention.linear_qkv.layer_norm_weight" ] complete_state_dict[f"model.visual.blocks.{layer_i}.norm2.weight"] = full_weights[ f"vision_model.transformer.layers.{layer_i}.mlp.linear_fc1.layer_norm_weight" ] q, k, v = merge_qkv( sd_list=full_weights[f"vision_model.transformer.layers.{layer_i}.self_attention.linear_qkv.weight"], original_tp=origin_tp, num_attention_heads=vit_n_head, multi_query_group_num=vit_n_head, attention_dim=attention_dim, interleaved_qkv=interleaved_qkv, ) complete_state_dict[f"model.visual.blocks.{layer_i}.attn.qkv.weight"] = torch.cat((q, k, v), dim=0) proj_weight = torch.cat( full_weights[f"vision_model.transformer.layers.{layer_i}.self_attention.linear_proj.weight"], dim=1 ) 
complete_state_dict[f"model.visual.blocks.{layer_i}.attn.proj.weight"] = proj_weight.clone() gate_proj_weight, up_proj_weight = merge_glu_vit( full_weights[f"vision_model.transformer.layers.{layer_i}.mlp.linear_fc1.weight"] ) complete_state_dict[f"model.visual.blocks.{layer_i}.mlp.gate_proj.weight"] = gate_proj_weight.clone() complete_state_dict[f"model.visual.blocks.{layer_i}.mlp.up_proj.weight"] = up_proj_weight.clone() down_proj_weight = torch.cat( full_weights[f"vision_model.transformer.layers.{layer_i}.mlp.linear_fc2.weight"], dim=1 ) complete_state_dict[f"model.visual.blocks.{layer_i}.mlp.down_proj.weight"] = down_proj_weight.clone() complete_state_dict["model.visual.downsample.weight"] = ( full_weights["vision_model.downsample.weight"].clone().contiguous() ) complete_state_dict["model.visual.downsample.bias"] = ( full_weights["vision_model.downsample.bias"].clone().contiguous() ) # Merger gate_proj, up_proj = merge_glu_vit(full_weights["vision_projection.encoder.linear_fc1.weight"]) down_proj = torch.cat(full_weights["vision_projection.encoder.linear_fc2.weight"], dim=1) proj = torch.cat(full_weights["vision_projection.linear_fc_extra.weight"], dim=0) complete_state_dict["model.visual.merger.gate_proj.weight"] = gate_proj.clone().contiguous() complete_state_dict["model.visual.merger.up_proj.weight"] = up_proj.clone().contiguous() complete_state_dict["model.visual.merger.down_proj.weight"] = down_proj.clone().contiguous() complete_state_dict["model.visual.merger.proj.weight"] = proj.clone().contiguous() if "vision_projection.layer_norm.weight" in full_weights: complete_state_dict["model.visual.merger.post_projection_norm.weight"] = full_weights[ "vision_projection.layer_norm.weight" ] if "vision_projection.layer_norm.bias" in full_weights: complete_state_dict["model.visual.merger.post_projection_norm.bias"] = full_weights[ "vision_projection.layer_norm.bias" ] complete_state_dict["model.visual.embeddings.position_embedding.weight"] = ( 
full_weights["vision_model.position_embeddings.weight"].clone().contiguous() ) complete_state_dict["model.visual.patch_embed.proj.weight"] = ( full_weights["vision_model.conv3d.weight"].clone().contiguous() ) complete_state_dict["model.visual.patch_embed.proj.bias"] = ( full_weights["vision_model.conv3d.bias"].clone().contiguous() ) # Check for additional vision model norm layers mentioned in the expected output if "vision_model.post_conv_layernorm.weight" in full_weights: complete_state_dict["model.visual.post_conv_layernorm.weight"] = ( full_weights["vision_model.post_conv_layernorm.weight"].clone().contiguous() ) if "vision_model.post_layernorm.weight" in full_weights: complete_state_dict["model.visual.post_layernorm.weight"] = ( full_weights["vision_model.post_layernorm.weight"].clone().contiguous() ) print(f"Total keys in state dict: {len(complete_state_dict)}") print("bias use Float32") save_sharded_model( complete_state_dict, output_path=output_path, max_shard_size_gb=5, num_layers=num_layers, vision_num_layers=vision_num_layers, ) hf_config = { "architectures": ["Glm4vMoeForConditionalGeneration"], "model_type": "glm4v_moe", "image_start_token_id": model_config.get("image_start_token_id", 151339), "image_end_token_id": model_config.get("image_end_token_id", 151340), "video_start_token_id": model_config.get("video_start_token_id", 151341), "video_end_token_id": model_config.get("video_end_token_id", 151342), "transformers_version": "4.57.0.dev0", } txt_config = { "model_type": "glm4v_moe_text", "attention_bias": model_config.get("add_qkv_bias", True), "use_qk_norm": model_config.get("use_qk_norm", False), "attention_dropout": 0.0, "pad_token_id": model_config.get("pad_token_id", 151329), "eos_token_id": model_config.get("eos_token_id", [151329, 151336, 151338]), "image_token_id": model_config.get("image_token_id", 151363), "video_token_id": model_config.get("video_token_id", 151364), "hidden_act": text_config.get("hidden_act", "silu"), "hidden_size": 
text_config.get("hidden_size", 4096), "initializer_range": 0.02, "intermediate_size": text_config.get("intermediate_size", 10944), "max_position_embeddings": text_config.get("seq_length", 131072), "num_attention_heads": text_config.get("num_attention_heads", 96), "num_hidden_layers": text_config.get("num_layers", 46), "num_key_value_heads": text_config.get("multi_query_group_num", 2), "rms_norm_eps": text_config.get("layernorm_epsilon", 1e-05), "dtype": text_config.get("torch_dtype", "bfloat16"), "use_cache": text_config.get("use_cache", True), "vocab_size": text_config.get("vocab_size", 151424), "partial_rotary_factor": 0.5, "tie_word_embeddings": False, "moe_intermediate_size": text_config.get("moe_intermediate_size", 1408), "n_group": text_config.get("n_group", 1), "n_routed_experts": text_config.get("n_routed_experts", 128), "n_shared_experts": text_config.get("n_shared_experts", 1), "norm_topk_prob": text_config.get("norm_topk_prob", True), "num_experts_per_tok": text_config.get("num_experts_per_tok", 8), "rope_parameters": { "rope_type": "default", "rope_theta": 10000.0, "mrope_section": [8, 12, 12], "partial_rotary_factor": 0.5, }, } hf_config["text_config"] = txt_config if "vision_config" in model_config: vision_config = { "model_type": "glm4v_moe_vision", "hidden_size": model_config["vision_config"].get("hidden_size", 1536), "depth": model_config["vision_config"].get("num_layers", 24), "num_heads": model_config["vision_config"].get("num_attention_heads", 12), "attention_bias": model_config["vision_config"].get("attention_bias", False), "intermediate_size": model_config.get("ffn_hidden_size", 13696), "hidden_act": model_config["vision_config"].get("hidden_act", "silu"), "hidden_dropout_prob": model_config["vision_config"].get("hidden_dropout_prob", 0.0), "initializer_range": 0.02, "image_size": model_config["vision_config"].get("image_size", 336), "patch_size": model_config["vision_config"].get("patch_size", 14), "out_hidden_size": 
model_config.get("hidden_size", 4096), "rms_norm_eps": model_config["vision_config"].get("layernorm_epsilon", 1e-05), "spatial_merge_size": model_config["vision_config"].get("downsample_ratio", 2), "temporal_patch_size": model_config["vision_config"].get("t_patch", 2), } hf_config["vision_config"] = vision_config config_path = os.path.join(output_path, "config.json") with open(config_path, "w") as f: json.dump(hf_config, f, indent=2) print(f"Conversion complete! Model saved to {output_path}") def parse_args(): parser = argparse.ArgumentParser(description="Convert Megatron model to HuggingFace format") parser.add_argument( "--model_path", type=str, required=True, help="Path to Megatron model directory", ) parser.add_argument("--output_path", type=str, required=True, help="Output path for HuggingFace model directory") parser.add_argument( "--config_path", type=str, help="Path to vLLM configuration file for creating HuggingFace config" ) return parser.parse_args() if __name__ == "__main__": args = parse_args() merge_tp_weights(args.model_path, args.output_path, args.config_path)
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/glm4v_moe/convert_glm4v_moe_mgt_weights_to_hf.py", "license": "Apache License 2.0", "lines": 651, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/utils/type_validators.py
from collections.abc import Sequence from typing import Any, Union, cast from ..tokenization_utils_base import PaddingStrategy, TruncationStrategy from ..video_utils import VideoMetadataType from .generic import TensorType from .import_utils import is_torch_available, is_vision_available if is_vision_available(): from ..image_utils import PILImageResampling if is_torch_available(): import torch def positive_any_number(value: int | float | None = None): if value is not None and (not isinstance(value, (int, float)) or not value >= 0): raise ValueError(f"Value must be a positive integer or floating number, got {value}") def positive_int(value: int | None = None): if value is not None and (not isinstance(value, int) or not value >= 0): raise ValueError(f"Value must be a positive integer, got {value}") def padding_validator(value: bool | str | PaddingStrategy | None = None): possible_names = ["longest", "max_length", "do_not_pad"] if value is None: pass elif not isinstance(value, (bool, str, PaddingStrategy)): raise ValueError("Value for padding must be either a boolean, a string or a `PaddingStrategy`") elif isinstance(value, str) and value not in possible_names: raise ValueError(f"If padding is a string, the value must be one of {possible_names}") def truncation_validator(value: bool | str | TruncationStrategy | None = None): possible_names = ["only_first", "only_second", "longest_first", "do_not_truncate"] if value is None: pass elif not isinstance(value, (bool, str, TruncationStrategy)): raise ValueError("Value for truncation must be either a boolean, a string or a `TruncationStrategy`") elif isinstance(value, str) and value not in possible_names: raise ValueError(f"If truncation is a string, value must be one of {possible_names}") def image_size_validator(value: int | Sequence[int] | dict[str, int] | None = None): possible_keys = ["height", "width", "longest_edge", "shortest_edge", "max_height", "max_width"] if value is None: pass elif isinstance(value, dict) and 
any(k not in possible_keys for k in value.keys()): raise ValueError(f"Value for size must be a dict with keys {possible_keys} but got size={value}") def device_validator(value: str | int | None = None): possible_names = ["cpu", "cuda", "xla", "xpu", "mps", "meta"] if value is None: pass elif is_torch_available() and isinstance(value, torch.device): # Convert torch.device to string for validation device_str = str(value) if device_str.split(":")[0] not in possible_names: raise ValueError( f"If device is a torch.device, the value must be one of {possible_names} but got device={value}" ) elif isinstance(value, int) and value < 0: raise ValueError( f"If device is an integer, the value must be a strictly positive integer but got device={value}" ) elif isinstance(value, str) and value.split(":")[0] not in possible_names: raise ValueError(f"If device is an string, the value must be one of {possible_names} but got device={value}") elif not isinstance(value, (int, str)): raise ValueError( f"Device must be either an integer device ID, a string (e.g., 'cpu', 'cuda:0'), or a torch.device object, but got device={value}" ) def resampling_validator(value: Union[int, "PILImageResampling"] | None = None): if value is None: pass elif isinstance(value, int) and value not in list(range(6)): raise ValueError( f"The resampling should be one of {list(range(6))} when provided as integer, but got resampling={value}" ) elif is_vision_available() and not isinstance(value, (PILImageResampling, int)): raise ValueError(f"The resampling should an integer or `PIL.Image.Resampling`, but got resampling={value}") def video_metadata_validator(value: VideoMetadataType | None = None): if value is None: return valid_keys = ["total_num_frames", "fps", "width", "height", "duration", "video_backend", "frames_indices"] def check_dict_keys(d: dict[str, Any]) -> bool: return all(key in valid_keys for key in d.keys()) if isinstance(value, Sequence) and isinstance(value[0], Sequence) and isinstance(value[0][0], 
dict): for sublist in value: for item in sublist: if not check_dict_keys(item): raise ValueError( f"Invalid keys found in video metadata. Valid keys: {valid_keys} got: {list(item.keys())}" ) elif isinstance(value, Sequence) and isinstance(value[0], dict): for item in value: if not check_dict_keys(item): raise ValueError( f"Invalid keys found in video metadata. Valid keys: {valid_keys} got: {list(cast(dict, item).keys())}" ) elif isinstance(value, dict): if not check_dict_keys(value): raise ValueError( f"Invalid keys found in video metadata. Valid keys: {valid_keys}, got: {list(value.keys())}" ) def tensor_type_validator(value: str | TensorType | None = None): possible_names = ["pt", "np", "mlx"] if value is None: pass elif not isinstance(value, str) or value not in possible_names: raise ValueError(f"The tensor type should be one of {possible_names} but got tensor_type={value}")
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/utils/type_validators.py", "license": "Apache License 2.0", "lines": 98, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
huggingface/transformers:src/transformers/models/lfm2_moe/configuration_lfm2_moe.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ...configuration_utils import PreTrainedConfig from ...modeling_rope_utils import RopeParameters class Lfm2MoeConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`Lfm2MoeModel`]. It is used to instantiate a LFM2 Moe model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LFM2-8B-A1B model. e.g. [LiquidAI/LFM2-8B-A1B](https://huggingface.co/LiquidAI/LFM2-8B-A1B) Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 65536): Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Lfm2Model`] hidden_size (`int`, *optional*, defaults to 2048): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 7168): Dimension of the MLP representations. moe_intermediate_size (`int`, *optional*, defaults to 1792): Intermediate size of the routed expert. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer decoder. pad_token_id (`int`, *optional*, defaults to 0): Padding token id. 
bos_token_id (`int`, *optional*, defaults to 1): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 2): End of stream token id. tie_word_embeddings (`bool`, *optional*, defaults to `True`): Whether to tie weight embeddings rope_parameters (`RopeParameters`, *optional*): Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with longer `max_position_embeddings`. max_position_embeddings (`int`, *optional*, defaults to 128000): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the rms normalization layers. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*, defaults to 8): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. 
conv_bias (`bool`, *optional*, defaults to `False`): Whether to use bias in the conv layers. conv_L_cache (`int`, *optional*, defaults to 3): L_cache dim in the conv layers. num_dense_layers (`int`, *optional*, defaults to 2): Number of dense Lfm2MoeMLP layers in shallow layers(embed->dense->dense->...->dense->moe->moe...->lm_head). num_experts_per_tok (`int`, *optional*, defaults to 4): Number of selected experts. num_experts (`int`, *optional*, defaults to 32): Number of routed experts. use_expert_bias (`bool`, *optional*, defaults to `True`): Whether to use the expert bias on the routing weights. routed_scaling_factor (`float`, *optional*, defaults to 1.0): Scaling factor for routed experts in MoE models. norm_topk_prob (`bool`, *optional*, defaults to `True`): Whether to normalize the topk probabilities. layer_types (`Optional`, *optional*): Type of each layers. ```python >>> from transformers import Lfm2MoeModel, Lfm2MoeConfig >>> # Initializing a LFM2 Moe model >>> configuration = Lfm2MoeConfig() >>> # Initializing a model from the LFM2-8B-A1B style configuration >>> model = Lfm2MoeModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "lfm2_moe" keys_to_ignore_at_inference = ["past_key_values"] default_theta = 1000000.0 def __init__( self, vocab_size: int = 65536, hidden_size: int = 2048, intermediate_size: int = 7168, moe_intermediate_size: int = 1792, num_hidden_layers: int = 32, pad_token_id: int = 0, bos_token_id: int = 1, eos_token_id: int = 2, tie_word_embeddings: bool = True, rope_parameters: RopeParameters = None, max_position_embeddings: int = 128_000, initializer_range: float = 0.02, use_cache: bool = True, norm_eps: float = 0.00001, num_attention_heads: int = 32, num_key_value_heads: int = 8, conv_bias: bool = False, conv_L_cache: int = 3, num_dense_layers: int = 2, num_experts_per_tok: int = 4, num_experts: int = 32, use_expert_bias: bool = True, routed_scaling_factor: float = 1.0, 
norm_topk_prob: bool = True, layer_types: list[str] | None = None, **kwargs, ): self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.use_cache = use_cache self.norm_eps = norm_eps # attn operator config self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads # custom operator config self.conv_bias = conv_bias self.conv_L_cache = conv_L_cache # moe config self.num_dense_layers = num_dense_layers self.moe_intermediate_size = moe_intermediate_size self.num_experts_per_tok = num_experts_per_tok self.num_experts = num_experts self.use_expert_bias = use_expert_bias self.routed_scaling_factor = routed_scaling_factor self.norm_topk_prob = norm_topk_prob self.layer_types = layer_types self.initializer_range = initializer_range self.rope_parameters = rope_parameters tie_word_embeddings = kwargs.get("tie_embedding", tie_word_embeddings) # to fit original config keys self.tie_word_embeddings = tie_word_embeddings self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id super().__init__(**kwargs) __all__ = ["Lfm2MoeConfig"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/lfm2_moe/configuration_lfm2_moe.py", "license": "Apache License 2.0", "lines": 157, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/lfm2_moe/modular_lfm2_moe.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn.functional as F from torch import nn from ... import initialization as init from ...masking_utils import create_causal_mask from ...modeling_outputs import MoeModelOutputWithPast from ...modeling_utils import PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, logging from ...utils.import_utils import is_causal_conv1d_available from ..lfm2.modeling_lfm2 import ( Lfm2Attention, Lfm2DecoderLayer, Lfm2HybridConvCache, Lfm2MLP, Lfm2RotaryEmbedding, Lfm2ShortConv, ) from ..llama.modeling_llama import LlamaForCausalLM, LlamaPreTrainedModel, LlamaRMSNorm from ..mixtral.modeling_mixtral import MixtralModel from ..qwen2_moe.modeling_qwen2_moe import Qwen2MoeExperts from .configuration_lfm2_moe import Lfm2MoeConfig if is_causal_conv1d_available(): from causal_conv1d import causal_conv1d_fn, causal_conv1d_update else: causal_conv1d_fn, causal_conv1d_update = None, None kernel_modules = (causal_conv1d_fn, causal_conv1d_update) is_fast_path_available = all(kernel_modules) logger = logging.get_logger(__name__) class Lfm2MoeRMSNorm(LlamaRMSNorm): pass class Lfm2MoeRotaryEmbedding(Lfm2RotaryEmbedding): pass class Lfm2MoeMLP(Lfm2MLP): def __init__(self, config: Lfm2MoeConfig, intermediate_size: int | None = None): nn.Module.__init__(self) self.hidden_size = config.hidden_size self.intermediate_size = 
config.intermediate_size if intermediate_size is None else intermediate_size self.w1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.w3 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.w2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) class Lfm2MoeExperts(Qwen2MoeExperts): def __init__(self, config): super().__init__(config) self.act_fn = F.silu class Lfm2MoeSparseMoeBlock(nn.Module): def __init__(self, config): super().__init__() self.top_k = config.num_experts_per_tok self.routed_scaling_factor = config.routed_scaling_factor self.norm_topk_prob = config.norm_topk_prob self.use_expert_bias = config.use_expert_bias self.gate = nn.Linear(config.hidden_size, config.num_experts, bias=False) self.experts = Lfm2MoeExperts(config) if self.use_expert_bias: self.register_buffer("expert_bias", torch.zeros(config.num_experts, dtype=torch.float32)) def route_tokens_to_experts(self, router_logits): routing_weights = router_logits.sigmoid() if self.use_expert_bias: scores_for_routing = routing_weights + self.expert_bias _, selected_experts = torch.topk(scores_for_routing, k=self.top_k, dim=-1) routing_weights = torch.gather(routing_weights, dim=1, index=selected_experts).type_as(router_logits) else: routing_weights, selected_experts = torch.topk(routing_weights, k=self.top_k, dim=-1) if self.norm_topk_prob: routing_weights = routing_weights / (routing_weights.sum(dim=-1, keepdim=True) + 1e-6) routing_weights = routing_weights * self.routed_scaling_factor return selected_experts, routing_weights def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: batch_size, sequence_length, hidden_dim = hidden_states.shape hidden_states_reshaped = hidden_states.view(-1, hidden_dim) router_logits = self.gate(hidden_states_reshaped) selected_experts, routing_weights = self.route_tokens_to_experts(router_logits) final_hidden_states = self.experts(hidden_states_reshaped, selected_experts, 
routing_weights) return final_hidden_states.reshape(batch_size, sequence_length, hidden_dim) class Lfm2MoeHybridConvCache(Lfm2HybridConvCache): pass class Lfm2MoeAttention(Lfm2Attention): pass class Lfm2MoeShortConv(Lfm2ShortConv): pass class Lfm2MoeDecoderLayer(Lfm2DecoderLayer): def __init__(self, config: Lfm2MoeConfig, layer_idx: int): super().__init__(config, layer_idx) self.feed_forward = ( Lfm2MoeMLP(config, intermediate_size=config.intermediate_size) if layer_idx < config.num_dense_layers else Lfm2MoeSparseMoeBlock(config) ) class Lfm2MoePreTrainedModel(LlamaPreTrainedModel): _can_compile_fullgraph = False # uses a non-compilable custom cache class Lfm2MoeHybridConvCache @torch.no_grad() def _init_weights(self, module): PreTrainedModel._init_weights(self, module) if isinstance(module, Lfm2MoeExperts): init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range) init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range) elif isinstance(module, Lfm2MoeSparseMoeBlock): if module.use_expert_bias: init.zeros_(module.expert_bias) class Lfm2MoeModel(MixtralModel): def __init__(self, config: Lfm2MoeConfig): super().__init__(config) self.pos_emb = Lfm2MoeRotaryEmbedding(config) self.embedding_norm = Lfm2MoeRMSNorm(config.hidden_size, eps=config.norm_eps) del self.norm del self.rotary_emb def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: Lfm2MoeHybridConvCache | None = None, inputs_embeds: torch.FloatTensor | None = None, use_cache: bool | None = None, cache_position: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> MoeModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: batch_size 
= inputs_embeds.shape[0] past_key_values = Lfm2MoeHybridConvCache( config=self.config, max_batch_size=batch_size, dtype=self.dtype, device=self.device ) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( config=self.config, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) # Skip masking for decoding stage. We check shape here to be compile-friendly linear_attention = attention_mask if inputs_embeds.shape[1] != 1 else None hidden_states = inputs_embeds position_embeddings = self.pos_emb(hidden_states, position_ids=position_ids) # decoder layers for decoder_layer in self.layers[: self.config.num_hidden_layers]: layer_mask = causal_mask if decoder_layer.is_attention_layer else linear_attention hidden_states = decoder_layer( hidden_states, attention_mask=layer_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.embedding_norm(hidden_states) return MoeModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, ) class Lfm2MoeForCausalLM(LlamaForCausalLM): pass __all__ = ["Lfm2MoeForCausalLM", "Lfm2MoeModel", "Lfm2MoePreTrainedModel"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/lfm2_moe/modular_lfm2_moe.py", "license": "Apache License 2.0", "lines": 179, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:tests/models/lfm2_moe/test_modeling_lfm2_moe.py
# Copyright 2025 the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch LLaMA model.""" import unittest from transformers import AutoTokenizer, is_torch_available, set_seed from transformers.testing_utils import ( Expectations, cleanup, require_deterministic_for_xpu, require_torch, require_torch_accelerator, slow, torch_device, ) from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester if is_torch_available(): import torch from transformers import Lfm2MoeConfig, Lfm2MoeForCausalLM, Lfm2MoeModel from transformers.models.lfm2_moe.modeling_lfm2_moe import Lfm2MoeHybridConvCache class Lfm2MoeModelTester(CausalLMModelTester): if is_torch_available(): config_class = Lfm2MoeConfig base_model_class = Lfm2MoeModel causal_lm_class = Lfm2MoeForCausalLM def __init__( self, parent, num_dense_layers=1, num_hidden_layers=2, layer_types=["full_attention", "conv"], ): super().__init__(parent) self.layer_types = layer_types self.num_dense_layers = num_dense_layers self.num_hidden_layers = num_hidden_layers @require_torch class Lfm2MoeModelTest(CausalLMModelTest, unittest.TestCase): all_model_classes = (Lfm2MoeModel, Lfm2MoeForCausalLM) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": Lfm2MoeModel, "text-generation": Lfm2MoeForCausalLM, } if is_torch_available() else {} ) model_tester_class = Lfm2MoeModelTester # used in `test_torch_compile_for_training` _torch_compile_train_cls = 
Lfm2MoeForCausalLM if is_torch_available() else None def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config): self.assertIsInstance(past_key_values, Lfm2MoeHybridConvCache) # (batch, kv heads, seq_length, head_dim) num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads) head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) attention_shape = (batch_size, num_heads, seq_length, head_dim) conv_shape = (batch_size, config.hidden_size, config.conv_L_cache) for i in range(config.num_hidden_layers): if config.layer_types[i] == "full_attention": self.assertEqual(past_key_values.key_cache[i].shape, attention_shape) self.assertEqual(past_key_values.value_cache[i].shape, attention_shape) else: self.assertEqual(past_key_values.conv_cache[i].shape, conv_shape) def _check_caches_are_equal(self, cache1: Lfm2MoeHybridConvCache, cache2: Lfm2MoeHybridConvCache): if not isinstance(cache1, Lfm2MoeHybridConvCache) or not isinstance(cache2, Lfm2MoeHybridConvCache): raise ValueError("The wrong cache is being used!") if not len(cache1) == len(cache2): raise ValueError("Both caches do not have the same number of layers.") num_layers = len(cache1) for idx in range(num_layers): torch.testing.assert_close(cache1.key_cache[idx], cache2.key_cache[idx]) torch.testing.assert_close(cache1.value_cache[idx], cache2.value_cache[idx]) torch.testing.assert_close(cache1.conv_cache[idx], cache2.conv_cache[idx]) def test_attention_outputs(self): """Lfm2Moe alternates between attention and short-conv layers.""" config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True # force eager attention to support output attentions config._attn_implementation = "eager" seq_len = getattr(self.model_tester, "seq_length", None) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True 
model = model_class._from_config(config, attn_implementation="eager").to(torch_device).eval() config = model.config with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), sum(layer == "full_attention" for layer in config.layer_types)) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config).to(torch_device).eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), sum(layer == "full_attention" for layer in config.layer_types)) self.assertListEqual(list(attentions[0].shape[-3:]), [config.num_attention_heads, seq_len, seq_len]) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config).to(torch_device).eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self_attentions = outputs.attentions self.assertEqual(out_len + 1, len(outputs)) self.assertEqual(len(self_attentions), sum(layer == "full_attention" for layer in config.layer_types)) self.assertListEqual(list(self_attentions[0].shape[-3:]), [config.num_attention_heads, seq_len, seq_len]) @require_torch_accelerator @slow class Lfm2MoeIntegrationTest(unittest.TestCase): @classmethod def setUpClass(cls): cls.model = None @classmethod def tearDownClass(cls): del cls.model cleanup(torch_device, gc_collect=True) def tearDown(self): cleanup(torch_device, gc_collect=True) @classmethod def get_model(cls): if cls.model is None: cls.model = Lfm2MoeForCausalLM.from_pretrained( "LiquidAI/LFM2-8B-A1B", device_map="auto", dtype=torch.bfloat16, experts_implementation="eager", ) return cls.model @slow def test_model_1a8b_logits(self): set_seed(1789) input_ids = [1, 
22998, 768, 1947, 797, 22017, 811, 6332, 928, 5743, 797, 779, 48123, 772, 33551, 60996, 523] model = self.get_model() input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device) with torch.no_grad(): out = model(input_ids).logits.float().cpu() # fmt: off # Expected mean on dim = -1 EXPECTED_MEANS = Expectations( { ("cuda", None): torch.tensor([[-1.3912, -0.4653, -1.3339, -1.3249, -1.0985, -1.2373, -1.4599, -0.7515, -0.6140, -1.2329, -1.1481, -1.0081, -0.9937, -0.8875, -1.5539, -1.7283, -1.6284]]), ("xpu", None): torch.tensor([[-1.3879, -0.4730, -1.3193, -1.3139, -1.0826, -1.2129, -1.4744, -0.7485, -0.6004, -1.2353, -1.1602, -1.0432, -1.0180, -0.9099, -1.5949, -1.7487, -1.5991]]), } ) # fmt: on EXPECTED_MEAN = EXPECTED_MEANS.get_expectation() out_mean = out.mean(-1) torch.testing.assert_close(out_mean, EXPECTED_MEAN, rtol=1e-2, atol=1e-2) # fmt: off # Expected portion of the logits EXPECTED_SLICES = Expectations( { ("cuda", None): torch.tensor([-1.2734, 2.4844, 5.5000, -1.3438, -1.3281, -1.3516, 1.9375, 5.8438, -0.6641, -1.2969]), ("xpu", None): torch.tensor([-1.2734, 2.4531, 5.4688, -1.3438, -1.3281, -1.3516, 1.9297, 5.7812, -0.6719, -1.3125]), } ) # fmt: on EXPECTED_SLICE = EXPECTED_SLICES.get_expectation() out_slice = out[0, 0, :10] torch.testing.assert_close(out_slice, EXPECTED_SLICE, rtol=1e-4, atol=1e-4) @slow def test_model_1a8b_generation(self): EXPECTED_TEXT_COMPLETION = """In 1st century A.D., the Roman Empire controlled much of Europe, North Africa, and parts of the Middle East.""" set_seed(1789) prompt = "In 1st century A.D., the Roman Empire" tokenizer = AutoTokenizer.from_pretrained("LiquidAI/LFM2-8B-A1B", use_fast=False) model = self.get_model() input_ids = tokenizer.encode(prompt, return_tensors="pt", add_special_tokens=True).to( model.model.embed_tokens.weight.device ) with torch.no_grad(): generated_ids = model.generate(input_ids, max_new_tokens=15, do_sample=False) text = tokenizer.decode(generated_ids[0], 
skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) @slow @require_deterministic_for_xpu def test_model_1a8b_batched_chat_generation(self): prompts = ["Who are you?", "Complete the text: Lorem ipsum dolor ", "The Meji Restoration in Japan ended"] # fmt: off EXPECTED_TEXT_COMPLETIONS = Expectations( { ("cuda", None): [ "Who are you? (AI) designed to assist? \nI am an AI assistant developed to", "Complete the text: Lorem ipsum dolor ipsum dolor ipsum dolor ipsum dolor ipsum.", "The Meji Restoration in Japan ended** \n**A.** The shogunate was abolished, and imperial" ], ("xpu", None): [ "Who are you? (AI) designed to assist? \nI am an AI language model developed", "Complete the text: Lorem ipsum dolor ipsum dolor ipsum dolor ipsum dolor ipsum dolor", "The Meji Restoration in Japan ended, which occurred in 1868, marked the: \nA) Establish" ], } ) # fmt: on EXPECTED_TEXT_COMPLETION = EXPECTED_TEXT_COMPLETIONS.get_expectation() set_seed(1789) tokenizer = AutoTokenizer.from_pretrained("LiquidAI/LFM2-8B-A1B", use_fast=False) model = self.get_model() batched_input_ids = tokenizer(prompts, return_tensors="pt", padding=True).to( model.model.embed_tokens.weight.device ) with torch.no_grad(): generated_ids = model.generate(**batched_input_ids, max_new_tokens=15, do_sample=False) text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/lfm2_moe/test_modeling_lfm2_moe.py", "license": "Apache License 2.0", "lines": 219, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:src/transformers/utils/kernel_config.py
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..utils import PushToHubMixin, is_torch_available if is_torch_available(): import torch def infer_device(model): """ Infers the device type from the model parameters. Args: model: The model instance. Returns: The device type. """ EXAMPLE_MAPPING = """ { "RMSNorm": { "cuda": "kernels-community/layer_norm:LlamaRMSNorm", ... }, ... } """ try: param = next(model.parameters()) except StopIteration: raise ValueError( f"Cannot determine model device, please provide a device to the mapping. 
Example: {EXAMPLE_MAPPING}" ) dev_type = param.device.type if dev_type == "cuda": # Refine based on actual platform if torch.version.hip is not None: return "rocm" return dev_type def add_to_mapping(layer_name, device, repo_name, mode, compatible_mapping): from kernels import LayerRepository if device not in ["cuda", "rocm", "xpu", "npu"]: raise ValueError(f"Only cuda, rocm, xpu and npu devices supported, got: {device}") repo_layer_name = repo_name.split(":")[1] repo_id = repo_name.split(":")[0] compatible_mapping[layer_name] = { device: { mode: LayerRepository( repo_id=repo_id, layer_name=repo_layer_name, ) } } def add_to_mapping_local(layer_name, device, repo_name, mode, compatible_mapping): from pathlib import Path from kernels import LocalLayerRepository if device not in ["cuda", "rocm", "xpu", "npu"]: raise ValueError(f"Only cuda, rocm, xpu and npu devices supported, got: {device}") repo_layer_name = repo_name.split(":")[1] repo_path = repo_name.split(":")[0] repo_package_name = repo_path.split("/")[-1] compatible_mapping[layer_name] = { device: { mode: LocalLayerRepository( repo_path=Path(repo_path), package_name=repo_package_name, layer_name=repo_layer_name, ) } } class KernelConfig(PushToHubMixin): """ Kernel configuration class. This class is used to configure the kernel mapping for a model. 
""" def __init__(self, kernel_mapping={}, use_local_kernel=False): self.kernel_mapping = kernel_mapping self.registered_layer_names = {} self.use_local_kernel = use_local_kernel def update_kernel(self, repo_id, registered_name, layer_name, device, mode, revision=None): from kernels import LayerRepository self.kernel_mapping[registered_name] = { device: { mode: LayerRepository( repo_id=repo_id, layer_name=layer_name, revision=revision, ) } } def store_registered_layer_names(self, model): for name, module in model.named_modules(): if hasattr(module, "kernel_layer_name"): self.registered_layer_names[name] = module.kernel_layer_name def sanitize_kernel_mapping(self, model): """ Validates the kernel_mapping to ensure that: 1. Each layer_name in the mapping is registered in the model (i.e., the model contains a module with a matching kernel_layer_name). 2. Each kernel value is either a string of the form 'org/repo:layer_name' or a dict mapping device types ("cuda", "rocm", "xpu", "npu") to such strings. 3. Each device key in a dict is one of "cuda", "rocm", "xpu", or "npu". 4. Each repo_name is a valid repository and layer name in the format 'org/repo:layer_name' (i.e., a string containing both a slash and a colon). 5. If a local path is detected, it should be in the format '/abs/path:layer_name'. The absolute path must include the `package_name`, like "/home/user/layer_norm". Args: model: The model instance whose modules are checked for registered kernel_layer_name attributes. Raises: ValueError: If a layer_name is not registered in the model, if a device is not supported, or if a repo_name is not a valid 'org/repo:layer_name' string. """ MAPPING_FORMAT = """ For single device form remote { "RMSNorm": "kernels-community/layer_norm:LlamaRMSNorm", ... }, For multiple devices form remote { "RMSNorm": { "cuda": "kernels-community/layer_norm:LlamaRMSNorm", "rocm": "kernels-community/layer_norm:LlamaRMSNorm", ... }, ... 
} For single device form local { "RMSNorm": "/abs/path:LlamaRMSNorm", ... }, For multiple devices form local { "RMSNorm": { "cuda": "/abs/path:LlamaRMSNorm", "rocm": "/abs/path:LlamaRMSNorm", ... }, ... } """ self.store_registered_layer_names(model) # Validate that the kernel mapping is a dict if not isinstance(self.kernel_mapping, dict): raise ValueError( f"Kernel mapping must be a dict of the following format: {MAPPING_FORMAT}, got: {type(self.kernel_mapping)}" ) for layer_name, kernel in self.kernel_mapping.items(): if layer_name not in self.registered_layer_names.values(): raise ValueError( f"Layer {layer_name} is not registered in the model, please register it first using use_kernel_forward_from_hub" ) if isinstance(kernel, str): if "/" not in kernel or ":" not in kernel: raise ValueError( f"Kernel mapping for '{layer_name}' must be a valid repo name with a layer name (e.g., 'org/repo:layer_name' or '/abs/path:layer_name'), got: {kernel}" ) elif isinstance(kernel, dict): for device, repo_name in kernel.items(): if device not in ["cuda", "rocm", "xpu", "npu"]: raise ValueError(f"Only cuda, rocm, xpu and npu devices supported, got: {device}") if not isinstance(repo_name, str) or "/" not in repo_name or ":" not in repo_name: raise ValueError( f"Kernel mapping for '{layer_name}' must be a valid repo name with a layer name (e.g., 'org/repo:layer_name' or '/abs/path:layer_name'), got: {repo_name}" ) else: raise ValueError(f"Kernel mapping must follow the format: {MAPPING_FORMAT}, got: {kernel}") def create_compatible_mapping(self, model, compile=False): """ Transforms a simple kernel_mapping of the form: { "RMSNorm": "kernels-community/layer_norm:LlamaRMSNorm", ... }, or for local path: { "RMSNorm": "/home/user/liger_kernels:LigerRMSNorm", ... 
}, into a nested mapping: { "RMSNorm": { "cuda": { Mode.INFERENCE: LayerRepository( repo_id="kernels-community/layer_norm", layer_name="LlamaRMSNorm", ) } } } or for local path: { "RMSNorm": { "cuda": { Mode.INFERENCE: LocalLayerRepository( repo_path=Path("/home/user/liger_kernels"), package_name="liger_kernels", layer_name="LigerRMSNorm", ) } } } that's compatible with the kernels library. The device is inferred from the model's parameters if not provided. The Mode is inferred from the model's training state. """ from kernels import Mode compatible_mapping = {} current_device = infer_device(model) for layer_name, kernel in self.kernel_mapping.items(): # Infer Mode: use Mode.TRAINING if model is training, else use Mode.INFERENCE mode = Mode.TRAINING if model.training else Mode.INFERENCE if compile: mode = mode | Mode.TORCH_COMPILE if isinstance(kernel, str): repo_name = kernel if not self.use_local_kernel: add_to_mapping(layer_name, current_device, repo_name, mode, compatible_mapping) else: add_to_mapping_local(layer_name, current_device, repo_name, mode, compatible_mapping) elif isinstance(kernel, dict): for device, repo_name in kernel.items(): if device != current_device: continue if not self.use_local_kernel: add_to_mapping(layer_name, device, repo_name, mode, compatible_mapping) else: add_to_mapping_local(layer_name, device, repo_name, mode, compatible_mapping) self.kernel_mapping = compatible_mapping
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/utils/kernel_config.py", "license": "Apache License 2.0", "lines": 240, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:utils/modular_model_detector.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # 🔴🔴🔴 THIS IS AN INTERNAL TOOL. It WILL interact with the hub and use significant local compute resources. Use at your own risk. """ Modular model detector: utilities for detecting code similarities between model implementations. This module provides tools to analyze and detect similarities between different model implementations in the transformers library. It uses both embedding-based and token-based (Jaccard) similarity metrics to identify similar code patterns across different model definitions. Its function is to identify which models can be _modular_-ized, meaning, which already existing classes are present in the codebase and look very similar to the one we have. Two scores are computed, one is a code embedding, and the other is a simple Jaccard bag-of-tokens index for overlap of token sets. A score of 1.00 means the code is identical. Usage: ```bash cd transformers # Use directly the util, it will download the index embedding from the hub. It will require some RAM/VRAM. 
>>> python utils/modular_model_detector.py --modeling-file my_new_beit3_modeling_file.py Loading checkpoint shards: 100%|███████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 33.62it/s] encoding 21 query definitions with Qwen/Qwen3-Embedding-4B (device=cuda, batch=16, max_length=4096) stuff.py::Beit3ImageTextMatchingOutput: embedding: blip_2::Blip2ImageTextMatchingModelOutput (0.9994) chinese_clip::ChineseCLIPOutput (0.9818) owlvit::OwlViTOutput (0.9818) aimv2::Aimv2Output (0.9818) blip::BlipOutput (0.9818) jaccard: owlv2::Owlv2Output (0.9667) metaclip_2::MetaClip2Output (0.9667) altclip::AltCLIPOutput (0.9667) owlvit::OwlViTOutput (0.9667) blip::BlipOutput (0.9667) intersection: blip::BlipOutput owlvit::OwlViTOutput stuff.py::Beit3MLP: embedding: efficientloftr::EfficientLoFTRMLP (0.9718) seggpt::SegGptMlp (0.9650) mgp_str::MgpstrMlp (0.9646) vitpose_backbone::VitPoseBackboneMLP (0.9640) granitemoeshared::GraniteMoeSharedMLP (0.9633) jaccard: chinese_clip::ChineseCLIPTextSelfOutput (0.5294) convbert::ConvBertSelfOutput (0.5294) bert::BertSelfOutput (0.5294) roformer::RoFormerSelfOutput (0.5294) layoutlmv3::LayoutLMv3SelfOutput (0.5294) intersection: stuff.py::Beit3FeedForwardNetwork: embedding: prophetnet::ProphetNetFeedForward (0.9766) dab_detr::DabDetrDecoderLayerFFN (0.9730) kosmos2::Kosmos2TextFFN (0.9697) kosmos2_5::Kosmos2_5TextFFN (0.9697) parakeet::ParakeetEncoderFeedForward (0.9678) jaccard: groupvit::GroupViTMLP (0.4898) convbert::ConvBertOutput (0.4600) chinese_clip::ChineseCLIPTextOutput (0.4565) bert::BertOutput (0.4565) roformer::RoFormerOutput (0.4565) intersection: ``` # If you wish to build the index first, you can run python utils/modular_model_detector.py --build # You can also change the embedding model for a larger/smaller one. 
""" import argparse import ast import json import logging import os import re from datetime import datetime from functools import cache from pathlib import Path import numpy as np import torch from huggingface_hub import HfApi, snapshot_download from huggingface_hub import logging as huggingface_hub_logging from safetensors.numpy import load_file as safetensors_load from safetensors.numpy import save_file as safetensors_save from tqdm import tqdm import transformers from transformers import AutoModel, AutoTokenizer from transformers.utils import enable_tf32 from transformers.utils import logging as transformers_logging # ANSI color codes for CLI output styling ANSI_RESET = "\033[0m" ANSI_BOLD = "\033[1m" ANSI_HEADER = "\033[1;36m" ANSI_SECTION = "\033[1;35m" ANSI_ROW = "\033[0;37m" ANSI_HIGHLIGHT_TOP = "\033[1;32m" ANSI_HIGHLIGHT_OLD = "\033[1;33m" ANSI_HIGHLIGHT_CANDIDATE = "\033[1;34m" os.environ["HF_HUB_DISABLE_PROGRESS_BARS"] = "1" os.environ["TRANSFORMERS_VERBOSITY"] = "error" MODELS_ROOT = Path("src/transformers/models") EMBEDDINGS_PATH = "embeddings.safetensors" INDEX_MAP_PATH = "code_index_map.json" TOKENS_PATH = "code_index_tokens.json" HUB_DATASET_DEFAULT = "hf-internal-testing/transformers_code_embeddings" EMBEDDING_MODEL = "Qwen/Qwen3-Embedding-4B" BATCH_SIZE = 16 MAX_LENGTH = 4096 def _normalize(string: str | None) -> str: """ Normalize a string by removing all non-alphanumeric characters and converting to lowercase. Args: string (`str` or `None`): The string to normalize. Returns: `str`: The normalized string, or empty string if input is None. """ return re.sub(r"[^a-z0-9]+", "", string.lower()) if string else "" def _strip_source_for_tokens(code: str) -> str: """ Strip docstrings, comments, and import statements from source code. Args: code (`str`): The source code to strip. Returns: `str`: The stripped source code. 
""" code = re.sub(r'("""|\'\'\')(?:.|\n)*?\1', "", code) code = re.sub(r"#.*", "", code) return "\n".join(line for line in code.splitlines() if not re.match(r"\s*(from|import)\s+", line)) def _tokenize(code: str) -> set[str]: """ Extract all Python identifiers from source code. Args: code (`str`): The source code to tokenize. Returns: `set[str]`: A set of all identifiers found in the code. """ return set(re.findall(r"\b[a-zA-Z_][a-zA-Z0-9_]*\b", code)) def _leading_symbol_prefix(name: str) -> str: """ Extract the leading prefix from a symbol name (e.g., 'Llama' from 'LlamaAttention'). Args: name (`str`): The symbol name to extract prefix from. Returns: `str`: The leading prefix, or empty string if no match. """ match = re.match(r"^([A-Z][a-z0-9]+)", name) or re.match(r"^([A-Za-z0-9]+)", name) return match.group(1) if match else "" def _sanitize_for_embedding(code: str, model_hint: str | None, symbol_hint: str | None) -> str: """ Sanitize code for embedding by replacing model-specific identifiers with generic placeholder. Args: code (`str`): The source code to sanitize. model_hint (`str` or `None`): Hint about the model name (e.g., 'llama'). symbol_hint (`str` or `None`): Hint about the symbol name (e.g., 'LlamaAttention'). Returns: `str`: The sanitized code with model-specific identifiers replaced by 'Model'. 
""" base = _strip_source_for_tokens(code) variants = set() if model_hint: variants.add(model_hint) variants.add(model_hint.replace("_", "")) variants.add(re.sub(r"\d+", "", model_hint)) if symbol_hint: prefix = _leading_symbol_prefix(symbol_hint) if prefix: variants.add(prefix) variants.add(prefix.replace("_", "")) variants.add(re.sub(r"\d+", "", prefix)) variants |= {variant.lower() for variant in list(variants)} sanitized = base for variant in sorted({x for x in variants if len(x) >= 3}, key=len, reverse=True): sanitized = re.sub(re.escape(variant), "Model", sanitized, flags=re.IGNORECASE) return sanitized class CodeSimilarityAnalyzer: """ Analyzer for detecting code similarities between model implementations. This class uses embedding-based and token-based similarity metrics to identify similar code patterns across different model definitions in the transformers library. Args: hub_dataset (`str`): The Hub dataset repository ID containing the code embeddings index. """ def __init__(self, hub_dataset: str): for name in ("huggingface_hub", "httpx", "urllib3", "transformers"): logging.getLogger(name).setLevel(logging.ERROR) huggingface_hub_logging.set_verbosity_error() transformers_logging.set_verbosity_error() enable_tf32(True) torch.set_grad_enabled(False) self.models_root = MODELS_ROOT self.hub_dataset = hub_dataset self.tokenizer = AutoTokenizer.from_pretrained(EMBEDDING_MODEL) self.model = AutoModel.from_pretrained(EMBEDDING_MODEL, torch_dtype="auto", device_map="auto").eval() self.device = self.model.device self.index_dir: Path | None = None # ---------- HUB IO ---------- def _resolve_index_path(self, filename: str) -> Path: if self.index_dir is None: return Path(filename) return self.index_dir / filename def ensure_local_index(self) -> None: """Ensure index files are available locally, preferring Hub cache snapshots.""" if self.index_dir is not None and all( (self.index_dir / fname).exists() for fname in (EMBEDDINGS_PATH, INDEX_MAP_PATH, TOKENS_PATH) ): 
return workspace_dir = Path.cwd() if all((workspace_dir / fname).exists() for fname in (EMBEDDINGS_PATH, INDEX_MAP_PATH, TOKENS_PATH)): self.index_dir = workspace_dir return logging.info(f"downloading index from hub cache: {self.hub_dataset}") snapshot_path = snapshot_download(repo_id=self.hub_dataset, repo_type="dataset") snapshot_dir = Path(snapshot_path) missing = [ fname for fname in (EMBEDDINGS_PATH, INDEX_MAP_PATH, TOKENS_PATH) if not (snapshot_dir / fname).exists() ] if missing: raise FileNotFoundError("Missing expected files in Hub snapshot: " + ", ".join(missing)) self.index_dir = snapshot_dir def push_index_to_hub(self) -> None: """Upload index files to the Hub dataset repository.""" api = HfApi() api.create_repo(repo_id=self.hub_dataset, repo_type="dataset", exist_ok=True) for fname in (EMBEDDINGS_PATH, INDEX_MAP_PATH, TOKENS_PATH): logging.info(f"pushing {fname} -> {self.hub_dataset}") api.upload_file( path_or_fileobj=fname, path_in_repo=os.path.basename(fname), repo_id=self.hub_dataset, repo_type="dataset", ) # ---------- parsing & encoding ---------- def _extract_definitions( self, file_path: Path, relative_to: Path | None = None, model_hint: str | None = None ) -> tuple[dict[str, str], dict[str, str], dict[str, list[str]], dict[str, str]]: """ Extract class and function definitions from a Python file. Args: file_path (`Path`): Path to the Python file to parse. relative_to (`Path` or `None`): Base path for computing relative identifiers. model_hint (`str` or `None`): Model name hint for sanitization. 
Returns: `tuple[dict[str, str], dict[str, str], dict[str, list[str]], dict[str, str]]`: A tuple containing: - definitions_raw: Mapping of identifiers to raw source code - definitions_sanitized: Mapping of identifiers to sanitized source code - definitions_tokens: Mapping of identifiers to sorted token lists - definitions_kind: Mapping of identifiers to either "class" or "function" """ definitions_raw = {} definitions_sanitized = {} definitions_tokens = {} definitions_kind = {} source = file_path.read_text(encoding="utf-8") lines = source.splitlines() tree = ast.parse(source) for node in ast.iter_child_nodes(tree): if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)): segment = ast.get_source_segment(source, node) if segment is None and hasattr(node, "lineno") and hasattr(node, "end_lineno"): start = max(0, node.lineno - 1) end = node.end_lineno segment = "\n".join(lines[start:end]) if segment: identifier = ( f"{file_path.relative_to(relative_to)}:{node.name}" if relative_to else f"{file_path.name}:{node.name}" ) definitions_raw[identifier] = segment sanitized = _sanitize_for_embedding(segment, model_hint, node.name) definitions_sanitized[identifier] = sanitized definitions_tokens[identifier] = sorted(_tokenize(sanitized)) if isinstance(node, ast.ClassDef): definitions_kind[identifier] = "class" else: definitions_kind[identifier] = "function" return definitions_raw, definitions_sanitized, definitions_tokens, definitions_kind def _infer_model_from_relative_path(self, relative_path: Path) -> str | None: try: relative = relative_path.resolve().relative_to(self.models_root.resolve()) return relative.parts[0] except Exception: return None def _infer_query_model_name(self, modeling_file: Path) -> str | None: model = self._infer_model_from_relative_path(modeling_file) if model: return model stem = modeling_file.stem if stem.startswith("modeling_") and len(stem) > len("modeling_"): return stem[len("modeling_") :] return None def _encode_batch(self, 
texts: list[str]) -> np.ndarray: """ Encode a batch of texts into normalized embeddings. Args: texts (`list[str]`): List of text strings to encode. Returns: `np.ndarray`: Normalized embeddings as a float32 numpy array. """ encoded = self.tokenizer(texts, padding=True, truncation=True, max_length=MAX_LENGTH, return_tensors="pt") encoded = {key: value.to(self.device) for key, value in encoded.items()} with ( torch.autocast(device_type=self.device.type, dtype=self.dtype) if self.device.type == "cuda" else torch.no_grad() ): output = self.model(**encoded) if hasattr(output, "last_hidden_state"): embeddings = output.last_hidden_state mask = encoded["attention_mask"].unsqueeze(-1) embeddings = (embeddings * mask).sum(dim=1) / mask.sum(dim=1).clamp_min(1e-9) elif hasattr(output, "pooler_output"): embeddings = output.pooler_output else: embeddings = output[0].mean(dim=1) embeddings = torch.nn.functional.normalize(embeddings.float(), p=2, dim=1) return embeddings.cpu().numpy().astype("float32") def encode(self, texts: list[str]) -> np.ndarray: """ Encode a list of texts into embeddings, processing in batches. Args: texts (`list[str]`): List of text strings to encode. Returns: `np.ndarray`: Stacked embeddings for all texts. 
""" output = [] for i in tqdm(range(0, len(texts), BATCH_SIZE), desc="encode", leave=False): output.append(self._encode_batch(texts[i : i + BATCH_SIZE])) if self.device.type == "cuda": torch.cuda.empty_cache() return np.vstack(output) if output else np.zeros((0, 0), dtype="float32") # ---------- build & search ---------- def build_index(self) -> None: """Build the code similarity index from all modeling files and save to disk.""" logging.info("collecting files") files = list(self.models_root.rglob("modeling_*.py")) logging.info(f"parsing {len(files)} files") identifiers = [] sanitized_sources = [] tokens_map = {} for file_path in tqdm(files, desc="parse", leave=False): model_hint = self._infer_model_from_relative_path(file_path) ( _, definitions_sanitized, definitions_tokens, _, ) = self._extract_definitions(file_path, self.models_root, model_hint) for identifier in definitions_sanitized.keys(): identifiers.append(identifier) sanitized_sources.append(definitions_sanitized[identifier]) tokens_map[identifier] = definitions_tokens[identifier] logging.info( f"encoding {len(sanitized_sources)} definitions with {EMBEDDING_MODEL} (device={self.device.type}, batch={BATCH_SIZE}, max_length={MAX_LENGTH})" ) embeddings = self.encode(sanitized_sources) safetensors_save({"embeddings": embeddings}, EMBEDDINGS_PATH) with open(INDEX_MAP_PATH, "w", encoding="utf-8") as file: json.dump({int(i): identifiers[i] for i in range(len(identifiers))}, file) with open(TOKENS_PATH, "w", encoding="utf-8") as file: json.dump(tokens_map, file) self.index_dir = Path.cwd() def _topk_embedding( self, query_embedding_row: np.ndarray, base_embeddings: np.ndarray, identifier_map: dict[int, str], self_model_normalized: str, self_name: str, k: int, ) -> list[tuple[str, float]]: similarities = query_embedding_row @ base_embeddings.T indices = np.argpartition(-similarities, k + 32)[: k + 32] indices = indices[np.argsort(-similarities[indices])] output = [] for match_id in indices: identifier = 
identifier_map[int(match_id)] parent_relative_path, match_name = identifier.split(":", 1) parent_model = Path(parent_relative_path).parts[0] if match_name == self_name: continue if self_model_normalized and _normalize(parent_model) == self_model_normalized: continue output.append((identifier, float(similarities[match_id]))) if len(output) >= k: break return output def _topk_jaccard( self, query_tokens: set[str], identifiers: list[str], tokens_map: dict[str, list[str]], self_model_normalized: str, self_name: str, k: int, ) -> list[tuple[str, float]]: """ Find top-k most similar definitions using Jaccard similarity on token sets. Args: query_tokens (`set[str]`): Set of tokens from the query definition. identifiers (`list[str]`): List of all definition identifiers in the index. tokens_map (`dict[str, list[str]]`): Mapping of identifiers to their token lists. self_model_normalized (`str`): Normalized name of the query model to exclude. self_name (`str`): Name of the query definition to exclude. k (`int`): Number of top results to return. Returns: `list[tuple[str, float]]`: List of (identifier, score) tuples. """ scores = [] for identifier in identifiers: parent_relative_path, match_name = identifier.split(":", 1) parent_model = Path(parent_relative_path).parts[0] if match_name == self_name: continue if self_model_normalized and _normalize(parent_model) == self_model_normalized: continue tokens = set(tokens_map.get(identifier, [])) if not tokens or not query_tokens: continue score = len(query_tokens & tokens) / len(query_tokens | tokens) if score > 0: scores.append((identifier, score)) scores.sort(key=lambda x: x[1], reverse=True) return scores[:k] def analyze_file( self, modeling_file: Path, top_k_per_item: int = 5, allow_hub_fallback: bool = True, use_jaccard=False ) -> dict[str, dict[str, list]]: """ Analyze a modeling file and find similar code definitions in the index. Args: modeling_file (`Path`): Path to the modeling file to analyze. 
top_k_per_item (`int`, *optional*, defaults to 5): Number of top matches to return per definition. allow_hub_fallback (`bool`, *optional*, defaults to `True`): Whether to download index from Hub if not found locally. Returns: `dict[str, dict[str, list]]`: Dictionary mapping definition names to their similarity results. Each result contains 'embedding', 'jaccard', and 'intersection' keys. """ if allow_hub_fallback: self.ensure_local_index() base = safetensors_load(str(self._resolve_index_path(EMBEDDINGS_PATH))) base_embeddings = base["embeddings"] with open(self._resolve_index_path(INDEX_MAP_PATH), "r", encoding="utf-8") as file: identifier_map = {int(key): value for key, value in json.load(file).items()} identifiers = [identifier_map[i] for i in range(len(identifier_map))] with open(self._resolve_index_path(TOKENS_PATH), "r", encoding="utf-8") as file: tokens_map = json.load(file) self_model = self._infer_query_model_name(modeling_file) definitions_raw, definitions_sanitized, _, definitions_kind = self._extract_definitions( modeling_file, None, self_model ) query_identifiers = list(definitions_raw.keys()) query_sources_sanitized = [definitions_sanitized[key] for key in query_identifiers] query_tokens_list = [set(_tokenize(source)) for source in query_sources_sanitized] self_model_normalized = _normalize(self_model) logging.info( f"encoding {len(query_sources_sanitized)} query definitions with {EMBEDDING_MODEL} (device={self.device.type}, batch={BATCH_SIZE}, max_length={MAX_LENGTH})" ) query_embeddings = self.encode(query_sources_sanitized) output = {} for i, query_identifier in enumerate(query_identifiers): query_name = query_identifier.split(":")[-1] embedding_top = self._topk_embedding( query_embeddings[i], base_embeddings, identifier_map, self_model_normalized, query_name, top_k_per_item ) embedding_set = {identifier for identifier, _ in embedding_top} kind = definitions_kind.get(query_identifier, "function") entry = {"kind": kind, "embedding": embedding_top} if 
use_jaccard: jaccard_top = self._topk_jaccard( query_tokens_list[i], identifiers, tokens_map, self_model_normalized, query_name, top_k_per_item ) jaccard_set = {identifier for identifier, _ in jaccard_top} intersection = set(embedding_set & jaccard_set) entry.update({"jaccard": jaccard_top, "intersection": intersection}) output[query_name] = entry return output _RELEASE_RE = re.compile( r"(?:^|[\*_`\s>])(?:this|the)\s+model\s+was\s+released\s+on\s+(\d{4}-\d{2}-\d{2})\b", re.IGNORECASE ) def build_date_data() -> dict[str, str]: """ Scan Markdown files in `root_dir` and build {model_id: date_released}. - model_id is the filename without extension (e.g., "llama" for "llama.md") - date_released is the first YYYY-MM-DD matched after "...was released on ..." - Ignores non-*.md files and directories. Returns: dict[str, str]: mapping of model_id -> ISO date string (YYYY-MM-DD). Files without a match are simply omitted. """ root_dir = transformers.__file__.split("src/transformers")[0] root = Path(root_dir).joinpath("docs/source/en/model_doc") result: dict[str, str] = {} for md_path in root.glob("*.md"): try: text = md_path.read_text(encoding="utf-8", errors="ignore") except Exception: # Skip unreadable files quietly logging.info(f"Failed to read md for {md_path}") m = _RELEASE_RE.search(text) if m: model_id = md_path.stem # e.g., "llama" from "llama.md" result[model_id] = m.group(1) return result def _format_table(headers: list[str], rows: list[tuple[str, ...] 
| None], row_styles: list[str] | None = None) -> str: if not rows: return f"{ANSI_ROW}(no matches){ANSI_RESET}" widths = [len(header) for header in headers] for row in rows: if row is None: continue for idx, cell in enumerate(row): widths[idx] = max(widths[idx], len(cell)) header_line = " | ".join(header.ljust(widths[idx]) for idx, header in enumerate(headers)) divider = "-+-".join("-" * widths[idx] for idx in range(len(headers))) total_width = sum(widths) + 3 * (len(headers) - 1) styled_rows = [] style_idx = 0 for row in rows: if row is None: styled_rows.append(f"{ANSI_SECTION}{'-' * total_width}{ANSI_RESET}") continue line = " | ".join(cell.ljust(widths[col_idx]) for col_idx, cell in enumerate(row)) style = ANSI_ROW if row_styles and style_idx < len(row_styles) and row_styles[style_idx]: style = row_styles[style_idx] styled_rows.append(f"{style}{line}{ANSI_RESET}") style_idx += 1 return "\n".join([f"{ANSI_SECTION}{header_line}{ANSI_RESET}", divider] + styled_rows) def _parse_release_date(value: str) -> datetime | None: """Return a datetime parsed from YYYY-MM-DD strings, otherwise None.""" try: return datetime.strptime(value, "%Y-%m-%d") except (TypeError, ValueError): return None @cache def _load_definition_line_map(relative_path: str) -> dict[str, int]: """Return {definition_name: line_number} for top-level definitions in the given file.""" file_path = MODELS_ROOT / relative_path try: source = file_path.read_text(encoding="utf-8") except (FileNotFoundError, OSError): return {} # gracefully keep going try: tree = ast.parse(source) except SyntaxError: return {} line_map: dict[str, int] = {} for node in ast.iter_child_nodes(tree): if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)): line_map[node.name] = getattr(node, "lineno", None) or 1 elif isinstance(node, ast.Assign): continue return line_map def _resolve_definition_location(relative_path: str, definition: str) -> tuple[str, str]: """Return full path and formatted line number string for 
the given definition.""" full_path = MODELS_ROOT / relative_path line = _load_definition_line_map(relative_path).get(definition) line_str = str(line) if line is not None else "?" return str(full_path), line_str def _colorize_heading(text: str) -> str: return f"{ANSI_HEADER}{ANSI_BOLD}{text}{ANSI_RESET}" def main(): """CLI entry point for the modular model detector.""" logging.basicConfig(level=logging.INFO, format="%(message)s") parser = argparse.ArgumentParser(prog="hf-code-sim") parser.add_argument("--build", action="store_true") parser.add_argument("--modeling-file", type=str, help='You can just specify "vits" if you are lazy like me.') parser.add_argument( "--push-new-index", action="store_true", help="After --build, push index files to a Hub dataset." ) parser.add_argument( "--hub-dataset", type=str, default=HUB_DATASET_DEFAULT, help="Hub dataset repo id to pull/push the index." ) parser.add_argument("--use_jaccard", type=bool, default=False, help="Whether or not to use jaccard index") args = parser.parse_args() analyzer = CodeSimilarityAnalyzer(hub_dataset=args.hub_dataset) if args.build: analyzer.build_index() if args.push_new_index: analyzer.push_index_to_hub() return if not args.modeling_file: raise SystemExit("Provide --modeling-file or use --build") dates = build_date_data() modeling_file = args.modeling_file if os.sep not in modeling_file: modeling_file = os.path.join("src", "transformers", "models", modeling_file, f"modeling_{modeling_file}.py") results = analyzer.analyze_file( Path(modeling_file), top_k_per_item=5, allow_hub_fallback=True, use_jaccard=args.use_jaccard ) modeling_filename = Path(modeling_file).name release_key = modeling_filename.split("modeling_")[-1][:-3] release_date = dates.get(release_key, "unknown release date") aggregate_scores: dict[str, float] = {} for data in results.values(): for identifier, score in data.get("embedding", []): try: relative_path, _ = identifier.split(":", 1) except ValueError: continue 
aggregate_scores[relative_path] = aggregate_scores.get(relative_path, 0.0) + score best_candidate_path: str | None = None if aggregate_scores: best_candidate_path = max(aggregate_scores.items(), key=lambda item: item[1])[0] best_model = Path(best_candidate_path).parts[0] if Path(best_candidate_path).parts else "?" best_release = dates.get(best_model, "unknown release date") logging.info( f"{ANSI_HIGHLIGHT_CANDIDATE}Closest overall candidate: {MODELS_ROOT / best_candidate_path}" f" (release: {best_release}, total score: {aggregate_scores[best_candidate_path]:.4f}){ANSI_RESET}" ) grouped: dict[str, list[tuple[str, dict]]] = {"class": [], "function": []} for query_name, data in results.items(): kind = data.get("kind", "function") grouped.setdefault(kind, []).append((query_name, data)) section_titles = [("class", "Classes"), ("function", "Functions")] legend_shown = False for kind, title in section_titles: entries = grouped.get(kind, []) if not entries: continue metrics_present: set[str] = set() for _, data in entries: if data.get("embedding"): metrics_present.add("embedding") if args.use_jaccard: if data.get("jaccard"): metrics_present.add("jaccard") if data.get("intersection"): metrics_present.add("intersection") include_metric_column = bool(metrics_present - {"embedding"}) headers = ["Symbol", "Path", "Score", "Release"] if include_metric_column: headers = ["Symbol", "Metric", "Path", "Score", "Release"] table_rows: list[tuple[str, ...] 
| None] = [] row_styles: list[str] = [] has_metric_rows = False logging.info(_colorize_heading(title)) for query_name, data in entries: if table_rows: table_rows.append(None) symbol_label = query_name if release_date: symbol_label = f"{symbol_label}" symbol_row = (symbol_label,) + ("",) * (len(headers) - 1) table_rows.append(symbol_row) row_styles.append(ANSI_BOLD) embedding_details: list[tuple[str, str, str, float, str]] = [] embedding_style_indices: list[int] = [] for identifier, score in data.get("embedding", []): try: relative_path, match_name = identifier.split(":", 1) except ValueError: continue model_id = Path(relative_path).parts[0] if Path(relative_path).parts else "?" match_release = dates.get(model_id, "unknown release date") full_path, line = _resolve_definition_location(relative_path, match_name) display_path = f"{full_path}:{line} ({match_name})" if include_metric_column: row = ("", "embedding", display_path, f"{score:.4f}", match_release) else: row = ("", display_path, f"{score:.4f}", match_release) table_rows.append(row) row_styles.append(ANSI_ROW) embedding_style_indices.append(len(row_styles) - 1) embedding_details.append((relative_path, model_id, match_name, score, match_release)) has_metric_rows = True if embedding_details: highest_score = None highest_idx = None for idx, (_, _, _, score, _) in enumerate(embedding_details): if highest_score is None or score > highest_score: highest_score = score highest_idx = idx if highest_idx is not None: row_styles[embedding_style_indices[highest_idx]] = ANSI_HIGHLIGHT_TOP if highest_score is not None: oldest_idx = None oldest_date = None for idx, (_, model_id, _, score, release_value) in enumerate(embedding_details): if highest_score - score > 0.1: continue parsed = _parse_release_date(release_value) if parsed is None: continue if oldest_date is None or parsed < oldest_date: oldest_date = parsed oldest_idx = idx if ( oldest_idx is not None and row_styles[embedding_style_indices[oldest_idx]] != 
ANSI_HIGHLIGHT_TOP ): row_styles[embedding_style_indices[oldest_idx]] = ANSI_HIGHLIGHT_OLD if best_candidate_path is not None: for idx, (relative_path, _, _, _, _) in enumerate(embedding_details): style_position = embedding_style_indices[idx] if row_styles[style_position] != ANSI_ROW: continue if relative_path == best_candidate_path: row_styles[style_position] = ANSI_HIGHLIGHT_CANDIDATE if args.use_jaccard: for identifier, score in data.get("jaccard", []): try: relative_path, match_name = identifier.split(":", 1) except ValueError: continue model_id = Path(relative_path).parts[0] if Path(relative_path).parts else "?" match_release = dates.get(model_id, "unknown release date") full_path, line = _resolve_definition_location(relative_path, match_name) display_path = f"{full_path}:{line} ({match_name})" if include_metric_column: row = ("", "jaccard", display_path, f"{score:.4f}", match_release) else: row = ("", display_path, f"{score:.4f}", match_release) table_rows.append(row) row_styles.append(ANSI_ROW) has_metric_rows = True if best_candidate_path == relative_path: row_styles[-1] = ANSI_HIGHLIGHT_CANDIDATE for identifier in sorted(data.get("intersection", [])): try: relative_path, match_name = identifier.split(":", 1) except ValueError: continue model_id = Path(relative_path).parts[0] if Path(relative_path).parts else "?" 
match_release = dates.get(model_id, "unknown release date") full_path, line = _resolve_definition_location(relative_path, match_name) display_path = f"{full_path}:{line} ({match_name})" if include_metric_column: row = ("", "intersection", display_path, "--", match_release) else: row = ("", display_path, "--", match_release) table_rows.append(row) row_styles.append(ANSI_ROW) has_metric_rows = True if best_candidate_path == relative_path: row_styles[-1] = ANSI_HIGHLIGHT_CANDIDATE if table_rows: if not legend_shown and has_metric_rows: logging.info( "Legend: " f"{ANSI_HIGHLIGHT_TOP}highest match{ANSI_RESET}, " f"{ANSI_HIGHLIGHT_OLD}oldest within 0.1{ANSI_RESET}, " f"{ANSI_HIGHLIGHT_CANDIDATE}closest overall candidate{ANSI_RESET}" ) legend_shown = True logging.info(_format_table(headers, table_rows, row_styles)) logging.info("") if __name__ == "__main__": main()
{ "repo_id": "huggingface/transformers", "file_path": "utils/modular_model_detector.py", "license": "Apache License 2.0", "lines": 759, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/dbrx/modular_dbrx.py
# Copyright 2024 Databricks Mosaic Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Modular components for DBRX model.""" from collections.abc import Callable from typing import Any import torch from torch import nn from ... import initialization as init from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...generation import GenerationMixin from ...masking_utils import create_causal_mask from ...modeling_layers import ( GradientCheckpointingLayer, ) from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, can_return_tuple from ...utils.generic import merge_with_config_defaults from ...utils.output_capturing import capture_outputs from ..llama.modeling_llama import ( LlamaRotaryEmbedding, apply_rotary_pos_emb, eager_attention_forward, ) from ..mixtral.modeling_mixtral import load_balancing_loss_func from .configuration_dbrx import DbrxConfig class DbrxRotaryEmbedding(LlamaRotaryEmbedding): pass class DbrxAttention(nn.Module): """Modular DBRX attention component that can be reused across different model architectures.""" def __init__( self, config, layer_idx: int | None = None, **kwargs, ): super().__init__() self.config = config self.hidden_size = config.d_model self.num_heads = config.n_heads 
self.head_dim = self.hidden_size // self.num_heads self.max_position_embeddings = config.max_seq_len self.layer_idx = layer_idx attn_config = config.attn_config self.attention_dropout = attn_config.attn_pdrop self.clip_qkv = attn_config.clip_qkv self.num_key_value_heads = attn_config.kv_n_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.scaling = self.head_dim**-0.5 self.rope_theta = attn_config.rope_theta self.is_causal = True self.Wqkv = nn.Linear( self.hidden_size, self.hidden_size + 2 * self.num_key_value_heads * self.head_dim, bias=False ) self.out_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor | None = None, position_embeddings: torch.LongTensor | None = None, past_key_values: Cache | None = None, cache_position: torch.LongTensor | None = None, **kwargs, ) -> tuple[torch.Tensor, torch.Tensor]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) qkv_states = self.Wqkv(hidden_states) min_val = -self.clip_qkv if self.clip_qkv is not None else None qkv_states = qkv_states.clamp(min=min_val, max=self.clip_qkv) query_states, key_states, value_states = qkv_states.split( [ self.hidden_size, self.num_key_value_heads * self.head_dim, self.num_key_value_heads * self.head_dim, ], dim=2, ) query_states = query_states.view(hidden_shape).transpose(1, 2) key_states = key_states.view(hidden_shape).transpose(1, 2) value_states = value_states.view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = 
ALL_ATTENTION_FUNCTIONS.get_interface( self.config._attn_implementation, eager_attention_forward ) attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.out_proj(attn_output) return attn_output, attn_weights class DbrxExpertGLU(nn.Module): def __init__(self, config): super().__init__() self.hidden_size = config.hidden_size self.ffn_hidden_size = config.ffn_hidden_size self.moe_num_experts = config.moe_num_experts self.w1 = nn.Parameter(torch.empty(self.moe_num_experts * self.ffn_hidden_size, self.hidden_size)) self.v1 = nn.Parameter(torch.empty(self.moe_num_experts * self.ffn_hidden_size, self.hidden_size)) self.w2 = nn.Parameter(torch.empty(self.moe_num_experts * self.ffn_hidden_size, self.hidden_size)) act_fn_name = config.ffn_act_fn.get("name", "silu") self.activation_fn = ACT2FN[act_fn_name] def forward( self, x: torch.Tensor, expert_w1: torch.Tensor, expert_v1: torch.Tensor, expert_w2: torch.Tensor ) -> torch.Tensor: gate_proj = x.matmul(expert_w1) up_proj = x.matmul(expert_v1) gate_proj = self.activation_fn(gate_proj) intermediate_states = gate_proj * up_proj down_proj = intermediate_states.matmul(expert_w2.t()) return down_proj class DbrxExperts(nn.Module): def __init__(self, config): super().__init__() self.mlp = DbrxExpertGLU(config) self.hidden_size = config.hidden_size self.ffn_hidden_size = config.ffn_hidden_size self.num_experts = config.moe_num_experts def forward( self, hidden_states: torch.Tensor, top_k_index: torch.Tensor, top_k_weights: torch.Tensor, ) -> torch.Tensor: batch_size = hidden_states.shape[0] hidden_states = hidden_states.reshape(-1, self.ffn_hidden_size) next_states = torch.zeros_like(hidden_states, dtype=hidden_states.dtype, device=hidden_states.device) with torch.no_grad(): expert_mask = 
torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts) expert_mask = expert_mask.permute(2, 1, 0) expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero() split_expert_shape = (-1, self.ffn_hidden_size, self.hidden_size) for expert_idx in expert_hit: expert_idx = expert_idx[0] with torch.no_grad(): idx, token_idx = torch.where(expert_mask[expert_idx]) v1 = self.mlp.v1.view(split_expert_shape)[expert_idx] w1 = self.mlp.w1.view(split_expert_shape)[expert_idx] w2 = self.mlp.w2.view(split_expert_shape)[expert_idx] states = self.mlp(hidden_states[token_idx], w1, v1, w2) states = states.view(-1, self.ffn_hidden_size) * top_k_weights[token_idx, idx, None] next_states.index_add_(0, token_idx, states) next_states = next_states.view(batch_size, -1, self.ffn_hidden_size) return next_states class DbrxRouter(nn.Module): def __init__(self, config): super().__init__() self.hidden_size = config.ffn_hidden_size self.moe_jitter_eps = config.moe_jitter_eps self.layer = nn.Linear(self.hidden_size, config.moe_num_experts, bias=False) def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.LongTensor]: if self.training and self.moe_jitter_eps is not None: hidden_states *= torch.empty_like(hidden_states).uniform_( 1.0 - self.moe_jitter_eps, 1.0 + self.moe_jitter_eps ) hidden_states = hidden_states.view(-1, hidden_states.shape[-1]) router_logits = self.layer(hidden_states) return router_logits class DbrxFFN(nn.Module): """Modular DBRX MLP/FFN component with MoE support.""" def __init__(self, config, **kwargs): super().__init__() self.router = DbrxRouter(config.ffn_config) self.experts = DbrxExperts(config.ffn_config) self.moe_normalize_expert_weights = config.ffn_config.moe_normalize_expert_weights self.top_k = config.ffn_config.moe_top_k def route_tokens_to_experts(self, router_logits): router_logits = torch.nn.functional.softmax(router_logits, dim=1, dtype=router_logits.dtype) router_top_value, router_indices = 
torch.topk(router_logits, self.top_k, dim=-1) if self.moe_normalize_expert_weights is not None: router_top_value = router_top_value / torch.norm( router_top_value, p=self.moe_normalize_expert_weights, dim=-1, keepdim=True ) return router_top_value, router_indices def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: router_logits = self.router(hidden_states) top_k_weights, top_k_index = self.route_tokens_to_experts(router_logits) output = self.experts(hidden_states, top_k_index, top_k_weights) return output class DbrxNormAttentionNorm(nn.Module): def __init__(self, config: DbrxConfig, layer_idx: int | None = None): super().__init__() self.layer_idx = layer_idx self.resid_pdrop = config.resid_pdrop self.norm_1 = nn.LayerNorm(config.d_model, bias=False) self.attn = DbrxAttention( config=config, layer_idx=layer_idx, ) self.norm_2 = nn.LayerNorm(config.d_model, bias=False) def forward( self, hidden_states: torch.Tensor, position_embeddings: torch.LongTensor, attention_mask: torch.Tensor | None = None, past_key_values: Cache | None = None, cache_position: torch.LongTensor | None = None, **kwargs: Any, ) -> tuple[torch.Tensor, torch.Tensor]: residual_states = hidden_states hidden_states = self.norm_1(hidden_states).to(hidden_states.dtype) hidden_states, _ = self.attn( hidden_states=hidden_states, attention_mask=attention_mask, position_embeddings=position_embeddings, past_key_values=past_key_values, cache_position=cache_position, **kwargs, ) hidden_states = nn.functional.dropout(hidden_states, p=self.resid_pdrop, training=self.training) hidden_states = hidden_states + residual_states residual_states = hidden_states hidden_states = self.norm_2(hidden_states).to(hidden_states.dtype) return residual_states, hidden_states class DbrxBlock(GradientCheckpointingLayer): def __init__(self, config: DbrxConfig, layer_idx: int): super().__init__() self.hidden_size = config.d_model self.resid_pdrop = config.resid_pdrop self.layer_idx = layer_idx 
        self.norm_attn_norm = DbrxNormAttentionNorm(
            config=config,
            layer_idx=layer_idx,
        )
        self.ffn = DbrxFFN(config=config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        position_embeddings: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Any,
    ):
        # The attention sub-block returns (residual, normed states for the FFN).
        resid_states, hidden_states = self.norm_attn_norm(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_embeddings=position_embeddings,
            past_key_values=past_key_values,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = self.ffn(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.resid_pdrop, training=self.training)
        hidden_states = resid_states + hidden_states
        return hidden_states


class DbrxPreTrainedModel(PreTrainedModel):
    config: DbrxConfig
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = True
    _no_split_modules = ["DbrxBlock"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flex_attn = True
    _supports_attention_backend = True
    _supports_flash_attn = True
    _supports_sdpa = True
    _can_compile_fullgraph = False  # MoE models don't work with torch.compile (`torch.where(condition)` not supported)
    _can_record_outputs = {
        # NOTE(review): DbrxForCausalLM reads `outputs.router_logits` for the
        # aux loss, but no router-logits recorder is registered here — confirm
        # whether an `OutputRecorder(DbrxRouter)` entry is missing.
        "hidden_states": DbrxBlock,
        "attentions": DbrxAttention,
    }

    @torch.no_grad()
    def _init_weights(self, module: nn.Module):
        # Default HF init, plus explicit normal init for the stacked expert
        # weights (they are raw Parameters, not nn.Linear, so the base init
        # would miss them).
        super()._init_weights(module)
        std = self.config.initializer_range
        if isinstance(module, DbrxExpertGLU):
            init.normal_(module.w1, mean=0.0, std=std)
            init.normal_(module.v1, mean=0.0, std=std)
            init.normal_(module.w2, mean=0.0, std=std)


@auto_docstring
class DbrxModel(DbrxPreTrainedModel):
    """Transformer decoder consisting of *config.num_hidden_layers*. Each layer is a [`DbrxBlock`] layer.

    Args:
        config ([`DbrxConfig`]): Model configuration class with all parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
    """

    def __init__(self, config: DbrxConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        # NOTE(review): `emb_pdrop` is stored but never applied to the
        # embeddings in `forward` — confirm embedding dropout isn't missing.
        self.emb_pdrop = config.emb_pdrop
        self.rotary_emb = DbrxRotaryEmbedding(config)

        self.wte = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
        self.blocks = nn.ModuleList([DbrxBlock(config, layer_idx) for layer_idx in range(config.n_layers)])
        self.norm_f = nn.LayerNorm(config.d_model, bias=False)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Embedding:
        return self.wte

    def set_input_embeddings(self, value: nn.Embedding):
        self.wte = value

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MoeModelOutputWithPast:
        # Exactly one of input_ids / inputs_embeds must be provided.
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)

        if cache_position is None:
            # Positions of the new tokens relative to what is already cached.
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.blocks[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                position_embeddings=position_embeddings,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )

        hidden_states = self.norm_f(hidden_states)
        return MoeModelOutputWithPast(  # only diff with Mistral is the output type, we need MoE
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )


class DbrxForCausalLM(DbrxPreTrainedModel, GenerationMixin):
    # lm_head is tied to the input embedding of the backbone.
    _tied_weights_keys = {"lm_head.weight": "transformer.wte.weight"}
    _tp_plan = {"lm_head": "colwise_gather_output"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config: DbrxConfig):
        super().__init__(config)
        self.transformer = DbrxModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Router aux-loss hyperparameters, read from the nested ffn config.
        self.router_aux_loss_coef = config.ffn_config.moe_loss_weight
        self.num_experts = config.ffn_config.moe_num_experts
        self.num_experts_per_tok = config.ffn_config.moe_top_k
        self.post_init()

    def get_input_embeddings(self) -> nn.Embedding:
        return self.transformer.get_input_embeddings()

    def set_input_embeddings(self, value: nn.Embedding):
        self.transformer.set_input_embeddings(value)

    def get_output_embeddings(self) -> nn.Linear:
        return self.lm_head

    def set_output_embeddings(self, new_embeddings: nn.Linear):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder: DbrxModel):
        self.transformer = decoder

    def get_decoder(self) -> DbrxModel:
        return self.transformer

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        output_router_logits: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MoeCausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >> from transformers import AutoTokenizer, DbrxForCausalLM

        >> model = DbrxForCausalLM.from_pretrained("transformers-community/dbrx-instruct")
        >> tokenizer = AutoTokenizer.from_pretrained("transformers-community/dbrx-instruct")

        >> prompt = "Hey, are you conscious? Can you talk to me?"
        >> inputs = tokenizer(prompt, return_tensors="pt")

        >> # Generate
        >> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```
        """
        output_router_logits = (
            output_router_logits if output_router_logits is not None else self.config.output_router_logits
        )

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs: MoeModelOutputWithPast = self.transformer(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_router_logits=output_router_logits,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)

        aux_loss = None
        if output_router_logits:
            # NOTE(review): relies on `outputs.router_logits` being populated —
            # see the `_can_record_outputs` note on DbrxPreTrainedModel.
            aux_loss = load_balancing_loss_func(
                outputs.router_logits,
                self.num_experts,
                self.num_experts_per_tok,
                attention_mask,
            )
            if labels is not None:
                loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure to reside in the same device

        return MoeCausalLMOutputWithPast(
            loss=loss,
            aux_loss=aux_loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            router_logits=outputs.router_logits,
        )


__all__ = ["DbrxForCausalLM", "DbrxModel", "DbrxPreTrainedModel"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/dbrx/modular_dbrx.py", "license": "Apache License 2.0", "lines": 472, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/granitemoe/modular_granitemoe.py
# Copyright 2024 IBM and the HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn

from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...masking_utils import create_causal_mask
from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring
from ...utils.generic import can_return_tuple, merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..granite.modeling_granite import GraniteRMSNorm, GraniteRotaryEmbedding
from ..jetmoe.modeling_jetmoe import JetMoeParallelExperts, JetMoeTopKGating
from ..llama.modeling_llama import LlamaAttention, LlamaPreTrainedModel
from ..mixtral.modeling_mixtral import MixtralDecoderLayer, MixtralForCausalLM, MixtralModel, load_balancing_loss_func
from .configuration_granitemoe import GraniteMoeConfig


class GraniteMoeRMSNorm(GraniteRMSNorm):
    pass


class GraniteMoeRotaryEmbedding(GraniteRotaryEmbedding):
    pass


class GraniteMoeParallelExperts(JetMoeParallelExperts):
    pass


class GraniteMoeTopKGating(JetMoeTopKGating):
    pass


class GraniteMoeMoE(nn.Module):
    """
    A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts.

    Args:
        config:
            Configuration object with model hyperparameters.
    """

    def __init__(self, config: GraniteMoeConfig):
        super().__init__()
        self.input_size = config.hidden_size
        self.hidden_size = config.intermediate_size
        self.activation = ACT2FN[config.hidden_act]
        # Each expert projects to 2 * intermediate_size: one half gates the other.
        self.input_linear = GraniteMoeParallelExperts(config.num_local_experts, self.input_size, self.hidden_size * 2)
        self.output_linear = GraniteMoeParallelExperts(config.num_local_experts, self.hidden_size, self.input_size)

        self.router = GraniteMoeTopKGating(
            input_size=self.input_size,
            num_experts=config.num_local_experts,
            top_k=config.num_experts_per_tok,
        )

    def forward(self, layer_input):
        """Route flattened tokens to their top-k experts and scatter the
        gate-weighted expert outputs back into a (bsz, length, input_size) tensor."""
        bsz, length, emb_size = layer_input.size()
        layer_input = layer_input.reshape(-1, emb_size)
        _, batch_index, batch_gates, expert_size, _ = self.router(layer_input)

        expert_inputs = layer_input[batch_index]
        hidden_states = self.input_linear(expert_inputs, expert_size)
        # GLU: first half is gated by the activation, second half is the value.
        chunked_hidden_states = hidden_states.chunk(2, dim=-1)
        hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1]
        expert_outputs = self.output_linear(hidden_states, expert_size)

        expert_outputs = expert_outputs * batch_gates[:, None]

        zeros = torch.zeros((bsz * length, self.input_size), dtype=expert_outputs.dtype, device=expert_outputs.device)
        layer_output = zeros.index_add(0, batch_index, expert_outputs)
        layer_output = layer_output.view(bsz, length, self.input_size)
        return layer_output


class GraniteMoeAttention(LlamaAttention):
    def __init__(self, config: GraniteMoeConfig, layer_idx: int):
        # FIX: `super().__init__` binds `self` implicitly; passing it
        # explicitly (as the previous code did) shifts every positional
        # argument by one and raises a TypeError.
        super().__init__(config, layer_idx)
        self.scaling = config.attention_multiplier  # Only diff with llama


class GraniteMoeDecoderLayer(MixtralDecoderLayer):
    def __init__(self, config: GraniteMoeConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        self.self_attn = GraniteMoeAttention(config=config, layer_idx=layer_idx)
        self.input_layernorm = GraniteMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = GraniteMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        del self.mlp
        # FIX: build the MoE block once (it was previously constructed twice,
        # the first instance being immediately discarded).
        self.block_sparse_moe = GraniteMoeMoE(config)
        self.residual_multiplier = config.residual_multiplier  # Only diff with mixtral!

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs,
    ) -> torch.Tensor:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states * self.residual_multiplier  # diff

        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.block_sparse_moe(hidden_states)
        hidden_states = residual + hidden_states * self.residual_multiplier  # diff
        return hidden_states


@auto_docstring
class GraniteMoePreTrainedModel(LlamaPreTrainedModel, PreTrainedModel):
    config: GraniteMoeConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["GraniteMoeDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _can_compile_fullgraph = False  # TopK gating fails fullgraph compilation at "expert_size = expert_size.tolist()"

    @torch.no_grad()
    def _init_weights(self, module):
        # Default init, plus explicit init for the stacked expert weights
        # (raw Parameters that the base init would otherwise miss).
        PreTrainedModel._init_weights(self, module)
        if isinstance(module, GraniteMoeParallelExperts):
            init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)


@auto_docstring
class GraniteMoeModel(MixtralModel):
    def __init__(self, config: GraniteMoeConfig):
        super().__init__(config)
        self.layers = nn.ModuleList(
            [GraniteMoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = GraniteMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.embedding_multiplier = config.embedding_multiplier

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MoeModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if cache_position is None:
            # Positions of the new tokens relative to what is already cached.
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(  # ONLY DIFF WITH MIXTRAL: NO SLIDING
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        inputs_embeds = inputs_embeds * self.embedding_multiplier
        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                position_embeddings=position_embeddings,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return MoeModelOutputWithPast(  # only diff with Mistral is the output type, we need MoE
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )


class GraniteMoeForCausalLM(MixtralForCausalLM):
    def __init__(self, config: GraniteMoeConfig):
        super().__init__(config)
        self.model = GraniteMoeModel(config)
        self.logits_scaling = config.logits_scaling

    @auto_docstring
    @can_return_tuple
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        output_router_logits: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs,
    ) -> tuple | MoeCausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, GraniteMoeForCausalLM

        >>> model = GraniteMoeForCausalLM.from_pretrained("ibm/PowerMoE-3b")
        >>> tokenizer = AutoTokenizer.from_pretrained("ibm/PowerMoE-3b")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_router_logits = (
            output_router_logits if output_router_logits is not None else self.config.output_router_logits
        )

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            cache_position=cache_position,
            **kwargs,
        )

        # Only compute necessary logits
        hidden_states = outputs.last_hidden_state
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        logits = logits / self.config.logits_scaling

        loss = None
        if labels is not None:
            # Flatten the tokens
            loss = self.loss_function(
                logits,
                labels,
                vocab_size=self.config.vocab_size,
                **kwargs,
            )

        aux_loss = None
        if output_router_logits:
            # NOTE(review): relies on `outputs.router_logits` being recorded by
            # the backbone — confirm a router-logits recorder is registered.
            aux_loss = load_balancing_loss_func(
                outputs.router_logits,
                self.num_experts,
                self.num_experts_per_tok,
                attention_mask,
            )
            if labels is not None:
                loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure to reside in the same device

        return MoeCausalLMOutputWithPast(
            loss=loss,
            aux_loss=aux_loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            router_logits=outputs.router_logits,
        )


__all__ = ["GraniteMoeForCausalLM", "GraniteMoeModel", "GraniteMoePreTrainedModel"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/granitemoe/modular_granitemoe.py", "license": "Apache License 2.0", "lines": 274, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/jamba/modular_jamba.py
# Copyright 2024 AI21 Labs Ltd. and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections.abc import Callable from typing import Any import torch from torch import nn from ... 
import initialization as init
from ...activations import ACT2FN
from ...integrations import lazy_load_kernel
from ...masking_utils import create_causal_mask
from ...modeling_layers import GenericForSequenceClassification, GradientCheckpointingLayer
from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, logging
from ...utils.generic import merge_with_config_defaults
from ...utils.import_utils import resolve_internal_import
from ...utils.output_capturing import OutputRecorder, capture_outputs
from ..llama.modeling_llama import LlamaAttention, LlamaRMSNorm, eager_attention_forward
from ..mistral.modeling_mistral import MistralMLP
from ..mixtral.modeling_mixtral import MixtralExperts, MixtralForCausalLM
from .configuration_jamba import JambaConfig


logger = logging.get_logger(__name__)


class JambaRMSNorm(LlamaRMSNorm):
    pass


class HybridMambaAttentionDynamicCache:
    """
    A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache
    (which has a constant shape regardless of seq_len).

    This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states`
    and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. The expected shape for each tensor
    depends on the layer type.
    For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`,
    while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors).
    For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors),
    while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`,
    and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`.
    """

    is_compileable = False

    def __init__(self, config, batch_size, dtype=torch.float16, device=None):
        self.dtype = dtype
        self.layers_block_type = config.layers_block_type
        self.has_previous_state = False  # only used by mamba
        intermediate_size = config.mamba_expand * config.hidden_size
        ssm_state_size = config.mamba_d_state
        conv_kernel_size = config.mamba_d_conv
        self.conv_states = []
        self.ssm_states = []
        self.transformer_layers = []
        for i in range(config.num_hidden_layers):
            if self.layers_block_type[i] == "mamba":
                # Mamba layers carry fixed-size conv/ssm states and no KV cache.
                self.conv_states += [
                    torch.zeros(batch_size, intermediate_size, conv_kernel_size, device=device, dtype=dtype)
                ]
                self.ssm_states += [
                    torch.zeros(batch_size, intermediate_size, ssm_state_size, device=device, dtype=dtype)
                ]
            else:
                # Attention layers get empty placeholders for the mamba states.
                self.conv_states += [torch.tensor([[]] * batch_size, device=device)]
                self.ssm_states += [torch.tensor([[]] * batch_size, device=device)]
                self.transformer_layers.append(i)

        # KV caches start empty for every layer and grow on `update`.
        self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
        self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]

    def __len__(self):
        return len(self.key_cache)

    def __getitem__(self, layer_idx):
        return self.key_cache[layer_idx], self.value_cache[layer_idx]

    def update(
        self,
        key_states: torch.Tensor,
        value_states: torch.Tensor,
        layer_idx: int,
        cache_kwargs: dict[str, Any] | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Append new key/value states for `layer_idx` and return the full cache."""
        # Update the cache
        if self.key_cache[layer_idx].shape[-1] == 0:
            # First write: replace the empty placeholder tensor.
            self.key_cache[layer_idx] = key_states
            self.value_cache[layer_idx] = value_states
        else:
            self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=2)
            self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=2)

        return self.key_cache[layer_idx], self.value_cache[layer_idx]

    def reorder_cache(self, beam_idx: torch.LongTensor):
        """Reorders the cache for beam search, given the selected beam indices."""
        if self.get_seq_length() > 0:
            for layer_idx in range(len(self.key_cache)):
                device = self.key_cache[layer_idx].device
                self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
                device = self.value_cache[layer_idx].device
                self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))

                device = self.conv_states[layer_idx].device
                self.conv_states[layer_idx] = self.conv_states[layer_idx].index_select(0, beam_idx.to(device))
                device = self.ssm_states[layer_idx].device
                self.ssm_states[layer_idx] = self.ssm_states[layer_idx].index_select(0, beam_idx.to(device))

    def get_mask_sizes(self, cache_position: torch.Tensor, layer_idx: int) -> tuple[int, int]:
        """Return the length and offset of the cache, used to generate the mask"""
        kv_offset = 0
        query_length = cache_position.shape[0]
        kv_length = self.get_seq_length(layer_idx) + query_length
        return kv_length, kv_offset

    def get_seq_length(self, layer_idx: int | None = 0) -> int:
        """Returns the sequence length of the cached states. A layer index can be optionally passed."""
        # take any layer that contains cache and not empty tensor
        layer_idx = self.transformer_layers[0] if layer_idx not in self.transformer_layers else layer_idx
        if len(self.key_cache) <= layer_idx or self.key_cache[layer_idx].shape[-1] == 0:
            return 0
        return self.key_cache[layer_idx].shape[-2]


class JambaAttention(LlamaAttention):
    def __init__(self, config: JambaConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        # Re-create the projections without bias (Jamba never uses attention bias).
        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        past_key_values: HybridMambaAttentionDynamicCache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        if past_key_values is not None:
            key_states, value_states = past_key_values.update(
                key_states, value_states, self.layer_idx, {"cache_position": cache_position}
            )

        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class JambaMambaMixer(nn.Module):
    """
    Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
    A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
    ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
    and is why Mamba is called **selective** state spaces)
    """

    def __init__(self, config: JambaConfig, layer_idx):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.hidden_size = config.hidden_size
        self.ssm_state_size = config.mamba_d_state
        self.conv_kernel_size = config.mamba_d_conv
        self.intermediate_size = config.mamba_expand * config.hidden_size
        self.time_step_rank = config.mamba_dt_rank
        self.use_conv_bias = config.mamba_conv_bias
        self.use_bias = config.mamba_proj_bias
        # Depthwise conv (groups == channels) with left padding for causality.
        self.conv1d = nn.Conv1d(
            in_channels=self.intermediate_size,
            out_channels=self.intermediate_size,
            bias=self.use_conv_bias,
            kernel_size=self.conv_kernel_size,
            groups=self.intermediate_size,
            padding=self.conv_kernel_size - 1,
        )

        self.activation = config.hidden_act
        self.act = ACT2FN[config.hidden_act]

        # projection of the input hidden states
        self.in_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=self.use_bias)
        # selective projection used to make dt, B and C input dependent
        self.x_proj = nn.Linear(self.intermediate_size, self.time_step_rank + self.ssm_state_size * 2, bias=False)
        # time step projection (discretization)
        self.dt_proj = nn.Linear(self.time_step_rank, self.intermediate_size, bias=True)

        # S4D real initialization. These are not discretized!
        # The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
        A = torch.arange(1, self.ssm_state_size + 1)[None, :]
        A = A.expand(self.intermediate_size, -1).contiguous()

        self.A_log = nn.Parameter(torch.log(A))
        self.D = nn.Parameter(torch.ones(self.intermediate_size))

        self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=self.use_bias)

        # Jamba-specific: RMSNorms over dt/B/C (absent in vanilla Mamba).
        self.dt_layernorm = JambaRMSNorm(self.time_step_rank, eps=config.rms_norm_eps)
        self.b_layernorm = JambaRMSNorm(self.ssm_state_size, eps=config.rms_norm_eps)
        self.c_layernorm = JambaRMSNorm(self.ssm_state_size, eps=config.rms_norm_eps)

        # Lazily resolve the optional fused CUDA kernels; fall back to the slow
        # path (and warn once) when any of them is unavailable.
        global causal_conv1d_update, causal_conv1d_fn
        causal_conv1d = lazy_load_kernel("causal-conv1d")
        causal_conv1d_update = getattr(causal_conv1d, "causal_conv1d_update", None)
        causal_conv1d_fn = getattr(causal_conv1d, "causal_conv1d_fn", None)

        global selective_state_update, mamba_inner_fn, selective_scan_fn
        mamba_ssm = lazy_load_kernel("mamba-ssm")
        selective_state_update = resolve_internal_import(
            mamba_ssm, chained_path="ops.triton.selective_state_update.selective_state_update"
        )
        selective_scan_fn = getattr(mamba_ssm, "selective_scan_fn", None)
        mamba_inner_fn = getattr(mamba_ssm, "mamba_inner_fn", None)

        global is_fast_path_available
        is_fast_path_available = all(
            (selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)
        )

        if not is_fast_path_available:
            logger.warning_once(
                "The fast path is not available because on of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)`"
                " is None. To install follow https://github.com/state-spaces/mamba/#installation and https://github.com/Dao-AILab/causal-conv1d."
) def cuda_kernels_forward( self, hidden_states: torch.Tensor, cache_params: HybridMambaAttentionDynamicCache | None = None, attention_mask: torch.LongTensor | None = None, ): batch_size, seq_len, _ = hidden_states.shape use_precomputed_states = ( cache_params is not None and cache_params.has_previous_state and seq_len == 1 and cache_params.conv_states[self.layer_idx].shape[0] == cache_params.ssm_states[self.layer_idx].shape[0] == batch_size ) # 1. Gated MLP's linear projection projected_states = self.in_proj(hidden_states).transpose(1, 2) # We can't use `mamba_inner_fn` even if in training and without cache params because we have the # inner layernorms which isn't supported by this fused kernel hidden_states, gate = projected_states.chunk(2, dim=1) if attention_mask is not None: hidden_states = hidden_states * attention_mask.unsqueeze(1) # 2. Convolution sequence transformation conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2)) if use_precomputed_states: hidden_states = causal_conv1d_update( hidden_states.squeeze(-1), cache_params.conv_states[self.layer_idx], conv_weights, self.conv1d.bias, self.activation, ) hidden_states = hidden_states.unsqueeze(-1) else: if cache_params is not None: conv_states = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0)) cache_params.conv_states[self.layer_idx].copy_(conv_states) hidden_states = causal_conv1d_fn(hidden_states, conv_weights, self.conv1d.bias, activation=self.activation) if attention_mask is not None: hidden_states = hidden_states * attention_mask.unsqueeze(1) # 3. State Space Model sequence transformation # 3.a. 
input varying initialization of time_step, B and C ssm_parameters = self.x_proj(hidden_states.transpose(1, 2)) time_step, B, C = torch.split( ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1 ) time_step = self.dt_layernorm(time_step) B = self.b_layernorm(B) C = self.c_layernorm(C) # Here we need to apply dt_proj without the bias, as the bias is added in the selective scan kernel. # This is a hack to apply dt_proj while still using the forward pass of `torch.nn.Linear`, which is needed # in order to make quantization work. Quantization code replaces `torch.nn.Linear` layers with quantized # linear layers, and requires to call the forward pass directly. # Quantized model can't work with the original code: # ```discrete_time_step = self.dt_proj.weight @ time_step.transpose(1, 2)``` time_proj_bias = self.dt_proj.bias.data with torch.no_grad(): self.dt_proj.bias.data = torch.zeros_like(self.dt_proj.bias.data) discrete_time_step = self.dt_proj(time_step).transpose(1, 2) with torch.no_grad(): self.dt_proj.bias.data = time_proj_bias A = -torch.exp(self.A_log.float()) # 3.c perform the recurrence y ← SSM(A, B, C)(x) time_proj_bias = time_proj_bias.float() if time_proj_bias is not None else None if use_precomputed_states: scan_outputs = selective_state_update( cache_params.ssm_states[self.layer_idx], hidden_states[..., 0], discrete_time_step[..., 0], A, B[:, 0], C[:, 0], self.D, gate[..., 0], time_proj_bias, dt_softplus=True, ).unsqueeze(-1) else: scan_outputs, ssm_state = selective_scan_fn( hidden_states, discrete_time_step, A, B.transpose(1, 2), C.transpose(1, 2), self.D.float(), gate, time_proj_bias, delta_softplus=True, return_last_state=True, ) if ssm_state is not None and cache_params is not None: cache_params.ssm_states[self.layer_idx].copy_(ssm_state) # 4. 
Final linear projection contextualized_states = self.out_proj(scan_outputs.transpose(1, 2)) return contextualized_states # fmt: off def slow_forward(self, input_states, cache_params: HybridMambaAttentionDynamicCache | None = None, attention_mask: torch.LongTensor | None = None): batch_size, seq_len, _ = input_states.shape dtype = input_states.dtype # 1. Gated MLP's linear projection projected_states = self.in_proj(input_states).transpose(1, 2) hidden_states, gate = projected_states.chunk(2, dim=1) if attention_mask is not None: hidden_states = hidden_states * attention_mask.unsqueeze(1) use_cache = isinstance(cache_params, HybridMambaAttentionDynamicCache) # 2. Convolution sequence transformation if use_cache and cache_params.ssm_states[self.layer_idx].shape[0] == batch_size: if self.training: # In training mode, we don't want to perform in-place operations on ssm_state so we can compute the backwards pass ssm_state = cache_params.ssm_states[self.layer_idx].clone() else: ssm_state = cache_params.ssm_states[self.layer_idx] ssm_state = ssm_state.to(hidden_states.device) if cache_params.has_previous_state and seq_len == 1 and \ cache_params.conv_states[self.layer_idx].shape[0] == batch_size: conv_state = cache_params.conv_states[self.layer_idx] conv_state = torch.roll(conv_state, shifts=-1, dims=-1) conv_state[:, :, -1] = hidden_states[:, :, 0] cache_params.conv_states[self.layer_idx] = conv_state hidden_states = torch.sum(conv_state * self.conv1d.weight[:, 0, :], dim=-1) if self.use_conv_bias: hidden_states += self.conv1d.bias hidden_states = self.act(hidden_states).to(dtype).unsqueeze(-1) else: conv_state = nn.functional.pad( hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0) ) cache_params.conv_states[self.layer_idx] = conv_state hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len]) else: ssm_state = torch.zeros( (batch_size, self.intermediate_size, self.ssm_state_size), device=hidden_states.device, dtype=dtype ) hidden_states = 
self.act(self.conv1d(hidden_states)[..., :seq_len]) if attention_mask is not None: hidden_states = hidden_states * attention_mask.unsqueeze(1) # 3. State Space Model sequence transformation # 3.a. Selection: [batch, seq_len, self.time_step_rank + self.ssm_state_size * 2] ssm_parameters = self.x_proj(hidden_states.transpose(1, 2)) time_step, B, C = torch.split( ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1 ) time_step = self.dt_layernorm(time_step) B = self.b_layernorm(B) C = self.c_layernorm(C) discrete_time_step = self.dt_proj(time_step) discrete_time_step = nn.functional.softplus(discrete_time_step).transpose(1, 2) # 3.b. Discretization: B and C to [batch, seq_len, intermediate_size, ssm_state_size] (SRAM) A = -torch.exp(self.A_log.float()) discrete_A = torch.exp(A[None, :, None, :] * discrete_time_step[:, :, :, None]) discrete_B = discrete_time_step[:, :, :, None] * B[:, None, :, :].float() deltaB_u = discrete_B * hidden_states[:, :, :, None].float() # 3.c perform the recurrence y ← SSM(A, B, C)(x) scan_outputs = [] for i in range(seq_len): ssm_state = discrete_A[:, :, i, :] * ssm_state + deltaB_u[:, :, i, :] scan_output = torch.matmul(ssm_state.to(dtype), C[:, i, :].unsqueeze(-1)) scan_outputs.append(scan_output[:, :, 0]) scan_output = torch.stack(scan_outputs, dim=-1) scan_output = scan_output + (hidden_states * self.D[None, :, None]) scan_output = (scan_output * self.act(gate)) if use_cache: cache_params.ssm_states[self.layer_idx] = ssm_state # 4. Final linear projection contextualized_states = self.out_proj(scan_output.transpose(1, 2)) return contextualized_states # fmt: on def forward( self, hidden_states, cache_params: HybridMambaAttentionDynamicCache | None = None, attention_mask: torch.LongTensor | None = None, ): if self.config.use_mamba_kernels and ( not is_fast_path_available or "cuda" not in self.x_proj.weight.device.type ): logger.warning_once( "Fast Mamba kernels are not available. 
Make sure that they are installed " "and that the mamba module is on a CUDA device. Turning off the fast path " "`config.use_mamba_kernels=False` and falling back to the slow path." ) self.config.use_mamba_kernels = False if self.config.use_mamba_kernels: return self.cuda_kernels_forward(hidden_states, cache_params, attention_mask) return self.slow_forward(hidden_states, cache_params, attention_mask) class JambaMLP(MistralMLP): pass class JambaExperts(MixtralExperts): pass class JambaSparseMoeBlock(nn.Module): """ This implementation is strictly equivalent to standard MoE with full capacity (no dropped tokens). It's faster since it formulates MoE operations in terms of block-sparse operations to accommodate imbalanced assignments of tokens to experts, whereas standard MoE either (1) drop tokens at the cost of reduced performance or (2) set capacity factor to number of experts and thus waste computation and memory on padding. """ def __init__(self, config: JambaConfig): super().__init__() self.hidden_dim = config.hidden_size self.ffn_dim = config.intermediate_size self.num_experts = config.num_experts self.top_k = config.num_experts_per_tok self.router = nn.Linear(self.hidden_dim, self.num_experts, bias=False) self.experts = JambaExperts(config) def route_tokens_to_experts(self, hidden_states, router_logits): routing_weights = torch.nn.functional.softmax(router_logits, dim=-1, dtype=torch.float) top_k_weights, top_k_index = torch.topk(routing_weights, self.top_k, dim=-1) return top_k_index, top_k_weights.to(hidden_states.dtype) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: batch_size, sequence_length, hidden_dim = hidden_states.shape hidden_states = hidden_states.view(-1, hidden_dim) router_logits = self.router(hidden_states) top_k_index, top_k_weights = self.route_tokens_to_experts(hidden_states, router_logits) hidden_states = self.experts(hidden_states, top_k_index, top_k_weights) hidden_states = hidden_states.reshape(batch_size, sequence_length, 
hidden_dim) return hidden_states class JambaAttentionDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: JambaConfig, layer_idx: int): super().__init__() num_experts = config.layers_num_experts[layer_idx] if config.layers_num_experts else 1 self.self_attn = JambaAttention(config, layer_idx) ffn_layer_class = JambaSparseMoeBlock if num_experts > 1 else JambaMLP self.feed_forward = ffn_layer_class(config) self.input_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.pre_ff_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: HybridMambaAttentionDynamicCache | None = None, use_cache: bool | None = False, cache_position: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> torch.FloatTensor: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.pre_ff_layernorm(hidden_states) hidden_states = self.feed_forward(hidden_states) hidden_states = residual + hidden_states return hidden_states class JambaMambaDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: JambaConfig, layer_idx: int): super().__init__() num_experts = config.layers_num_experts[layer_idx] if config.layers_num_experts else 1 self.mamba = JambaMambaMixer(config=config, layer_idx=layer_idx) ffn_layer_class = JambaSparseMoeBlock if num_experts > 1 else JambaMLP self.feed_forward = ffn_layer_class(config) self.input_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.pre_ff_layernorm = JambaRMSNorm(config.hidden_size, 
eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: HybridMambaAttentionDynamicCache | None = None, **kwargs: Unpack[TransformersKwargs], ) -> torch.FloatTensor: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states = self.mamba( hidden_states=hidden_states, cache_params=past_key_values, attention_mask=attention_mask, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.pre_ff_layernorm(hidden_states) hidden_states = self.feed_forward(hidden_states) hidden_states = residual + hidden_states return hidden_states ALL_DECODER_LAYER_TYPES = {"attention": JambaAttentionDecoderLayer, "mamba": JambaMambaDecoderLayer} class JambaPreTrainedModel(PreTrainedModel): config: JambaConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["JambaAttentionDecoderLayer", "JambaMambaDecoderLayer"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn = True _supports_sdpa = True _is_stateful = True _can_record_outputs = { "hidden_states": [JambaAttentionDecoderLayer, JambaMambaDecoderLayer], "attentions": JambaAttention, "router_logits": OutputRecorder(nn.Linear, layer_name="router"), } @torch.no_grad() def _init_weights(self, module): super()._init_weights(module) if isinstance(module, JambaMambaMixer): A = torch.arange(1, module.ssm_state_size + 1)[None, :] A = A.expand(module.intermediate_size, -1).contiguous() init.copy_(module.A_log, torch.log(A)) init.ones_(module.D) elif isinstance(module, JambaExperts): init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range) init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range) @auto_docstring class JambaModel(JambaPreTrainedModel): def __init__(self, config: JambaConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size 
= config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) decoder_layers = [] for i in range(config.num_hidden_layers): layer_class = ALL_DECODER_LAYER_TYPES[config.layers_block_type[i]] decoder_layers.append(layer_class(config, layer_idx=i)) self.layers = nn.ModuleList(decoder_layers) self.final_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() @merge_with_config_defaults @capture_outputs @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: HybridMambaAttentionDynamicCache | None = None, inputs_embeds: torch.FloatTensor | None = None, use_cache: bool | None = None, cache_position: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> MoeModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = HybridMambaAttentionDynamicCache( config=self.config, batch_size=inputs_embeds.shape[0], dtype=inputs_embeds.dtype, device=inputs_embeds.device, ) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( config=self.config, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) mamba_mask = self._update_mamba_mask(attention_mask, cache_position) 
hidden_states = inputs_embeds for decoder_layer in self.layers: layer_mask = mamba_mask if isinstance(decoder_layer, JambaMambaDecoderLayer) else causal_mask hidden_states = decoder_layer( hidden_states, attention_mask=layer_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = self.final_layernorm(hidden_states) if past_key_values and not past_key_values.has_previous_state: past_key_values.has_previous_state = True return MoeModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, ) def _update_mamba_mask(self, attention_mask, cache_position): """ No need for zeroing states when 1. Cached forward 2. Attending to all inputs """ mamba_mask = attention_mask if (cache_position is not None and cache_position[0] > 0) or ( attention_mask is not None and torch.all(attention_mask == 1) ): mamba_mask = None return mamba_mask class JambaForCausalLM(MixtralForCausalLM): def __init__(self, config: JambaConfig): super().__init__(config) self.num_experts = config.num_experts def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: HybridMambaAttentionDynamicCache | None = None, inputs_embeds: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, use_cache: bool | None = None, output_router_logits: bool | None = None, cache_position: torch.LongTensor | None = None, logits_to_keep: int | torch.Tensor = 0, **kwargs: Unpack[TransformersKwargs], ) -> MoeCausalLMOutputWithPast: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). 
Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, JambaForCausalLM >>> model = JambaForCausalLM.from_pretrained("ai21labs/Jamba-v0.1") >>> tokenizer = AutoTokenizer.from_pretrained("ai21labs/Jamba-v0.1") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```""" return super().forward( input_ids, attention_mask, position_ids, past_key_values, inputs_embeds, labels, use_cache, cache_position, logits_to_keep, **kwargs, ) class JambaForSequenceClassification(GenericForSequenceClassification, JambaPreTrainedModel): pass __all__ = ["JambaForCausalLM", "JambaForSequenceClassification", "JambaModel", "JambaPreTrainedModel"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/jamba/modular_jamba.py", "license": "Apache License 2.0", "lines": 678, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/jetmoe/modular_jetmoe.py
# Copyright 2024 JetMoe AI and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch JetMoe model.""" from collections.abc import Callable import torch from torch import nn from torch.nn import functional as F from ... import initialization as init from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...generation import GenerationMixin from ...masking_utils import create_causal_mask from ...modeling_layers import ( GenericForSequenceClassification, ) from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging from ...utils.generic import merge_with_config_defaults from ...utils.output_capturing import OutputRecorder, capture_outputs from ..llama.modeling_llama import LlamaDecoderLayer from ..mixtral.modeling_mixtral import ( MixtralModel, MixtralPreTrainedModel, MixtralRMSNorm, MixtralRotaryEmbedding, apply_rotary_pos_emb, eager_attention_forward, load_balancing_loss_func, ) from .configuration_jetmoe import JetMoeConfig logger = logging.get_logger(__name__) class JetMoeRMSNorm(MixtralRMSNorm): pass class JetMoeRotaryEmbedding(MixtralRotaryEmbedding): pass class JetMoeParallelExperts(nn.Module): def __init__(self, num_experts: int, input_size: int, output_size: int) -> None: """ 
Initialize the JetMoeParallelExperts module. The experts weights are stored in [num_experts, output_size, input_size] format. Such that it's compatible with many MoE libraries, such as [Megablock](https://github.com/databricks/megablocks) and [ScatterMoE](https://github.com/shawntan/scattermoe), as well as the [MoE kernel](https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/layers/fused_moe/fused_moe.py) used in vllm. Args: num_experts (int): Number of experts. input_size (int): Size of the input. output_size (int): Size of the output. """ super().__init__() self.weight = nn.Parameter(torch.empty(num_experts, output_size, input_size)) self.num_experts = num_experts self.input_size = input_size self.output_size = output_size def forward(self, inputs, expert_size): """ Forward pass of the JetMoeParallelExperts module. Args: inputs (Tensor): Input tensor. expert_size: Expert size information. Returns: Tensor: Output tensor. """ input_list = inputs.split(expert_size, dim=0) output_list = [] for i in range(self.num_experts): output_list.append(F.linear(input_list[i], self.weight[i])) results = torch.cat(output_list, dim=0) return results class JetMoeTopKGating(nn.Module): def __init__(self, input_size: int, num_experts: int, top_k: int): """ Initialize the top-k gating mechanism. Args: input_size (`int`): Size of the input. num_experts (`int`): Number of experts. top_k (`int`): Number of top experts to select. 
""" super().__init__() self.num_experts = num_experts self.input_size = input_size self.top_k = top_k self.layer = nn.Linear(input_size, num_experts, bias=False) def forward(self, hidden_states): # compute the top_k routing decision logits = self.layer(hidden_states).float() # [batch_size x seq_len, num_experts] top_k_logits, top_k_indices = logits.topk(self.top_k, dim=1) # [num_tokens, top_k] top_k_gates = torch.softmax(top_k_logits, dim=1).type_as(hidden_states) # [num_tokens, top_k] # compute number of input given to each expert zeros = torch.zeros( [top_k_gates.size(0), self.num_experts], dtype=top_k_gates.dtype, device=top_k_gates.device ) # [num_tokens, num_experts] gates = zeros.scatter(1, top_k_indices, 1) # [num_tokens, num_experts] expert_size = gates.long().sum(0) # [num_experts,] # (This cause torch.compile to fail with `torch._dynamo.exc.Unsupported: Backend compiler failed with a fake tensor exception at`) # (and `DataDependentOutputException`) expert_size = expert_size.tolist() # sort and group input tokens according to expert assignment top_k_experts = top_k_indices.flatten() # [num_tokens * top_k] _, index_sorted_experts = top_k_experts.sort(0) # [num_tokens * top_k] batch_index = index_sorted_experts.div(self.top_k, rounding_mode="trunc") # [num_tokens * top_k] # gather the gate values for grouped input tokens top_k_gates = top_k_gates.flatten() # [num_tokens * top_k] batch_gates = top_k_gates[index_sorted_experts] # [num_tokens * top_k] return index_sorted_experts, batch_index, batch_gates, expert_size, logits class JetMoeMoE(nn.Module): """ A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts. Args: config: Configuration object with model hyperparameters. 
""" def __init__(self, config: JetMoeConfig): super().__init__() self.input_size = config.hidden_size self.hidden_size = config.intermediate_size self.activation = ACT2FN[config.activation_function] self.bias = torch.nn.Parameter(torch.empty(self.input_size)) self.input_linear = JetMoeParallelExperts(config.num_local_experts, self.input_size, self.hidden_size * 2) self.output_linear = JetMoeParallelExperts(config.num_local_experts, self.hidden_size, self.input_size) self.router = JetMoeTopKGating( input_size=self.input_size, num_experts=config.num_local_experts, top_k=config.num_experts_per_tok, ) def forward(self, layer_input): """ Forward pass of the mixture of experts layer. Args: layer_input (Tensor): Input tensor. Returns: Tensor: Output tensor. Tensor: Router logits. """ bsz, length, emb_size = layer_input.size() layer_input = layer_input.reshape(-1, emb_size) _, batch_index, batch_gates, expert_size, router_logits = self.router(layer_input) expert_inputs = layer_input[batch_index] hidden_states = self.input_linear(expert_inputs, expert_size) chunked_hidden_states = hidden_states.chunk(2, dim=-1) hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1] expert_outputs = self.output_linear(hidden_states, expert_size) expert_outputs = expert_outputs * batch_gates[:, None] zeros = torch.zeros((bsz * length, self.input_size), dtype=expert_outputs.dtype, device=expert_outputs.device) layer_output = zeros.index_add(0, batch_index, expert_outputs) layer_output = layer_output.view(bsz, length, self.input_size) layer_output = layer_output + self.bias return layer_output class JetMoeMoA(nn.Module): """ A Sparsely gated mixture of attention layer with pairs of query- and output-projections as experts. Args: config: Configuration object with model hyperparameters. 
""" def __init__(self, config: JetMoeConfig): super().__init__() self.num_experts = config.num_local_experts self.input_size = config.hidden_size self.hidden_size = config.kv_channels * config.num_key_value_heads self.top_k = config.num_experts_per_tok self.bias = torch.nn.Parameter(torch.empty(self.input_size)) self.input_linear = JetMoeParallelExperts(self.num_experts, self.input_size, self.hidden_size) self.output_linear = JetMoeParallelExperts(self.num_experts, self.hidden_size, self.input_size) self.router = JetMoeTopKGating( input_size=self.input_size, num_experts=self.num_experts, top_k=self.top_k, ) def map(self, layer_input): """ Map inputs to attention experts according to routing decision and compute query projection inside each experts. """ # Compute gating topology bsz, length, emb_size = layer_input.size() layer_input = layer_input.reshape(-1, emb_size) # [bsz * length, emb_size] index_sorted_experts, batch_index, batch_gates, expert_size, router_logits = self.router(layer_input) topo_info = (index_sorted_experts, batch_index, batch_gates, expert_size) # Group inputs according to topology and compute query projection expert_inputs = layer_input[batch_index] # [bsz * length * top_k, emb_size] expert_outputs = self.input_linear(expert_inputs, expert_size) # [bsz * length * top_k, hidden_size] # Ungroup queries back to original order zeros = torch.zeros( (bsz * length * self.top_k, self.hidden_size), dtype=expert_outputs.dtype, device=expert_outputs.device ) layer_output = zeros.index_add(0, index_sorted_experts, expert_outputs) layer_output = layer_output.view(bsz, length, self.top_k, -1) # [bsz, length, top_k, hidden_size] return layer_output, router_logits, topo_info def reduce(self, layer_input, topo_info): """ Compute output projection inside each attention experts and merge the outputs of different experts. 
""" bsz, length, k, hidden_size = layer_input.size() layer_input = layer_input.reshape(-1, hidden_size) # [bsz * length * k, hidden_size] index_sorted_experts, batch_index, batch_gates, expert_size = topo_info # Group inputs according to topology and compute output projection expert_inputs = layer_input[index_sorted_experts] # [bsz * length * top_k, hidden_size] expert_outputs = self.output_linear(expert_inputs, expert_size) # [bsz * length * top_k, emb_size] # Apply gates to attention expert outputs expert_outputs = expert_outputs * batch_gates[:, None] # Ungroup and merge outputs to original order zeros = torch.zeros((bsz * length, self.input_size), dtype=expert_outputs.dtype, device=expert_outputs.device) layer_output = zeros.index_add(0, batch_index, expert_outputs) layer_output = layer_output.view(bsz, length, self.input_size) layer_output = layer_output + self.bias return layer_output def forward(self, layer_input): raise NotImplementedError("This module doesn't support call and forward.") class JetMoeAttention(nn.Module): """ Multi-headed attention from 'Attention Is All You Need' paper. """ def __init__(self, config: JetMoeConfig, layer_idx: int | None = None): """ Initialize the JetMoeAttention module. Args: config: Configuration object with model hyperparameters. layer_idx: Index of the layer in the model. """ super().__init__() self.config = config self.layer_idx = layer_idx self.is_causal = True if layer_idx is None: logger.warning_once( f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." 
) self.num_key_value_groups = 1 # We ignore this by setting it to 1 as we have different repeat patterns self.top_k = config.num_experts_per_tok self.attention_dropout = config.attention_dropout self.kv_projection_size = config.kv_channels * config.num_key_value_heads self.num_key_value_heads = config.num_key_value_heads self.num_heads = config.num_attention_heads self.head_dim = config.kv_channels self.scaling = self.head_dim**-0.5 self.experts = JetMoeMoA(config) self.kv_proj = torch.nn.Linear(config.hidden_size, self.kv_projection_size * 2, bias=False) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor | None = None, position_embeddings: torch.LongTensor | None = None, past_key_values: Cache | None = None, cache_position: torch.LongTensor | None = None, **kwargs, ) -> tuple[torch.Tensor, torch.Tensor | None, tuple[torch.Tensor] | None]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states, router_logits, topo_info = self.experts.map(hidden_states) key_states, value_states = self.kv_proj(hidden_states).chunk(2, dim=-1) query_states = query_states.view(hidden_shape).transpose(1, 2) key_states = key_states.view(hidden_shape).transpose(1, 2) value_states = value_states.view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface( self.config._attn_implementation, eager_attention_forward ) # This is different from other models where we repeat k/v heads # instead of repeat interleaving them key_states = key_states.repeat(1, self.top_k, 1, 1) 
value_states = value_states.repeat(1, self.top_k, 1, 1) attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.view(*input_shape, self.top_k, -1) attn_output = self.experts.reduce(attn_output, topo_info) attn_output = attn_output.view(*input_shape, -1) return attn_output, attn_weights, router_logits class JetMoeDecoderLayer(LlamaDecoderLayer): def __init__(self, config: JetMoeConfig, layer_idx: int | None = None): super().__init__(config, layer_idx) self.input_layernorm = JetMoeRMSNorm(config.hidden_size) self.self_attention = JetMoeAttention(config, layer_idx) self.post_attention_layernorm = JetMoeRMSNorm(config.hidden_size) self.mlp = JetMoeMoE(config) del self.self_attn def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: Cache | None = None, use_cache: bool | None = False, cache_position: torch.LongTensor | None = None, position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None, **kwargs: Unpack[TransformersKwargs], ) -> torch.Tensor: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, _, _ = self.self_attention( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return hidden_states @auto_docstring class JetMoePreTrainedModel(MixtralPreTrainedModel): _can_record_outputs = { "router_logits": [OutputRecorder(JetMoeAttention, 
index=2), OutputRecorder(JetMoeTopKGating, index=4)], "hidden_states": JetMoeDecoderLayer, "attentions": OutputRecorder(JetMoeAttention, index=1), } config: JetMoeConfig base_model_prefix = "model" supports_gradient_checkpointing = False _no_split_modules = ["JetMoeDecoderLayer"] _skip_keys_device_placement = ["past_key_values"] _supports_flash_attn = True _supports_sdpa = True _can_compile_fullgraph = False # TopK gating fails fullgraph compilation at "expert_size = expert_size.tolist()" @torch.no_grad() def _init_weights(self, module): """Initialize the weights.""" PreTrainedModel._init_weights(self, module) if isinstance(module, JetMoeParallelExperts): init.normal_(module.weight, mean=0.0, std=self.config.initializer_range) elif isinstance(module, JetMoeMoA | JetMoeMoE): init.zeros_(module.bias) @auto_docstring class JetMoeModel(MixtralModel): def __init__(self, config: JetMoeConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [JetMoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self._attn_implementation = config._attn_implementation self.norm = JetMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps) @merge_with_config_defaults @capture_outputs @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: Cache | None = None, inputs_embeds: torch.FloatTensor | None = None, use_cache: bool | None = None, cache_position: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> MoeModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if use_cache and past_key_values is None: past_key_values = 
DynamicCache(config=self.config) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( config=self.config, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) for decoder_layer in self.layers[: self.config.num_hidden_layers]: hidden_states = decoder_layer( hidden_states, position_embeddings=position_embeddings, attention_mask=causal_mask, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_ids=position_ids, **kwargs, ) hidden_states = self.norm(hidden_states) return MoeModelOutputWithPast( # only diff with Mistral is the output type, we need MoE last_hidden_state=hidden_states, past_key_values=past_key_values, ) class JetMoeForCausalLM(JetMoePreTrainedModel, GenerationMixin): _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"} def __init__(self, config): super().__init__(config) self.model = JetMoeModel(config) self.vocab_size = config.vocab_size self.aux_loss_coef = config.aux_loss_coef self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.tie_word_embeddings = config.tie_word_embeddings self.num_experts = config.num_local_experts self.num_experts_per_tok = config.num_experts_per_tok # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: 
torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: Cache | None = None, inputs_embeds: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, use_cache: bool | None = None, cache_position: torch.LongTensor | None = None, logits_to_keep: int | torch.Tensor = 0, output_router_logits: bool | None = False, **kwargs, ) -> MoeCausalLMOutputWithPast: outputs: MoeModelOutputWithPast = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, output_router_logits=output_router_logits, **kwargs, ) hidden_states = outputs.last_hidden_state # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function( logits, labels, vocab_size=self.config.vocab_size, **kwargs, ) aux_loss = None if output_router_logits: aux_loss = load_balancing_loss_func( outputs.router_logits, self.num_experts, self.num_experts_per_tok, attention_mask, ) if labels is not None: loss += self.aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device return MoeCausalLMOutputWithPast( loss=loss, aux_loss=aux_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, router_logits=outputs.router_logits, ) class JetMoeForSequenceClassification(GenericForSequenceClassification, JetMoePreTrainedModel): ... __all__ = ["JetMoeForCausalLM", "JetMoeModel", "JetMoePreTrainedModel", "JetMoeForSequenceClassification"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/jetmoe/modular_jetmoe.py", "license": "Apache License 2.0", "lines": 509, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/olmoe/modular_olmoe.py
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch OLMoE model.""" from collections.abc import Callable import torch from torch import nn from ... import initialization as init from ...cache_utils import Cache, DynamicCache from ...generation import GenerationMixin from ...masking_utils import create_causal_mask from ...modeling_outputs import MoeModelOutputWithPast from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, logging from ...utils.output_capturing import OutputRecorder from ..gemma.modeling_gemma import GemmaMLP from ..llama.modeling_llama import ( LlamaAttention, LlamaDecoderLayer, LlamaRMSNorm, LlamaRotaryEmbedding, apply_rotary_pos_emb, eager_attention_forward, ) from ..mixtral.modeling_mixtral import MixtralExperts, MixtralForCausalLM, MixtralModel from ..qwen2_moe.modeling_qwen2_moe import Qwen2MoeTopKRouter from .configuration_olmoe import OlmoeConfig logger = logging.get_logger(__name__) class OlmoeRMSNorm(LlamaRMSNorm): def __init__(self, hidden_size, eps=1e-5): super().__init__(hidden_size, eps) class OlmoeRotaryEmbedding(LlamaRotaryEmbedding): pass class OlmoeMLP(GemmaMLP): pass class OlmoeAttention(LlamaAttention): def __init__(self, config: OlmoeConfig, layer_idx: int | None = None): super().__init__(config, layer_idx) self.q_norm = OlmoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.k_norm = OlmoeRMSNorm( (config.hidden_size // 
config.num_attention_heads) * config.num_key_value_heads, eps=config.rms_norm_eps ) def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: torch.Tensor | None, past_key_values: Cache | None = None, cache_position: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, torch.Tensor | None, tuple[torch.Tensor] | None]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_norm(self.q_proj(hidden_states)) key_states = self.k_norm(self.k_proj(hidden_states)) value_states = self.v_proj(hidden_states) if self.config.clip_qkv is not None: # Diff with llama query_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv) key_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv) value_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv) query_states = query_states.view(*hidden_shape).transpose(1, 2) key_states = key_states.view(*hidden_shape).transpose(1, 2) value_states = value_states.view(*hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface( self.config._attn_implementation, eager_attention_forward ) attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, sliding_window=getattr(self.config, "sliding_window", None), # main diff with Llama **kwargs, ) attn_output = 
attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class OlmoeExperts(MixtralExperts): pass class OlmoeTopKRouter(Qwen2MoeTopKRouter): pass class OlmoeSparseMoeBlock(nn.Module): def __init__(self, config): super().__init__() self.gate = OlmoeTopKRouter(config) self.experts = OlmoeExperts(config) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: batch_size, sequence_length, hidden_dim = hidden_states.shape hidden_states = hidden_states.view(-1, hidden_dim) _, top_k_weights, top_k_index = self.gate(hidden_states) final_hidden_states = self.experts(hidden_states, top_k_index, top_k_weights).reshape( batch_size, sequence_length, hidden_dim ) return final_hidden_states class OlmoeDecoderLayer(LlamaDecoderLayer): def __init__(self, config: OlmoeConfig, layer_idx: int): super().__init__(config, layer_idx) self.hidden_size = config.hidden_size self.self_attn = OlmoeAttention(config=config, layer_idx=layer_idx) self.mlp = OlmoeSparseMoeBlock(config) self.input_layernorm = OlmoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = OlmoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps) @auto_docstring class OlmoePreTrainedModel(PreTrainedModel): config: OlmoeConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["OlmoeDecoderLayer"] _skip_keys_device_placement = ["past_key_values"] _supports_flash_attn = True _supports_sdpa = True _can_record_outputs = { "router_logits": OutputRecorder(OlmoeTopKRouter, index=0), "hidden_states": OlmoeDecoderLayer, "attentions": OlmoeAttention, } _supports_attention_backend = True @torch.no_grad() def _init_weights(self, module): PreTrainedModel._init_weights(self, module) if isinstance(module, OlmoeExperts): init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range) init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range) elif isinstance(module, 
OlmoeTopKRouter): init.normal_(module.weight, mean=0.0, std=self.config.initializer_range) @auto_docstring class OlmoeModel(MixtralModel): def __init__(self, config: OlmoeConfig): super().__init__(config) self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [OlmoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = OlmoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = OlmoeRotaryEmbedding(config=config) def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: Cache | None = None, inputs_embeds: torch.FloatTensor | None = None, use_cache: bool | None = None, cache_position: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> MoeModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( # diff with mixtral: no sliding config=self.config, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) for decoder_layer in self.layers[: self.config.num_hidden_layers]: 
hidden_states = decoder_layer( hidden_states, position_embeddings=position_embeddings, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = self.norm(hidden_states) return MoeModelOutputWithPast( # only diff with Mistral is the output type, we need MoE last_hidden_state=hidden_states, past_key_values=past_key_values, ) class OlmoeForCausalLM(MixtralForCausalLM, GenerationMixin): _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"} def __init__(self, config): super().__init__(config) self.model = OlmoeModel(config) self.num_experts = config.num_experts def forward(self, **super_kwargs): r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, OlmoeForCausalLM >>> model = OlmoeForCausalLM.from_pretrained("allenai/OLMoE-1B-7B-0924") >>> tokenizer = AutoTokenizer.from_pretrained("allenai/OLMoE-1B-7B-0924") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] 'Hey, are you conscious? Can you talk to me?\nI’m not sure if you’re conscious of this, but I’m' ``` """ return super().forward(**super_kwargs) __all__ = ["OlmoeForCausalLM", "OlmoeModel", "OlmoePreTrainedModel"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/olmoe/modular_olmoe.py", "license": "Apache License 2.0", "lines": 231, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/phimoe/modular_phimoe.py
# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Phimoe model.""" from collections.abc import Callable import torch from torch import nn from ...modeling_layers import ( GenericForSequenceClassification, ) from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS from ...utils.generic import maybe_autocast from ...utils.output_capturing import OutputRecorder from ..llama.modeling_llama import LlamaAttention from ..mixtral.modeling_mixtral import ( MixtralDecoderLayer, MixtralExperts, MixtralForCausalLM, MixtralModel, MixtralPreTrainedModel, MixtralRotaryEmbedding, ) from .configuration_phimoe import PhimoeConfig class PhimoeRotaryEmbedding(MixtralRotaryEmbedding): def __init__(self, config: PhimoeConfig, device=None): nn.Module.__init__() self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_type = self.config.rope_parameters["rope_type"] self.rope_init_fn: Callable = self.compute_default_rope_parameters if self.rope_type != "default": self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False) def forward(self, x, position_ids=None, layer_type=None): if layer_type is not None: raise ValueError( 
f"{self.__class__.__name__} does not support layer types, but got `layer_type={layer_type}`" ) mscale = None seq_len = torch.max(position_ids) + 1 if self.config.rope_parameters["rope_type"] != "default" and seq_len: mscale = ( self.config.rope_parameters["long_mscale"] if seq_len > self.config.rope_parameters["original_max_position_embeddings"] else self.config.rope_parameters["short_mscale"] ) inv_freq, attention_scaling = self.rope_init_fn(self.config, x.device, seq_len) mscale = attention_scaling if mscale is None else mscale inv_freq_expanded = inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with maybe_autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * mscale sin = emb.sin() * mscale return cos.to(x.dtype), sin.to(x.dtype) class PhimoeAttention(LlamaAttention): pass class PhimoeMultiplier(torch.autograd.Function): @staticmethod def forward( ctx, scores: torch.Tensor, multiplier: torch.Tensor, selected_experts: torch.Tensor, masked_gates: torch.Tensor, mask_for_one: torch.Tensor, ): """ Forward pass for the custom autograd function. Args: ctx: Context object to save information for backward computation. scores (torch.Tensor): Input scores tensor. multiplier (torch.Tensor): Multiplier tensor. selected_experts (torch.Tensor): Tensor of selected experts. masked_gates (torch.Tensor): Masked gates tensor. mask_for_one (torch.Tensor): Mask for one tensor. Returns: torch.Tensor: Result of the forward pass. """ ctx.save_for_backward(multiplier, selected_experts, masked_gates) return multiplier * mask_for_one @staticmethod def backward( ctx, grad_at_output: torch.Tensor, ): """ Backward pass for the custom autograd function. 
Args: ctx: Context object with saved tensors from the forward pass. grad_at_output (torch.Tensor): Gradient at the output. Returns: tuple[torch.Tensor, None, None, None, None]: Gradients for the inputs. """ multiplier, selected_experts, masked_gates = ctx.saved_tensors grad_at_output = grad_at_output * multiplier grad_at_scores_expanded = masked_gates * grad_at_output.mul(-1) grad_at_scores_expanded.scatter_add_( dim=-1, index=selected_experts, src=grad_at_output, ) return ( grad_at_scores_expanded, None, None, None, None, ) def sparsemixer(scores, jitter_eps, training, top_k=2): """ Sparse mixer function to select top-k experts and compute multipliers. Based on the paper: https://huggingface.co/papers/2409.12136 We first replace the TopK(·) function as random sampling of discrete variables in model training. Then, following Liu et al. (2023a) and Liu et al. (2023b), we apply Heun's third order method to approximate the expert routing gradient and construct a modified back-propagation to give a mathematically sound gradient estimation for expert routing. Args: scores (torch.Tensor): Input scores tensor. jitter_eps (float): Jitter epsilon for numerical stability. training (bool): Flag indicating if the model is in training mode. top_k (int): Number of top experts to select. Returns: tuple[torch.Tensor, torch.Tensor]: Multiplier and selected experts tensors. 
""" with torch.no_grad(): # Compute mask for sparsity mask_logits_threshold, max_ind = scores.max(dim=-1, keepdim=True) factor = scores.abs().clamp(min=mask_logits_threshold) mask_logits_threshold = ((mask_logits_threshold - scores) / factor) > (2 * jitter_eps) # Apply mask masked_gates = scores.masked_fill(mask_logits_threshold, float("-inf")) if training: selected_experts = ( ( masked_gates - torch.empty_like(masked_gates, memory_format=torch.legacy_contiguous_format).exponential_().log() ) .max(dim=-1)[1] .unsqueeze(-1) ) # Gumbel sampling, more robust than the multinomial method else: selected_experts = max_ind # Compute scores for gradients masked_gates = torch.softmax(masked_gates, dim=-1) multiplier_o = masked_gates.gather(dim=-1, index=selected_experts) if training: # Compute midpoint mask max_scores, max_ind = masked_gates.max(dim=-1, keepdim=True) mask_for_one = torch.logical_or( selected_experts == max_ind, torch.rand_like(max_scores) > 0.75, # Heun's third-order method ) # 1 -> 1.0 & 0 -> 1./3: lambda x: (x + 0.5) / 1.5 mask_for_one = torch.add(0.3333, mask_for_one, alpha=0.6667).type_as(masked_gates) multiplier = PhimoeMultiplier.apply( scores, multiplier_o, selected_experts, masked_gates, mask_for_one, ) else: multiplier = multiplier_o # Masked out first expert masked_scores = torch.scatter( scores, -1, selected_experts, float("-inf"), ) with torch.no_grad(): # Compute mask for sparsity mask_logits_threshold, max_ind = masked_scores.max(dim=-1, keepdim=True) factor = scores.abs().clamp(min=mask_logits_threshold) mask_logits_threshold = ((mask_logits_threshold - scores) / factor) > (2 * jitter_eps) # Apply mask masked_gates_top2 = masked_scores.masked_fill(mask_logits_threshold, float("-inf")) if training: selected_experts_top2 = ( ( masked_gates_top2 - torch.empty_like(masked_gates_top2, memory_format=torch.legacy_contiguous_format) .exponential_() .log() ) .max(dim=-1)[1] .unsqueeze(-1) ) # Gumbel sampling, more robust than the multinomial method 
else: selected_experts_top2 = max_ind # Compute scores for gradients masked_gates_top2 = torch.softmax(masked_gates_top2, dim=-1) multiplier_top2_o = masked_gates_top2.gather(dim=-1, index=selected_experts_top2) if training: # Compute midpoint mask max_scores, max_ind = masked_gates_top2.max(dim=-1, keepdim=True) mask_for_one_top2 = torch.logical_or( selected_experts_top2 == max_ind, torch.rand_like(max_scores).uniform_() > 0.75, # Heun's third-order method ) # 1 -> 1.0 & 0 -> 1./3: lambda x: (x + 0.5) / 1.5 mask_for_one_top2 = torch.add(0.3333, mask_for_one_top2, alpha=0.6667).type_as(masked_gates_top2) multiplier_top2 = PhimoeMultiplier.apply( scores, multiplier_top2_o, selected_experts_top2, masked_gates_top2, mask_for_one_top2, ) else: multiplier_top2 = multiplier_top2_o multiplier = torch.concat((multiplier, multiplier_top2), dim=-1) selected_experts = torch.concat((selected_experts, selected_experts_top2), dim=-1) return ( multiplier, selected_experts, ) class PhimoeExperts(MixtralExperts): pass class PhimoeTopKRouter(nn.Linear): def __init__(self, config: PhimoeConfig): super().__init__(config.hidden_size, config.num_local_experts, bias=False) self.router_jitter_noise = config.router_jitter_noise self.input_jitter_noise = config.input_jitter_noise self.top_k = config.num_experts_per_tok def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: if self.training and self.input_jitter_noise > 0: hidden_states *= torch.empty_like(hidden_states).uniform_( 1.0 - self.input_jitter_noise, 1.0 + self.input_jitter_noise ) router_logits = super().forward(hidden_states) routing_weights, selected_experts = sparsemixer( router_logits, jitter_eps=self.router_jitter_noise, training=self.training, top_k=self.top_k ) return router_logits, routing_weights, selected_experts class PhimoeSparseMoeBlock(nn.Module): """ This implementation is strictly equivalent to standard MoE with full capacity (no dropped tokens). 
It's faster since it formulates MoE operations in terms of block-sparse operations to accommodate imbalanced assignments of tokens to experts, whereas standard MoE either (1) drop tokens at the cost of reduced performance or (2) set capacity factor to number of experts and thus waste computation and memory on padding. """ def __init__(self, config): super().__init__() self.hidden_dim = config.hidden_size self.ffn_dim = config.intermediate_size self.num_experts = config.num_local_experts self.top_k = config.num_experts_per_tok self.router = PhimoeTopKRouter(config) self.experts = PhimoeExperts(config) self.input_jitter_noise = config.input_jitter_noise def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: batch_size, sequence_length, hidden_dim = hidden_states.shape if self.training and self.input_jitter_noise > 0: hidden_states *= torch.empty_like(hidden_states).uniform_( 1.0 - self.input_jitter_noise, 1.0 + self.input_jitter_noise ) batch_size, sequence_length, hidden_dim = hidden_states.shape hidden_states = hidden_states.reshape(-1, hidden_dim) _, routing_weights, selected_experts = self.router(hidden_states) final_hidden_states = self.experts(hidden_states, selected_experts, routing_weights) return final_hidden_states.reshape(batch_size, sequence_length, hidden_dim) class PhimoeDecoderLayer(MixtralDecoderLayer): def __init__(self, config: PhimoeConfig, layer_idx: int): super().__init__(config, layer_idx) # Phimoe uses nn.LayerNorm self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.rms_norm_eps, elementwise_affine=True) self.post_attention_layernorm = nn.LayerNorm( config.hidden_size, eps=config.rms_norm_eps, elementwise_affine=True ) class PhimoePreTrainedModel(MixtralPreTrainedModel): _can_record_outputs = { "router_logits": OutputRecorder(PhimoeTopKRouter, index=0), "hidden_states": PhimoeDecoderLayer, "attentions": PhimoeAttention, } class PhimoeModel(MixtralModel): def __init__(self, config: PhimoeConfig): super().__init__(config) 
self.norm = nn.LayerNorm(config.hidden_size, eps=config.rms_norm_eps, elementwise_affine=True) class PhimoeForCausalLM(MixtralForCausalLM): def __init__(self, config): super().__init__(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=self.config.lm_head_bias) # Copied from transformers.models.phi3.modeling_phi3.Phi3ForCausalLM.prepare_inputs_for_generation def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, logits_to_keep=None, **kwargs, ): # Overwritten -- this model may need to switch between short and long rope, invalidating the cache in the # process # When the first time input length reached long and short factor switching point, enforce re-compute cache # It will cause downside of slower at this single token position, however, better than current failure. if ( past_key_values and hasattr(self.config, "original_max_position_embeddings") and input_ids.shape[1] >= self.config.original_max_position_embeddings + 1 ): past_length = cache_position[0] if past_length <= self.config.original_max_position_embeddings: past_key_values = None model_inputs = super().prepare_inputs_for_generation( input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, cache_position=cache_position, position_ids=position_ids, use_cache=use_cache, logits_to_keep=logits_to_keep, **kwargs, ) return model_inputs class PhimoeForSequenceClassification(GenericForSequenceClassification, PhimoePreTrainedModel): ... __all__ = [ "PhimoePreTrainedModel", "PhimoeModel", "PhimoeForCausalLM", "PhimoeForSequenceClassification", ]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/phimoe/modular_phimoe.py", "license": "Apache License 2.0", "lines": 348, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/qwen2_moe/modular_qwen2_moe.py
# Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Qwen2MoE model.""" import torch import torch.nn.functional as F from torch import nn from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...generation import GenerationMixin from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask from ...modeling_layers import ( GenericForQuestionAnswering, GenericForSequenceClassification, GenericForTokenClassification, ) from ...modeling_outputs import MoeModelOutputWithPast from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring from ...utils.generic import merge_with_config_defaults from ...utils.output_capturing import OutputRecorder, capture_outputs from ..gemma.modeling_gemma import GemmaMLP from ..gemma2.modeling_gemma2 import Gemma2RotaryEmbedding from ..llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaRMSNorm from ..mixtral.modeling_mixtral import ( MixtralExperts, MixtralForCausalLM, MixtralModel, MixtralPreTrainedModel, ) from .configuration_qwen2_moe import Qwen2MoeConfig class 
class Qwen2MoeTopKRouter(nn.Module):
    """Linear router that scores every token against each expert and keeps the top-k.

    The routing weight matrix has shape (num_experts, hidden_size); scores are
    computed in float32 for numerical stability before the top-k selection.
    """

    def __init__(self, config):
        super().__init__()
        self.top_k = config.num_experts_per_tok
        self.num_experts = config.num_experts
        self.norm_topk_prob = config.norm_topk_prob
        self.hidden_dim = config.hidden_size
        self.weight = nn.Parameter(torch.zeros(self.num_experts, self.hidden_dim))

    def forward(self, hidden_states):
        """Return (softmax probs over all experts, top-k weights, top-k expert indices)."""
        tokens = hidden_states.reshape(-1, self.hidden_dim)
        scores = F.linear(tokens, self.weight)  # (num_tokens, num_experts)
        probs = F.softmax(scores, dim=-1, dtype=torch.float)
        top_weights, top_experts = torch.topk(probs, self.top_k, dim=-1)  # (num_tokens, top_k)
        if self.norm_topk_prob:
            # renormalize so the kept weights sum to 1 per token
            top_weights = top_weights / top_weights.sum(dim=-1, keepdim=True)
        top_weights = top_weights.to(probs.dtype)
        return probs, top_weights, top_experts
Qwen2MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.hidden_size = config.hidden_size @auto_docstring class Qwen2MoePreTrainedModel(MixtralPreTrainedModel): _can_record_outputs = { "router_logits": OutputRecorder(Qwen2MoeTopKRouter, index=0), "hidden_states": Qwen2MoeDecoderLayer, "attentions": Qwen2MoeAttention, } @auto_docstring class Qwen2MoeModel(MixtralModel): def __init__(self, config: Qwen2MoeConfig): super().__init__(config) self.layers = nn.ModuleList( [Qwen2MoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = Qwen2MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = Qwen2MoeRotaryEmbedding(config=config) @merge_with_config_defaults @capture_outputs @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: Cache | None = None, inputs_embeds: torch.FloatTensor | None = None, use_cache: bool | None = None, cache_position: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> MoeModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) # It may already have been prepared by e.g. 
`generate` if not isinstance(causal_mask_mapping := attention_mask, dict): # Prepare mask arguments mask_kwargs = { "config": self.config, "inputs_embeds": inputs_embeds, "attention_mask": attention_mask, "cache_position": cache_position, "past_key_values": past_key_values, "position_ids": position_ids, } # Create the masks causal_mask_mapping = { "full_attention": create_causal_mask(**mask_kwargs), "sliding_attention": create_sliding_window_causal_mask(**mask_kwargs), } hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids) for i, decoder_layer in enumerate(self.layers[: self.config.num_hidden_layers]): hidden_states = decoder_layer( hidden_states, attention_mask=causal_mask_mapping[self.config.layer_types[i]], position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.norm(hidden_states) return MoeModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, ) class Qwen2MoeForCausalLM(MixtralForCausalLM, GenerationMixin): _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"} _tp_plan = {"lm_head": "colwise_gather_output"} _pp_plan = {"lm_head": (["hidden_states"], ["logits"])} def __init__(self, config): super().__init__(config) self.num_experts = config.num_experts self.model = Qwen2MoeModel(config) class Qwen2MoeForSequenceClassification(GenericForSequenceClassification, Qwen2MoePreTrainedModel): ... class Qwen2MoeForTokenClassification(GenericForTokenClassification, Qwen2MoePreTrainedModel): ... class Qwen2MoeForQuestionAnswering(GenericForQuestionAnswering, Qwen2MoePreTrainedModel): ... __all__ = [ "Qwen2MoeForCausalLM", "Qwen2MoeForQuestionAnswering", "Qwen2MoeModel", "Qwen2MoePreTrainedModel", "Qwen2MoeForSequenceClassification", "Qwen2MoeForTokenClassification", ]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/qwen2_moe/modular_qwen2_moe.py", "license": "Apache License 2.0", "lines": 218, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/switch_transformers/modular_switch_transformers.py
def router_z_loss_func(router_logits: torch.Tensor) -> float:
    r"""
    Compute the router z-loss implemented in PyTorch.

    The router z-loss was introduced in [Designing Effective Sparse Expert
    Models](https://huggingface.co/papers/2202.08906). It encourages router logits to
    remain small in an effort to improve stability.

    Args:
        router_logits (`float`):
            Input logits of shape [batch_size, sequence_length, num_experts]

    Returns:
        Scalar router z-loss.
    """
    groups, tokens, _ = router_logits.shape
    # squared log-partition of the router distribution, averaged over all tokens
    squared_log_z = torch.logsumexp(router_logits, dim=-1) ** 2
    return squared_log_z.sum() / (groups * tokens)
expert_mask = torch.max(expert_mask, axis=-2).values # cast to float32 otherwise mean will fail expert_mask = expert_mask.to(torch.float32) tokens_per_group_and_expert = torch.mean(expert_mask, axis=-2) router_prob_per_group_and_expert = torch.mean(router_probs, axis=-2) return torch.mean(tokens_per_group_and_expert * router_prob_per_group_and_expert) * (num_experts**2) class SwitchTransformersTop1Router(nn.Module): """ Router using tokens choose top-1 experts assignment. This router uses the same mechanism as in Switch Transformer (https://huggingface.co/papers/2101.03961) and V-MoE (https://huggingface.co/papers/2106.05974): tokens choose their top experts. Items are sorted by router_probs and then routed to their choice of expert until the expert's expert_capacity is reached. **There is no guarantee that each token is processed by an expert**, or that each expert receives at least one token. """ def __init__(self, config: SwitchTransformersConfig): super().__init__() self.num_experts = config.num_experts self.expert_capacity = config.expert_capacity self.classifier = nn.Linear(config.hidden_size, self.num_experts, bias=config.router_bias) self.jitter_noise = config.router_jitter_noise self.ignore_padding_tokens = config.router_ignore_padding_tokens self.dtype = getattr(torch, config.router_dtype) def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: r""" Computes router probabilities from input hidden states. Args: hidden_states (`torch.Tensor`): (batch_size, sequence_length, hidden_dim) from which router probabilities are computed. Returns: router_probabilities (`torch.Tensor`): Tensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each token and expert. Used for routing tokens to experts. router_logits (`torch.Tensor`): Logits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits. This is used later for computing router z-loss. 
""" # float32 is used to ensure stability. See the discussion of "selective precision" in # https://huggingface.co/papers/2101.03961. # We also store the previous dtype to cast back the output to the previous dtype self.input_dtype = hidden_states.dtype hidden_states = hidden_states.to(self.dtype) if self.training and self.jitter_noise > 0: # Multiply the token inputs by the uniform distribution - adding some noise hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise) router_logits = self.classifier(hidden_states) # Apply Softmax and cast back to the original `dtype` router_probs = nn.functional.softmax(router_logits, dim=-1, dtype=self.dtype).to(self.input_dtype) router_logits, expert_index = torch.max(router_probs, dim=-1, keepdim=True) expert_index = torch.nn.functional.one_hot(expert_index, num_classes=self.num_experts) token_priority = torch.cumsum(expert_index, dim=-2) # mask if the token routed to the expert will overflow expert_capacity_mask = token_priority <= self.expert_capacity expert_index = expert_index * expert_capacity_mask router_probs = torch.max(router_probs, dim=-1).values.unsqueeze(-1) return router_probs, expert_index, router_logits class SwitchTransformersLayerNorm(T5LayerNorm): pass class SwitchTransformersDenseActDense(T5DenseActDense): pass class SwitchTransformersExperts(nn.ModuleDict): def __init__(self, config: SwitchTransformersConfig): super().__init__() self.num_experts = config.num_experts for idx in range(config.num_experts): self[f"expert_{idx}"] = SwitchTransformersDenseActDense(config) def forward( self, hidden_states: torch.Tensor, selected_experts: torch.Tensor, routing_weights: torch.Tensor ) -> torch.Tensor: final_hidden_states = torch.zeros_like(hidden_states) expert_mask = selected_experts.permute(2, 1, 0) expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero() for expert_idx in expert_hit: idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0)) 
class SwitchTransformersSparseMLP(nn.Module):
    # inherit from mixtral
    """Mixture-of-experts feed-forward block: top-1 routing followed by expert dispatch."""

    def __init__(self, config: SwitchTransformersConfig):
        super().__init__()
        self.router = SwitchTransformersTop1Router(config)
        self.experts = SwitchTransformersExperts(config)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        bsz, seq_len, dim = hidden_states.shape
        # Flatten tokens so the router and experts operate on (num_tokens, dim)
        flat_tokens = hidden_states.view(-1, dim)
        _, chosen_experts, route_weights = self.router(flat_tokens)
        routed = self.experts(flat_tokens, chosen_experts, route_weights)
        return routed.reshape(bsz, seq_len, dim)
is_sparse (`bool`): Whether the MLP layer is a `Sparse` layer (contains a Mixture of Experts) or not """ def __init__(self, config: SwitchTransformersConfig, is_sparse=False): super().__init__() self.is_sparse = is_sparse # Check if it is a sparse layer, if not then it is a dense layer if not self.is_sparse: self.mlp = SwitchTransformersDenseActDense(config) else: self.mlp = SwitchTransformersSparseMLP(config) self.layer_norm = SwitchTransformersLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states, **kwargs): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.mlp(forwarded_states) output = hidden_states + self.dropout(forwarded_states) return output class SwitchTransformersAttention(T5Attention): pass class SwitchTransformersLayerSelfAttention(T5LayerSelfAttention): pass class SwitchTransformersLayerCrossAttention(T5LayerCrossAttention): pass class SwitchTransformersBlock(GradientCheckpointingLayer): def __init__(self, config, has_relative_attention_bias=False, is_sparse=False, layer_idx: int | None = None): super().__init__() self.is_decoder = config.is_decoder self.is_sparse = is_sparse self.layer = nn.ModuleList() self.layer.append( SwitchTransformersLayerSelfAttention( config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx ) ) if self.is_decoder: self.layer.append(SwitchTransformersLayerCrossAttention(config, layer_idx=layer_idx)) self.layer.append(SwitchTransformersLayerFF(config, is_sparse=self.is_sparse)) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, past_key_values=None, use_cache=False, cache_position=None, **kwargs, ): hidden_states, _ = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, past_key_values=past_key_values, use_cache=use_cache, 
cache_position=cache_position, ) # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: hidden_states, _ = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, past_key_values=past_key_values, query_length=cache_position[-1] + 1, use_cache=use_cache, cache_position=cache_position, ) # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) hidden_states = self.layer[-1](hidden_states) # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) return hidden_states @auto_docstring class SwitchTransformersPreTrainedModel(PreTrainedModel): config: SwitchTransformersConfig base_model_prefix = "switch_transformers" supports_gradient_checkpointing = True _can_compile_fullgraph = False _no_split_modules = ["SwitchTransformersBlock"] @torch.no_grad() def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, SwitchTransformersLayerNorm): init.constant_(module.weight, factor * 1.0) elif isinstance( module, (SwitchTransformersModel, SwitchTransformersForConditionalGeneration, SwitchTransformersEncoderModel), ): init.normal_(module.shared.weight, mean=0.0, std=factor * 1.0) if 
hasattr(module, "lm_head") and not self.config.tie_word_embeddings: init.normal_(module.lm_head.weight, mean=0.0, std=factor * 1.0) elif isinstance(module, SwitchTransformersDenseActDense): init.normal_(module.wi.weight, mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, "bias") and module.wi.bias is not None: init.zeros_(module.wi.bias) init.normal_(module.wo.weight, mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: init.zeros_(module.wo.bias) elif isinstance(module, SwitchTransformersAttention): d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads init.normal_(module.q.weight, mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) init.normal_(module.k.weight, mean=0.0, std=factor * (d_model**-0.5)) init.normal_(module.v.weight, mean=0.0, std=factor * (d_model**-0.5)) init.normal_(module.o.weight, mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: init.normal_(module.relative_attention_bias.weight, mean=0.0, std=factor * ((d_model) ** -0.5)) elif isinstance(module, SwitchTransformersSparseMLP): d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads init.normal_(module.router.classifier.weight, mean=0.0, std=factor * 1) for idx in range(self.config.num_experts): init.normal_(module.experts[f"expert_{idx}"].wi.weight, mean=0.0, std=factor * (d_model**-0.5)) init.normal_(module.experts[f"expert_{idx}"].wo.weight, mean=0.0, std=factor * (d_model**-0.5)) def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id if decoder_start_token_id is None: raise ValueError( "self.model.config.decoder_start_token_id has to be defined. In SwitchTransformers it is usually set" " to the pad_token_id. 
See SwitchTransformers docs for more information" ) shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class SwitchTransformersStack(SwitchTransformersPreTrainedModel): _can_record_outputs = { "hidden_states": SwitchTransformersBlock, "attentions": OutputRecorder(SwitchTransformersAttention, index=-1, layer_name="layer.0"), "cross_attentions": OutputRecorder(SwitchTransformersAttention, index=-1, layer_name="layer.1"), "router_logits": OutputRecorder(SwitchTransformersTop1Router, index=2), } def __init__(self, config): super().__init__(config) self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model) self.is_decoder = config.is_decoder sparse_step = config.decoder_sparse_step if self.is_decoder else config.encoder_sparse_step config.num_layers = config.num_decoder_layers if self.is_decoder else config.num_layers self.block = nn.ModuleList() for i in range(config.num_layers): is_sparse = (i % sparse_step == 1 or sparse_step == 1) if sparse_step > 0 else False self.block.append( SwitchTransformersBlock( config, has_relative_attention_bias=bool(i == 0), is_sparse=is_sparse, layer_idx=i ) ) self.final_layer_norm = SwitchTransformersLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) self.post_init() self.gradient_checkpointing = False @merge_with_config_defaults @capture_outputs def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, past_key_values=None, use_cache=None, cache_position=None, **kwargs: Unpack[TransformersKwargs], ) -> tuple | 
MoEModelOutputWithPastAndCrossAttentions: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: if self.embed_tokens is None: raise ValueError("You have to initialize the model with valid token embeddings") inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = inputs_embeds.shape[:2] if use_cache is True: if not self.is_decoder: raise ValueError(f"`use_cache` can only be set to `True` if {self} is used as a decoder") if self.is_decoder: if use_cache and past_key_values is None: if self.config.is_encoder_decoder: past_key_values = EncoderDecoderCache( DynamicCache(config=self.config), DynamicCache(config=self.config) ) else: past_key_values = DynamicCache(config=self.config) elif not self.is_decoder: # do not pass cache object down the line for encoder stack # it messes indexing later in decoder-stack because cache object is modified in-place past_key_values = None past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange( past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device ) if attention_mask is None and not is_torchdynamo_compiling(): # required mask seq length can be calculated via length of past cache mask_seq_length = past_key_values_length + seq_length attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) if self.config.is_decoder: causal_mask = create_causal_mask( config=self.config, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, ) else: causal_mask = attention_mask[:, None, None, :] causal_mask = causal_mask.to(dtype=inputs_embeds.dtype) causal_mask = (1.0 - causal_mask) * torch.finfo(inputs_embeds.dtype).min # If a 2D or 3D attention mask is provided for the cross-attention # we need to make 
broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, layer_module in enumerate(self.block): hidden_states = layer_module( hidden_states, causal_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) return MoEModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values, ) @auto_docstring class SwitchTransformersModel(SwitchTransformersPreTrainedModel): _tied_weights_keys = { "encoder.embed_tokens.weight": "shared.weight", "decoder.embed_tokens.weight": "shared.weight", } _input_embed_layer = "shared" def __init__(self, config: SwitchTransformersConfig): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False self.encoder = SwitchTransformersStack(encoder_config) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True self.decoder = SwitchTransformersStack(decoder_config) # Initialize weights and apply final processing self.post_init() def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) 
self.decoder.set_input_embeddings(new_embeddings) @auto_docstring @can_return_tuple def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, decoder_input_ids: torch.LongTensor | None = None, decoder_attention_mask: torch.BoolTensor | None = None, encoder_outputs: tuple[tuple[torch.FloatTensor]] | None = None, past_key_values: Cache | None = None, inputs_embeds: torch.Tensor | None = None, decoder_inputs_embeds: torch.Tensor | None = None, cache_position: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.FloatTensor] | Seq2SeqMoEModelOutput: if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, **kwargs ) hidden_states = encoder_outputs[0] decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, cache_position=cache_position, **kwargs, ) return Seq2SeqMoEModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, decoder_router_logits=decoder_outputs.router_logits, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, encoder_router_logits=encoder_outputs.router_logits, ) @auto_docstring( custom_intro=""" SWITCH_TRANSFORMERS Model with a `language modeling` head on top. 
""" ) class SwitchTransformersForConditionalGeneration(SwitchTransformersPreTrainedModel, GenerationMixin): _tied_weights_keys = { "encoder.embed_tokens.weight": "shared.weight", "decoder.embed_tokens.weight": "shared.weight", "lm_head.weight": "shared.weight", } def __init__(self, config: SwitchTransformersConfig): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False self.encoder = SwitchTransformersStack(encoder_config) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.num_layers = config.num_decoder_layers self.decoder = SwitchTransformersStack(decoder_config) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) self.router_z_loss_coef = config.router_z_loss_coef self.router_aux_loss_coef = config.router_aux_loss_coef self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) @auto_docstring @can_return_tuple def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, decoder_input_ids: torch.LongTensor | None = None, decoder_attention_mask: torch.BoolTensor | None = None, encoder_outputs: tuple[tuple[torch.Tensor]] | None = None, past_key_values: Cache | None = None, inputs_embeds: torch.FloatTensor | None = None, decoder_inputs_embeds: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, output_router_logits: bool | None = False, cache_position: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.FloatTensor] | Seq2SeqMoEOutput: if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, 
inputs_embeds=inputs_embeds, output_router_logits=output_router_logits, **kwargs, ) hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, cache_position=cache_position, output_router_logits=output_router_logits, **kwargs, ) sequence_output = decoder_outputs.last_hidden_state if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None encoder_z_loss = None encoder_aux_loss = None decoder_z_loss = None decoder_aux_loss = None if output_router_logits: # Compute the router loss (z_loss + auxiliary loss) for each router in the encoder and decoder if self.encoder.config.encoder_sparse_step > 1: encoder_router_logits, encoder_expert_indexes = self._unpack_router_logits(encoder_outputs[-1]) encoder_z_loss = router_z_loss_func(encoder_router_logits) encoder_router_probs = nn.Softmax(dim=-1)(encoder_router_logits) encoder_aux_loss = load_balancing_loss_func(encoder_router_probs, encoder_expert_indexes) else: encoder_z_loss = 0 encoder_aux_loss = 0 if self.decoder.config.decoder_sparse_step > 1: decoder_router_logits, decoder_expert_indexes = self._unpack_router_logits(decoder_outputs[-1]) decoder_z_loss = router_z_loss_func(decoder_router_logits) decoder_router_probs = nn.Softmax(dim=-1)(decoder_router_logits) decoder_aux_loss = load_balancing_loss_func(decoder_router_probs, 
decoder_expert_indexes) else: decoder_z_loss = 0 decoder_aux_loss = 0 if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) # move labels to correct device to enable PP labels = labels.to(lm_logits.device) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) if output_router_logits: z_loss = self.router_z_loss_coef * (encoder_z_loss + decoder_z_loss) aux_loss = self.router_aux_loss_coef * (encoder_aux_loss + decoder_aux_loss) loss = loss + z_loss + aux_loss return Seq2SeqMoEOutput( loss=loss, logits=lm_logits, encoder_z_loss=encoder_z_loss, encoder_aux_loss=encoder_aux_loss, decoder_z_loss=decoder_z_loss, decoder_aux_loss=decoder_aux_loss, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, decoder_router_logits=decoder_outputs.router_logits, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, encoder_router_logits=encoder_outputs.router_logits, ) def _unpack_router_logits(self, router_outputs): total_router_logits = [] total_expert_indexes = [] for router_output in router_outputs: if len(router_output[0].shape) > 1: router_logits, expert_indexes = router_output total_router_logits.append(router_logits) total_expert_indexes.append(expert_indexes) return torch.cat(total_router_logits, dim=1), torch.cat(total_expert_indexes, dim=1) def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) class SwitchTransformersEncoderModel(SwitchTransformersPreTrainedModel): _tied_weights_keys = { "encoder.embed_tokens.weight": "shared.weight", } def __init__(self, config: SwitchTransformersConfig): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = 
False encoder_config.is_encoder_decoder = False self.encoder = SwitchTransformersStack(encoder_config) self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) @auto_docstring @can_return_tuple def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, use_cache: bool | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.FloatTensor] | MoEModelOutput: use_cache = False encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, **kwargs, ) return encoder_outputs __all__ = [ "SwitchTransformersEncoderModel", "SwitchTransformersForConditionalGeneration", "SwitchTransformersModel", "SwitchTransformersPreTrainedModel", "SwitchTransformersTop1Router", "SwitchTransformersSparseMLP", ]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/switch_transformers/modular_switch_transformers.py", "license": "Apache License 2.0", "lines": 691, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/edgetam/convert_edgetam_to_hf.py
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Convert SAM checkpoints from the original repository. URL: https://github.com/facebookresearch/segment-anything-2. """ import argparse import re from io import BytesIO import httpx import numpy as np import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( EdgeTamConfig, EdgeTamMaskDecoderConfig, EdgeTamModel, EdgeTamPromptEncoderConfig, EdgeTamVisionConfig, Sam2ImageProcessorFast, Sam2Processor, TimmWrapperConfig, ) def get_config(model_name): backbone_config = TimmWrapperConfig.from_pretrained( "timm/repvit_m1.dist_in1k", model_args={"in_chans": 3, "features_only": True, "out_indices": (0, 1, 2, 3)}, ) vision_config = EdgeTamVisionConfig(backbone_config=backbone_config) prompt_encoder_config = EdgeTamPromptEncoderConfig() mask_decoder_config = EdgeTamMaskDecoderConfig() enable_temporal_pos_encoding_for_object_pointers = False project_temporal_pos_encoding_in_object_pointers = False enable_occlusion_spatial_embedding = False config = EdgeTamConfig( vision_config=vision_config, prompt_encoder_config=prompt_encoder_config, mask_decoder_config=mask_decoder_config, enable_temporal_pos_encoding_for_object_pointers=enable_temporal_pos_encoding_for_object_pointers, project_temporal_pos_encoding_in_object_pointers=project_temporal_pos_encoding_in_object_pointers, 
enable_occlusion_spatial_embedding=enable_occlusion_spatial_embedding, ) return config KEYS_TO_MODIFY_MAPPING = { "iou_prediction_head.layers.0": "iou_prediction_head.proj_in", "iou_prediction_head.layers.1": "iou_prediction_head.layers.0", "iou_prediction_head.layers.2": "iou_prediction_head.proj_out", "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1", "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm", "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2", "mask_downscaling.0": "mask_embed.conv1", "mask_downscaling.1": "mask_embed.layer_norm1", "mask_downscaling.3": "mask_embed.conv2", "mask_downscaling.4": "mask_embed.layer_norm2", "mask_downscaling.6": "mask_embed.conv3", "dwconv": "depthwise_conv", "pwconv": "pointwise_conv", "fuser": "memory_fuser", "point_embeddings": "point_embed", "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding", "obj_ptr_tpos_proj": "temporal_positional_encoding_projection_layer", "no_obj_embed_spatial": "occlusion_spatial_embedding_parameter", "sam_prompt_encoder": "prompt_encoder", "sam_mask_decoder": "mask_decoder", "maskmem_tpos_enc": "memory_temporal_positional_encoding", "gamma": "scale", "image_encoder.neck": "vision_encoder.neck", "image_encoder": "vision_encoder.backbone", "neck.0": "neck.conv1", "neck.1": "neck.layer_norm1", "neck.2": "neck.conv2", "neck.3": "neck.layer_norm2", "pix_feat_proj": "feature_projection", "patch_embed.proj": "patch_embed.projection", "no_mem_embed": "no_memory_embedding", "no_mem_pos_enc": "no_memory_positional_encoding", "obj_ptr": "object_pointer", ".norm": ".layer_norm", "trunk.": "", "out_proj": "o_proj", "body.": "timm_model.", "ff.0": "feed_forward.layer_norm", "ff.1": "feed_forward.linear1", "ff.3": "feed_forward.linear2", } def replace_keys(state_dict): model_state_dict = {} output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*" output_mask_decoder_mlps_pattern = 
r"mask_decoder.transformer.layers.(\d+).mlp.layers.(\d+).*" output_mask_decoder_score_head_pattern = r"mask_decoder.pred_obj_score_head.layers.(\d+).*" output_vision_encoder_mlps_pattern = r"vision_encoder.backbone.blocks.(\d+).mlp.layers.(\d+).*" output_vision_encoder_neck_pattern = r"vision_encoder.neck.convs.(\d+).conv" output_memory_encoder_projection_pattern = r"memory_encoder.o_proj.*" output_object_pointer_proj_pattern = r"object_pointer_proj.layers.(\d+).*" for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: key = key.replace(key_to_modify, new_key) # vision_encoder.blocks.0.mlp.layers.1.weight -> vision_encoder.blocks.0.mlp.proj_out.weight if re.match(output_vision_encoder_mlps_pattern, key): layer_nb = int(re.match(output_vision_encoder_mlps_pattern, key).group(2)) if layer_nb == 0: key = key.replace("layers.0", "proj_in") elif layer_nb == 1: key = key.replace("layers.1", "proj_out") # mask_decoder.transformer.layers.0.mlp.layers.1.weight -> mask_decoder.transformer.layers.1.mlp.proj_out.weight if re.match(output_mask_decoder_mlps_pattern, key): layer_nb = int(re.match(output_mask_decoder_mlps_pattern, key).group(2)) if layer_nb == 0: key = key.replace("mlp.layers.0", "mlp.proj_in") elif layer_nb == 1: key = key.replace("mlp.layers.1", "mlp.proj_out") # mask_decoder.pred_obj_score_head.layers.1.weight -> mask_decoder.pred_obj_score_head.proj_in.weight if re.match(output_mask_decoder_score_head_pattern, key): layer_nb = int(re.match(output_mask_decoder_score_head_pattern, key).group(1)) if layer_nb == 0: key = key.replace("layers.0", "proj_in") elif layer_nb == 1: key = key.replace("layers.1", "layers.0") elif layer_nb == 2: key = key.replace("layers.2", "proj_out") if re.match(output_hypernetworks_mlps_pattern, key): layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2)) if layer_nb == 0: key = key.replace("layers.0", "proj_in") elif layer_nb == 1: key = 
key.replace("layers.1", "layers.0") elif layer_nb == 2: key = key.replace("layers.2", "proj_out") # vision_encoder.neck.convs.1.conv.bias -> vision_encoder.neck.convs.1.bias if re.match(output_vision_encoder_neck_pattern, key): key = key.replace(".conv.", ".") # memory_encoder.o_proj.weight -> memory_encoder.projection.weight if re.match(output_memory_encoder_projection_pattern, key): key = key.replace(".o_proj.", ".projection.") if re.match(output_object_pointer_proj_pattern, key): layer_nb = int(re.match(output_object_pointer_proj_pattern, key).group(1)) if layer_nb == 0: key = key.replace("layers.0", "proj_in") elif layer_nb == 1: key = key.replace("layers.1", "layers.0") elif layer_nb == 2: key = key.replace("layers.2", "proj_out") key = key.replace("layers.2", "proj_out") model_state_dict[key] = value model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[ "prompt_encoder.shared_embedding.positional_embedding" ] model_state_dict["prompt_encoder.point_embed.weight"] = torch.cat( [model_state_dict.pop(f"prompt_encoder.point_embed.{i}.weight") for i in range(4)], dim=0, ) return model_state_dict def convert_edgetam_checkpoint(model_name, checkpoint_path, pytorch_dump_folder, push_to_hub, run_sanity_check): config = get_config(model_name) state_dict = torch.load(checkpoint_path, map_location="cpu")["model"] state_dict = replace_keys(state_dict) image_processor = Sam2ImageProcessorFast() processor = Sam2Processor(image_processor=image_processor) hf_model = EdgeTamModel(config) hf_model.eval() device = "cuda" if torch.cuda.is_available() else "cpu" missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False) hf_model = hf_model.to(device) for pattern in EdgeTamModel._keys_to_ignore_on_load_unexpected: unexpected_keys = [k for k in unexpected_keys if re.search(pattern, k) is None] if missing_keys or unexpected_keys: print("Missing keys:", missing_keys) print("Unexpected keys:", unexpected_keys) raise 
ValueError("Missing or unexpected keys in the state dict") if run_sanity_check: url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" with httpx.stream("GET", url) as response: raw_image = Image.open(BytesIO(response.read())).convert("RGB") input_points = [[[[1000, 600]]]] input_labels = [[[1]]] inputs = processor( images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(device) with torch.no_grad(): output = hf_model(**inputs) scores = output.iou_scores.squeeze() assert torch.allclose(scores, torch.tensor([0.0356, 0.2141, 0.9707]).cuda(), atol=1e-3) if pytorch_dump_folder is not None: processor.save_pretrained(pytorch_dump_folder) hf_model.save_pretrained(pytorch_dump_folder) if push_to_hub: repo_id = f"yonigozlan/{pytorch_dump_folder.split('/')[-1]}" processor.push_to_hub(repo_id) hf_model.push_to_hub(repo_id) if __name__ == "__main__": parser = argparse.ArgumentParser() choices = ["EdgeTAM"] parser.add_argument( "--model_name", default="EdgeTAM", choices=choices, type=str, help="Name of the original model to convert", ) parser.add_argument( "--checkpoint_path", type=str, required=False, help="Path to the original checkpoint", ) parser.add_argument("--pytorch_dump_folder_path", default="", type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) parser.add_argument( "--run_sanity_check", action="store_true", help="Whether to run the sanity check after converting", ) args = parser.parse_args() hf_model_name = args.model_name.replace("_", "-") checkpoint_path = ( hf_hub_download(f"facebook/{hf_model_name}", f"{args.model_name.lower()}.pt") if args.checkpoint_path is None else args.checkpoint_path ) convert_edgetam_checkpoint( args.model_name, checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.run_sanity_check )
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/edgetam/convert_edgetam_to_hf.py", "license": "Apache License 2.0", "lines": 239, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/edgetam/modular_edgetam.py
# Copyright 2025 The Meta AI Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch SAM 2 model.""" import torch from ... import initialization as init from ...configuration_utils import PreTrainedConfig from ...modeling_utils import PreTrainedModel from ...processing_utils import Unpack from ...utils import ( auto_docstring, ) from ...utils.generic import TransformersKwargs, merge_with_config_defaults from ...utils.output_capturing import capture_outputs from ..auto import CONFIG_MAPPING, AutoConfig from ..sam2.configuration_sam2 import Sam2Config, Sam2MaskDecoderConfig, Sam2PromptEncoderConfig from ..sam2.modeling_sam2 import ( Sam2Attention, Sam2FeedForward, Sam2LayerNorm, Sam2Model, Sam2PreTrainedModel, Sam2TwoWayAttentionBlock, Sam2VisionEncoderOutput, Sam2VisionModel, ) class EdgeTamVisionConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`EdgeTamVisionModel`]. It is used to instantiate a SAM vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration defaults will yield a similar configuration to that of SAM 2.1 Hiera-tiny [facebook/EdgeTAM](https://huggingface.co/facebook/EdgeTAM) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. 
Args: backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `timm/repvit_m1.dist_in1k`): Configuration for the vision backbone. This is used to instantiate the backbone using `AutoModel.from_config`. backbone_channel_list (`List[int]`, *optional*, defaults to `[384, 192, 96, 48]`): The list of channel dimensions for the backbone. backbone_feature_sizes (`List[List[int]]`, *optional*, defaults to `[[256, 256], [128, 128], [64, 64]]`): The spatial sizes of the feature maps from the backbone. fpn_hidden_size (`int`, *optional*, defaults to 256): The hidden dimension of the FPN. fpn_kernel_size (`int`, *optional*, defaults to 1): The kernel size for the convolutions in the neck. fpn_stride (`int`, *optional*, defaults to 1): The stride for the convolutions in the neck. fpn_padding (`int`, *optional*, defaults to 0): The padding for the convolutions in the neck. fpn_top_down_levels (`List[int]`, *optional*, defaults to `[2, 3]`): The levels for the top-down FPN connections. num_feature_levels (`int`, *optional*, defaults to 3): The number of feature levels from the FPN to use. hidden_act (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function in the neck. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon for the layer normalization. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
""" base_config_key = "vision_config" model_type = "edgetam_vision_model" sub_configs = { "backbone_config": AutoConfig, } def __init__( self, backbone_config=None, backbone_channel_list=None, backbone_feature_sizes=None, fpn_hidden_size=256, fpn_kernel_size=1, fpn_stride=1, fpn_padding=0, fpn_top_down_levels=None, num_feature_levels=3, hidden_act="gelu", layer_norm_eps=1e-6, initializer_range=0.02, **kwargs, ): backbone_channel_list = [384, 192, 96, 48] if backbone_channel_list is None else backbone_channel_list backbone_feature_sizes = ( [[256, 256], [128, 128], [64, 64]] if backbone_feature_sizes is None else backbone_feature_sizes ) fpn_top_down_levels = [2, 3] if fpn_top_down_levels is None else fpn_top_down_levels if isinstance(backbone_config, dict): backbone_config["model_type"] = backbone_config.get("model_type", "timm_wrapper") backbone_config = CONFIG_MAPPING[backbone_config["model_type"]](**backbone_config) elif backbone_config is None: backbone_config = AutoConfig.from_pretrained( "timm/repvit_m1.dist_in1k", model_args={"in_chans": 3, "features_only": True, "out_indices": [0, 1, 2, 3]}, ) self.backbone_config = backbone_config # Neck self.backbone_channel_list = backbone_channel_list self.backbone_feature_sizes = backbone_feature_sizes self.fpn_hidden_size = fpn_hidden_size self.fpn_kernel_size = fpn_kernel_size self.fpn_stride = fpn_stride self.fpn_padding = fpn_padding self.fpn_top_down_levels = fpn_top_down_levels self.num_feature_levels = num_feature_levels self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range super().__init__(**kwargs) class EdgeTamPromptEncoderConfig(Sam2PromptEncoderConfig): pass class EdgeTamMaskDecoderConfig(Sam2MaskDecoderConfig): pass class EdgeTamConfig(Sam2Config): r""" [`EdgeTamConfig`] is the configuration class to store the configuration of a [`EdgeTamModel`]. 
It is used to instantiate a EDGETAM model according to the specified arguments, defining the memory attention, memory encoder, and image encoder configs. Instantiating a configuration defaults will yield a similar configuration to that of the SAM 2.1 Hiera-tiny [facebook/edgetam.1-hiera-tiny](https://huggingface.co/facebook/edgetam.1-hiera-tiny) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. <Tip> EdgeTAM checkpoints with `model_type="edgetam_video"` are compatible with `EdgeTamModel` since the video variant weights are a superset of the image-only model weights. You may see a warning about model type mismatch when loading such checkpoints, which can be safely ignored in this case. </Tip> Args: vision_config (Union[`dict`, `EdgeTamVisionConfig`], *optional*): Dictionary of configuration options used to initialize [`EdgeTamVisionConfig`]. prompt_encoder_config (Union[`dict`, `EdgeTamPromptEncoderConfig`], *optional*): Dictionary of configuration options used to initialize [`EdgeTamPromptEncoderConfig`]. mask_decoder_config (Union[`dict`, `EdgeTamMaskDecoderConfig`], *optional*): Dictionary of configuration options used to initialize [`EdgeTamMaskDecoderConfig`]. initializer_range (`float`, *optional*, defaults to 0.02): Standard deviation for parameter initialization. Example: ```python >>> from transformers import ( ... EdgeTamVisionConfig, ... EdgeTamPromptEncoderConfig, ... EdgeTamMaskDecoderConfig, ... EdgeTamModel, ... 
) >>> # Initializing a EdgeTamConfig with `"facebook/edgetam.1_hiera_tiny"` style configuration >>> configuration = EdgeTamConfig() >>> # Initializing a EdgeTamModel (with random weights) from the `"facebook/edgetam.1_hiera_tiny"` style configuration >>> model = EdgeTamModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # We can also initialize a EdgeTamConfig from a EdgeTamVisionConfig, EdgeTamPromptEncoderConfig, and EdgeTamMaskDecoderConfig >>> # Initializing EDGETAM vision encoder, memory attention, and memory encoder configurations >>> vision_config = EdgeTamVisionConfig() >>> prompt_encoder_config = EdgeTamPromptEncoderConfig() >>> mask_decoder_config = EdgeTamMaskDecoderConfig() >>> config = EdgeTamConfig(vision_config, prompt_encoder_config, mask_decoder_config) ``` """ pass class EdgeTamLayerNorm(Sam2LayerNorm): pass class EdgeTamVisionEncoderOutput(Sam2VisionEncoderOutput): pass class EdgeTamAttention(Sam2Attention): pass class EdgeTamTwoWayAttentionBlock(Sam2TwoWayAttentionBlock): pass class EdgeTamFeedForward(Sam2FeedForward): pass @auto_docstring class EdgeTamPreTrainedModel(Sam2PreTrainedModel): _keys_to_ignore_on_load_unexpected = None @torch.no_grad() def _init_weights(self, module): PreTrainedModel._init_weights(self, module) if isinstance(module, EdgeTamModel): if module.no_memory_embedding is not None: init.zeros_(module.no_memory_embedding) elif hasattr(module, "positional_embedding"): init.normal_(module.positional_embedding, std=module.scale) @auto_docstring( custom_intro=""" The vision model from EdgeTAM without any head or projection on top. """ ) class EdgeTamVisionModel(Sam2VisionModel): config_class = EdgeTamVisionConfig main_input_name = "pixel_values" # TODO: TimmWrapper models aren't compatible with _can_record_outputs yet. We specifically set this to # an empty dict to avoid the _can_record_outputs from Sam2VisionModel being inherited here. 
_can_record_outputs = {} def get_input_embeddings(self): raise NotImplementedError("Can't get input embeddings from timm wrapper model") @merge_with_config_defaults @capture_outputs def forward( self, pixel_values: torch.FloatTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple | EdgeTamVisionEncoderOutput: if pixel_values is None: raise ValueError("You have to specify pixel_values") # Forward through backbone backbone_output = self.backbone(pixel_values, **kwargs) intermediate_hidden_states = backbone_output.last_hidden_state intermediate_hidden_states = [hidden_state.permute(0, 2, 3, 1) for hidden_state in intermediate_hidden_states] fpn_hidden_states, fpn_position_encoding = self.neck(intermediate_hidden_states) # Select last `num_feature_levels` feature levels from FPN and reverse order to get features from high to low resolution fpn_hidden_states = fpn_hidden_states[-self.num_feature_levels :][::-1] fpn_position_encoding = fpn_position_encoding[-self.num_feature_levels :][::-1] return EdgeTamVisionEncoderOutput( last_hidden_state=intermediate_hidden_states[-1], fpn_hidden_states=fpn_hidden_states, fpn_position_encoding=fpn_position_encoding, hidden_states=backbone_output.hidden_states, ) class EdgeTamModel(Sam2Model): _keys_to_ignore_on_load_unexpected = [ r"^memory_.*", r"^mask_downsample.*", r"spatial_perceiver.*", r"^object_pointer_proj.*", r"^temporal_positional_encoding_projection_layer.*", "no_memory_positional_encoding", "no_object_pointer", "occlusion_spatial_embedding_parameter", ] def get_input_embeddings(self): raise NotImplementedError("Can't get input embeddings from timm wrapper model") __all__ = [ "EdgeTamModel", "EdgeTamVisionModel", "EdgeTamPreTrainedModel", "EdgeTamConfig", "EdgeTamVisionConfig", "EdgeTamPromptEncoderConfig", "EdgeTamMaskDecoderConfig", ]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/edgetam/modular_edgetam.py", "license": "Apache License 2.0", "lines": 247, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/edgetam_video/convert_edgetam_video_to_hf.py
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Convert SAM checkpoints from the original repository. URL: https://github.com/facebookresearch/segment-anything-2. """ import argparse import re from io import BytesIO import httpx import numpy as np import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( EdgeTamVideoConfig, EdgeTamVideoMaskDecoderConfig, EdgeTamVideoModel, EdgeTamVideoPromptEncoderConfig, EdgeTamVisionConfig, Sam2ImageProcessorFast, Sam2VideoProcessor, Sam2VideoVideoProcessor, TimmWrapperConfig, ) def get_config(model_name): backbone_config = TimmWrapperConfig.from_pretrained( "timm/repvit_m1.dist_in1k", model_args={"in_chans": 3, "features_only": True, "out_indices": (0, 1, 2, 3)}, ) vision_config = EdgeTamVisionConfig(backbone_config=backbone_config) prompt_encoder_config = EdgeTamVideoPromptEncoderConfig() mask_decoder_config = EdgeTamVideoMaskDecoderConfig() enable_temporal_pos_encoding_for_object_pointers = False enable_occlusion_spatial_embedding = False config = EdgeTamVideoConfig( vision_config=vision_config, prompt_encoder_config=prompt_encoder_config, mask_decoder_config=mask_decoder_config, enable_temporal_pos_encoding_for_object_pointers=enable_temporal_pos_encoding_for_object_pointers, enable_occlusion_spatial_embedding=enable_occlusion_spatial_embedding, ) return config KEYS_TO_MODIFY_MAPPING = { "iou_prediction_head.layers.0": 
"iou_prediction_head.proj_in", "iou_prediction_head.layers.1": "iou_prediction_head.layers.0", "iou_prediction_head.layers.2": "iou_prediction_head.proj_out", "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1", "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm", "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2", "mask_downscaling.0": "mask_embed.conv1", "mask_downscaling.1": "mask_embed.layer_norm1", "mask_downscaling.3": "mask_embed.conv2", "mask_downscaling.4": "mask_embed.layer_norm2", "mask_downscaling.6": "mask_embed.conv3", "dwconv": "depthwise_conv", "pwconv": "pointwise_conv", "fuser": "memory_fuser", "point_embeddings": "point_embed", "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding", "obj_ptr_tpos_proj": "temporal_positional_encoding_projection_layer", "no_obj_embed_spatial": "occlusion_spatial_embedding_parameter", "sam_prompt_encoder": "prompt_encoder", "sam_mask_decoder": "mask_decoder", "maskmem_tpos_enc": "memory_temporal_positional_encoding", "gamma": "scale", "image_encoder.neck": "vision_encoder.neck", "image_encoder": "vision_encoder.backbone", "neck.0": "neck.conv1", "neck.1": "neck.layer_norm1", "neck.2": "neck.conv2", "neck.3": "neck.layer_norm2", "pix_feat_proj": "feature_projection", "patch_embed.proj": "patch_embed.projection", "no_mem_embed": "no_memory_embedding", "no_mem_pos_enc": "no_memory_positional_encoding", "obj_ptr": "object_pointer", ".norm": ".layer_norm", "trunk.": "", "out_proj": "o_proj", "body.": "timm_model.", "ff.0": "mlp.layer_norm", "ff.1": "mlp.up_proj", "ff.3": "mlp.down_proj", } def replace_keys(state_dict): model_state_dict = {} output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*" output_mask_decoder_mlps_pattern = r"mask_decoder.transformer.layers.(\d+).mlp.layers.(\d+).*" output_mask_decoder_score_head_pattern = r"mask_decoder.pred_obj_score_head.layers.(\d+).*" output_vision_encoder_mlps_pattern = 
r"vision_encoder.backbone.blocks.(\d+).mlp.layers.(\d+).*" output_vision_encoder_neck_pattern = r"vision_encoder.neck.convs.(\d+).conv" output_memory_encoder_projection_pattern = r"memory_encoder.o_proj.*" memory_attention_pattern = r"memory_attention.*" output_object_pointer_proj_pattern = r"object_pointer_proj.layers.(\d+).*" output_memory_encoder_mask_downsampler_pattern = r"memory_encoder.mask_downsampler.encoder.(\d+).*" perceiver_resampler_patterns = { r"spatial_perceiver.latents": r"spatial_perceiver.latents_1d", r"spatial_perceiver.latents_1d_2d": r"spatial_perceiver.latents_2d", r"spatial_perceiver.layers.(\d+).attn.layer_norm_x": r"spatial_perceiver.layers.\1.layer_norm_input", r"spatial_perceiver.layers.(\d+).attn.layer_norm_latents": r"spatial_perceiver.layers.\1.layer_norm_latents", r"spatial_perceiver.layers.(\d+).self_attn.layer_norm": r"spatial_perceiver.layers.\1.layer_norm_self", r"spatial_perceiver.layers.(\d+).attn.to_q": r"spatial_perceiver.layers.\1.cross_attention.q_proj", r"spatial_perceiver.layers.(\d+).attn.to_kv": r"spatial_perceiver.layers.\1.cross_attention.kv_proj_combined", r"spatial_perceiver.layers.(\d+).attn.to_out": r"spatial_perceiver.layers.\1.cross_attention.o_proj", r"spatial_perceiver.layers.(\d+).self_attn.to_q": r"spatial_perceiver.layers.\1.self_attention.q_proj", r"spatial_perceiver.layers.(\d+).self_attn.to_kv": r"spatial_perceiver.layers.\1.self_attention.kv_proj_combined", r"spatial_perceiver.layers.(\d+).self_attn.to_out": r"spatial_perceiver.layers.\1.self_attention.o_proj", r"spatial_perceiver.layers.(\d+).attn": r"spatial_perceiver.layers.\1.cross_attention", r"spatial_perceiver.layers.(\d+).self_attn": r"spatial_perceiver.layers.\1.self_attention", } for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: key = key.replace(key_to_modify, new_key) for pattern, replacement in perceiver_resampler_patterns.items(): if re.match(pattern, key): key = 
re.sub(pattern, replacement, key) # vision_encoder.blocks.0.mlp.layers.1.weight -> vision_encoder.blocks.0.mlp.proj_out.weight if re.match(output_vision_encoder_mlps_pattern, key): layer_nb = int(re.match(output_vision_encoder_mlps_pattern, key).group(2)) if layer_nb == 0: key = key.replace("layers.0", "proj_in") elif layer_nb == 1: key = key.replace("layers.1", "proj_out") if re.match(memory_attention_pattern, key): key = key.replace("linear1", "mlp.up_proj") key = key.replace("linear2", "mlp.down_proj") # mask_decoder.transformer.layers.0.mlp.layers.1.weight -> mask_decoder.transformer.layers.1.mlp.proj_out.weight if re.match(output_mask_decoder_mlps_pattern, key): layer_nb = int(re.match(output_mask_decoder_mlps_pattern, key).group(2)) if layer_nb == 0: key = key.replace("mlp.layers.0", "mlp.proj_in") elif layer_nb == 1: key = key.replace("mlp.layers.1", "mlp.proj_out") # mask_decoder.pred_obj_score_head.layers.1.weight -> mask_decoder.pred_obj_score_head.proj_in.weight if re.match(output_mask_decoder_score_head_pattern, key): layer_nb = int(re.match(output_mask_decoder_score_head_pattern, key).group(1)) if layer_nb == 0: key = key.replace("layers.0", "proj_in") elif layer_nb == 1: key = key.replace("layers.1", "layers.0") elif layer_nb == 2: key = key.replace("layers.2", "proj_out") if re.match(output_hypernetworks_mlps_pattern, key): layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2)) if layer_nb == 0: key = key.replace("layers.0", "proj_in") elif layer_nb == 1: key = key.replace("layers.1", "layers.0") elif layer_nb == 2: key = key.replace("layers.2", "proj_out") # vision_encoder.neck.convs.1.conv.bias -> vision_encoder.neck.convs.1.bias if re.match(output_vision_encoder_neck_pattern, key): key = key.replace(".conv.", ".") # memory_encoder.o_proj.weight -> memory_encoder.projection.weight if re.match(output_memory_encoder_projection_pattern, key): key = key.replace(".o_proj.", ".projection.") if 
re.match(output_object_pointer_proj_pattern, key): layer_nb = int(re.match(output_object_pointer_proj_pattern, key).group(1)) if layer_nb == 0: key = key.replace("layers.0", "proj_in") elif layer_nb == 1: key = key.replace("layers.1", "layers.0") elif layer_nb == 2: key = key.replace("layers.2", "proj_out") key = key.replace("layers.2", "proj_out") if re.match(output_memory_encoder_mask_downsampler_pattern, key): layer_nb = int(re.match(output_memory_encoder_mask_downsampler_pattern, key).group(1)) if layer_nb == 12: key = key.replace(f"encoder.{layer_nb}", "final_conv") elif layer_nb % 3 == 0: key = key.replace(f"encoder.{layer_nb}", f"layers.{layer_nb // 3}.conv") elif layer_nb % 3 == 1: key = key.replace(f"encoder.{layer_nb}", f"layers.{layer_nb // 3}.layer_norm") if "kv_proj_combined" in key: # Split the weight tensor in half along dimension 0 (output dimension) k_weight, v_weight = torch.chunk(value, 2, dim=0) # Create the k_proj and v_proj keys k_key = key.replace("kv_proj_combined", "k_proj") v_key = key.replace("kv_proj_combined", "v_proj") model_state_dict[k_key] = k_weight model_state_dict[v_key] = v_weight continue model_state_dict[key] = value model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[ "prompt_encoder.shared_embedding.positional_embedding" ] model_state_dict["prompt_encoder.point_embed.weight"] = torch.cat( [model_state_dict.pop(f"prompt_encoder.point_embed.{i}.weight") for i in range(4)], dim=0, ) return model_state_dict def convert_edgetam_checkpoint(model_name, checkpoint_path, pytorch_dump_folder, push_to_hub, run_sanity_check): config = get_config(model_name) state_dict = torch.load(checkpoint_path, map_location="cpu")["model"] state_dict = replace_keys(state_dict) image_processor = Sam2ImageProcessorFast() video_processor = Sam2VideoVideoProcessor() processor = Sam2VideoProcessor(image_processor=image_processor, video_processor=video_processor) hf_model = EdgeTamVideoModel(config) hf_model.eval() device = 
"cuda" if torch.cuda.is_available() else "cpu" missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=True) hf_model = hf_model.to(device) print("Missing keys:", missing_keys) print("Unexpected keys:", unexpected_keys) if run_sanity_check: url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" with httpx.stream("GET", url) as response: raw_image = Image.open(BytesIO(response.read())).convert("RGB") input_points = [[[[1000, 600]]]] input_labels = [[[1]]] inputs = processor( images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(device) with torch.no_grad(): output = hf_model._single_frame_forward(**inputs) scores = output.iou_scores.squeeze() assert torch.allclose(scores, torch.tensor([0.0356, 0.2141, 0.9707]).cuda(), atol=1e-3) if pytorch_dump_folder is not None: processor.save_pretrained(pytorch_dump_folder) hf_model.save_pretrained(pytorch_dump_folder) if push_to_hub: repo_id = f"yonigozlan/{pytorch_dump_folder.split('/')[-1]}" processor.push_to_hub(repo_id) hf_model.push_to_hub(repo_id) if __name__ == "__main__": parser = argparse.ArgumentParser() choices = ["EdgeTAM"] parser.add_argument( "--model_name", default="EdgeTAM", choices=choices, type=str, help="Name of the original model to convert", ) parser.add_argument( "--checkpoint_path", type=str, required=False, help="Path to the original checkpoint", ) parser.add_argument("--pytorch_dump_folder_path", default="", type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) parser.add_argument( "--run_sanity_check", action="store_true", help="Whether to run the sanity check after converting", ) args = parser.parse_args() hf_model_name = args.model_name.replace("_", "-") checkpoint_path = ( hf_hub_download(f"facebook/{hf_model_name}", f"{args.model_name.lower()}.pt") if 
args.checkpoint_path is None else args.checkpoint_path ) convert_edgetam_checkpoint( args.model_name, checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.run_sanity_check )
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/edgetam_video/convert_edgetam_video_to_hf.py", "license": "Apache License 2.0", "lines": 275, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/edgetam_video/modular_edgetam_video.py
# Copyright 2025 the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from collections.abc import Callable from typing import Any import torch import torch.nn as nn import torch.nn.functional as F from torch import Tensor from ... import initialization as init from ...activations import ACT2FN from ...configuration_utils import PreTrainedConfig from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_utils import ALL_ATTENTION_FUNCTIONS from ...processing_utils import Unpack from ...pytorch_utils import compile_compatible_method_lru_cache from ...utils import ( auto_docstring, ) from ...utils.output_capturing import OutputRecorder from ..auto import CONFIG_MAPPING, AutoConfig from ..sam2.modeling_sam2 import eager_attention_forward, window_partition from ..sam2_video.configuration_sam2_video import ( Sam2VideoConfig, Sam2VideoMaskDecoderConfig, Sam2VideoPromptEncoderConfig, ) from ..sam2_video.modeling_sam2_video import ( Sam2VideoAttention, Sam2VideoFeedForward, Sam2VideoImageSegmentationOutput, Sam2VideoInferenceSession, Sam2VideoLayerNorm, Sam2VideoMemoryAttention, Sam2VideoMemoryEncoder, Sam2VideoMemoryFuserCXBlock, Sam2VideoModel, Sam2VideoPositionEmbeddingSine, Sam2VideoPreTrainedModel, Sam2VideoSegmentationOutput, Sam2VideoTwoWayAttentionBlock, Sam2VideoVisionEncoderOutput, Sam2VideoVisionRotaryEmbedding, rotate_pairwise, ) class EdgeTamVideoPromptEncoderConfig(Sam2VideoPromptEncoderConfig): pass 
class EdgeTamVideoMaskDecoderConfig(Sam2VideoMaskDecoderConfig): pass class EdgeTamVideoConfig(Sam2VideoConfig): r""" [`EdgeTamVideoConfig`] is the configuration class to store the configuration of a [`EdgeTamVideoModel`]. It is used to instantiate a EDGETAM model according to the specified arguments, defining the memory attention, memory encoder, and image encoder configs. Instantiating a configuration defaults will yield a similar configuration to that of the SAM 2.1 Hiera-tiny [facebook/EdgeTAM](https://huggingface.co/facebook/EdgeTAM) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vision_config (Union[`dict`, `EdgeTamVideoVisionConfig`], *optional*): Dictionary of configuration options used to initialize [`EdgeTamVideoVisionConfig`]. prompt_encoder_config (Union[`dict`, `EdgeTamVideoPromptEncoderConfig`], *optional*): Dictionary of configuration options used to initialize [`EdgeTamVideoPromptEncoderConfig`]. mask_decoder_config (Union[`dict`, `EdgeTamVideoMaskDecoderConfig`], *optional*): Dictionary of configuration options used to initialize [`EdgeTamMaskDecoderConfig`]. initializer_range (`float`, *optional*, defaults to 0.02): Standard deviation for parameter initialization. num_maskmem (`int`, *optional*, defaults to 7): The number of memory slots for the mask memory. image_size (`int`, *optional*, defaults to 1024): The size of the input images. sigmoid_scale_for_mem_enc (`float`, *optional*, defaults to 20.0): Scale factor for the sigmoid function in the memory encoder. sigmoid_bias_for_mem_enc (`float`, *optional*, defaults to -10.0): Bias for the sigmoid function in the memory encoder. enable_occlusion_spatial_embedding (`bool`, *optional*, defaults to `True`): Whether to enable spatial embedding for occlusions. 
multimask_output_in_sam (`bool`, *optional*, defaults to `True`): Whether to output multiple masks from the SAM head. multimask_min_pt_num (`int`, *optional*, defaults to 0): The minimum number of points to trigger multimask output. multimask_max_pt_num (`int`, *optional*, defaults to 1): The maximum number of points to trigger multimask output. multimask_output_for_tracking (`bool`, *optional*, defaults to `True`): Whether to use multimask output for tracking. max_object_pointers_in_encoder (`int`, *optional*, defaults to 16): The maximum number of object pointers in the encoder. max_cond_frame_num (`int`, *optional*, defaults to -1): Maximum number of conditioning frames to use in memory attention. Set to -1 to use all conditioning frames. enable_temporal_pos_encoding_for_object_pointers (`bool`, *optional*, defaults to `True`): Whether to enable temporal positional encoding for object pointers. memory_attention_hidden_size (`int`, *optional*, defaults to 256): Dimensionality of the memory attention hidden states. memory_attention_num_layers (`int`, *optional*, defaults to 2): The number of layers in the memory attention module. memory_attention_num_attention_heads (`int`, *optional*, defaults to 1): Number of attention heads for each attention layer in the memory attention. memory_attention_downsample_rate (`int`, *optional*, defaults to 1): The downsample rate for the attention layers. memory_attention_mlp_hidden_size (`int`, *optional*, defaults to 2048): The dimension of the feedforward network in the memory attention module. memory_attention_mlp_hidden_act (`str`, *optional*, defaults to `"relu"`): The non-linear activation function in the feedforward network in the memory attention module. memory_attention_dropout (`float`, *optional*, defaults to 0.1): The dropout rate for the memory attention module. memory_attention_rope_theta (`float`, *optional*, defaults to 10000): The Rope theta parameter. 
memory_attention_rope_feat_sizes (`Tuple[int, int]`, *optional*, defaults to `[64, 64]`): The feature sizes for the Rope positional encoding. memory_attention_rope_k_sizes (`List[int]`, *optional*, defaults to `[16, 16]`): The key feature sizes for the RoPE positional encoding in memory attention. memory_attention_rope_dropout (`float`, *optional*, defaults to 0.1): The dropout rate for the Rope positional encoding. perceiver_resampler_num_latents (`int`, *optional*, defaults to 256): The number of 1D latent tokens in the perceiver resampler. perceiver_resampler_num_latents_2d (`int`, *optional*, defaults to 256): The number of 2D latent tokens in the perceiver resampler. perceiver_resampler_hidden_size (`int`, *optional*, defaults to 64): The hidden size of the perceiver resampler. perceiver_resampler_mlp_intermediate_size (`int`, *optional*, defaults to 256): The intermediate size of the feedforward network in the perceiver resampler. perceiver_resampler_num_attention_heads (`int`, *optional*, defaults to 1): The number of attention heads in the perceiver resampler. perceiver_resampler_attention_head_dim (`int`, *optional*, defaults to 64): The dimension of each attention head in the perceiver resampler. perceiver_resampler_num_layers (`int`, *optional*, defaults to 2): The number of layers in the perceiver resampler. perceiver_resampler_hidden_dropout (`float`, *optional*, defaults to 0.0): The dropout rate for the hidden layers in the perceiver resampler. perceiver_resampler_attention_dropout (`float`, *optional*, defaults to 0.0): The dropout rate for the attention layers in the perceiver resampler. memory_encoder_hidden_size (`int`, *optional*, defaults to 256): Dimensionality of the memory encoder hidden states. memory_encoder_output_channels (`int`, *optional*, defaults to 64): The number of output channels for the memory encoder. mask_downsampler_embed_dim (`int`, *optional*, defaults to 256): The dimension of the mask downsampler embedding. 
memory_fuser_intermediate_dim (`int`, *optional*, defaults to 1024): The intermediate dimension of the memory fuser feedforward network. mask_downsampler_kernel_size (`int`, *optional*, defaults to 3): The kernel size for the mask downsampler. mask_downsampler_stride (`int`, *optional*, defaults to 2): The stride for the mask downsampler. mask_downsampler_padding (`int`, *optional*, defaults to 1): The padding for the mask downsampler. mask_downsampler_total_stride (`int`, *optional*, defaults to 16): The total stride for the mask downsampler. mask_downsampler_hidden_act (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function in the mask downsampler. memory_fuser_num_layers (`int`, *optional*, defaults to 2): The number of layers in the memory fuser. memory_fuser_embed_dim (`int`, *optional*, defaults to 256): The dimension of the memory fuser embedding. memory_fuser_kernel_size (`int`, *optional*, defaults to 7): The kernel size for the memory fuser. memory_fuser_padding (`int`, *optional*, defaults to 3): The padding for the memory fuser. memory_fuser_layer_scale_init_value (`float`, *optional*, defaults to 1e-06): The initial value for the layer scale in the memory fuser. memory_fuser_hidden_act (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function in the memory fuser. Example: ```python >>> from transformers import ( ... EdgeTamVisionConfig, ... EdgeTamVideoPromptEncoderConfig, ... EdgeTamVideoMaskDecoderConfig, ... EdgeTamVideoModel, ... EdgeTamVideoConfig, ... 
) >>> # Initializing a EdgeTamVideoConfig with `"facebook/edgetam.1_hiera_tiny"` style configuration >>> configuration = EdgeTamVideoConfig() >>> # Initializing a EdgeTamVideoModel (with random weights) from the `"facebook/edgetam.1_hiera_tiny"` style configuration >>> model = EdgeTamVideoModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # We can also initialize a EdgeTamConfig from a EdgeTamVisionConfig, EdgeTamPromptEncoderConfig, and EdgeTamMaskDecoderConfig >>> # Initializing EDGETAM vision encoder, memory attention, and memory encoder configurations >>> vision_config = EdgeTamVisionConfig() >>> prompt_encoder_config = EdgeTamVideoPromptEncoderConfig() >>> mask_decoder_config = EdgeTamVideoMaskDecoderConfig() >>> config = EdgeTamVideoConfig(vision_config, prompt_encoder_config, mask_decoder_config) ```""" model_type = "edgetam_video" sub_configs = { "vision_config": AutoConfig, "prompt_encoder_config": EdgeTamVideoPromptEncoderConfig, "mask_decoder_config": EdgeTamVideoMaskDecoderConfig, } def __init__( self, vision_config=None, prompt_encoder_config=None, mask_decoder_config=None, initializer_range=0.02, num_maskmem=7, image_size=1024, sigmoid_scale_for_mem_enc=20.0, sigmoid_bias_for_mem_enc=-10.0, enable_occlusion_spatial_embedding=True, multimask_output_in_sam=True, multimask_min_pt_num=0, multimask_max_pt_num=1, multimask_output_for_tracking=True, max_object_pointers_in_encoder=16, max_cond_frame_num=-1, enable_temporal_pos_encoding_for_object_pointers=True, # memory attention memory_attention_hidden_size=256, memory_attention_num_layers=2, memory_attention_num_attention_heads=1, memory_attention_downsample_rate=1, memory_attention_mlp_hidden_size=2048, memory_attention_mlp_hidden_act="relu", memory_attention_dropout=0.1, memory_attention_rope_theta=10000, memory_attention_rope_feat_sizes=None, memory_attention_rope_k_sizes=None, memory_attention_rope_dropout=0.1, # spatial perceiver resampler 
perceiver_resampler_num_latents=256, perceiver_resampler_num_latents_2d=256, perceiver_resampler_hidden_size=64, perceiver_resampler_mlp_intermediate_size=256, perceiver_resampler_num_attention_heads=1, perceiver_resampler_attention_head_dim=64, perceiver_resampler_num_layers=2, perceiver_resampler_hidden_dropout=0.0, perceiver_resampler_attention_dropout=0.0, # memory encoder memory_encoder_hidden_size=256, memory_encoder_output_channels=64, mask_downsampler_embed_dim=256, memory_fuser_intermediate_dim=1024, mask_downsampler_kernel_size=3, mask_downsampler_stride=2, mask_downsampler_padding=1, mask_downsampler_total_stride=16, mask_downsampler_hidden_act="gelu", memory_fuser_num_layers=2, memory_fuser_embed_dim=256, memory_fuser_kernel_size=7, memory_fuser_padding=3, memory_fuser_layer_scale_init_value=1e-6, memory_fuser_hidden_act="gelu", **kwargs, ): PreTrainedConfig.__init__(**kwargs) vision_config = vision_config if vision_config is not None else {} prompt_encoder_config = prompt_encoder_config if prompt_encoder_config is not None else {} mask_decoder_config = mask_decoder_config if mask_decoder_config is not None else {} memory_attention_rope_feat_sizes = ( [64, 64] if memory_attention_rope_feat_sizes is None else memory_attention_rope_feat_sizes ) memory_attention_rope_k_sizes = ( [16, 16] if memory_attention_rope_k_sizes is None else memory_attention_rope_k_sizes ) if isinstance(vision_config, dict): vision_config["model_type"] = vision_config.get("model_type", "sam2_vision_model") vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config) if isinstance(prompt_encoder_config, EdgeTamVideoPromptEncoderConfig): prompt_encoder_config = prompt_encoder_config.to_dict() if isinstance(mask_decoder_config, EdgeTamVideoMaskDecoderConfig): mask_decoder_config = mask_decoder_config.to_dict() self.vision_config = vision_config self.prompt_encoder_config = EdgeTamVideoPromptEncoderConfig(**prompt_encoder_config) self.mask_decoder_config = 
EdgeTamVideoMaskDecoderConfig(**mask_decoder_config) self.initializer_range = initializer_range self.num_maskmem = num_maskmem # default 1 input frame + 6 previous frames self.image_size = image_size self.sigmoid_scale_for_mem_enc = sigmoid_scale_for_mem_enc # scale factor for mask sigmoid prob self.sigmoid_bias_for_mem_enc = sigmoid_bias_for_mem_enc # bias factor for mask sigmoid prob self.enable_occlusion_spatial_embedding = enable_occlusion_spatial_embedding self.multimask_output_in_sam = multimask_output_in_sam self.multimask_min_pt_num = multimask_min_pt_num self.multimask_max_pt_num = multimask_max_pt_num self.multimask_output_for_tracking = multimask_output_for_tracking self.max_object_pointers_in_encoder = max_object_pointers_in_encoder self.max_cond_frame_num = max_cond_frame_num self.enable_temporal_pos_encoding_for_object_pointers = enable_temporal_pos_encoding_for_object_pointers # memory attention self.memory_attention_hidden_size = memory_attention_hidden_size self.memory_attention_num_layers = memory_attention_num_layers self.memory_attention_num_attention_heads = memory_attention_num_attention_heads self.memory_attention_downsample_rate = memory_attention_downsample_rate self.memory_attention_mlp_hidden_size = memory_attention_mlp_hidden_size self.memory_attention_mlp_hidden_act = memory_attention_mlp_hidden_act self.memory_attention_dropout = memory_attention_dropout self.memory_attention_rope_theta = memory_attention_rope_theta self.memory_attention_rope_feat_sizes = memory_attention_rope_feat_sizes self.memory_attention_rope_k_sizes = memory_attention_rope_k_sizes self.memory_attention_rope_dropout = memory_attention_rope_dropout # spatial perceiver resampler self.perceiver_resampler_num_latents = perceiver_resampler_num_latents self.perceiver_resampler_num_latents_2d = perceiver_resampler_num_latents_2d self.perceiver_resampler_hidden_size = perceiver_resampler_hidden_size self.perceiver_resampler_mlp_intermediate_size = 
perceiver_resampler_mlp_intermediate_size self.perceiver_resampler_attention_head_dim = perceiver_resampler_attention_head_dim self.perceiver_resampler_num_attention_heads = perceiver_resampler_num_attention_heads self.perceiver_resampler_num_layers = perceiver_resampler_num_layers self.perceiver_resampler_hidden_dropout = perceiver_resampler_hidden_dropout self.perceiver_resampler_attention_dropout = perceiver_resampler_attention_dropout # memory encoder self.memory_encoder_hidden_size = memory_encoder_hidden_size self.memory_encoder_output_channels = memory_encoder_output_channels self.mask_downsampler_embed_dim = mask_downsampler_embed_dim self.mask_downsampler_kernel_size = mask_downsampler_kernel_size self.mask_downsampler_stride = mask_downsampler_stride self.mask_downsampler_padding = mask_downsampler_padding self.mask_downsampler_total_stride = mask_downsampler_total_stride self.mask_downsampler_hidden_act = mask_downsampler_hidden_act self.memory_fuser_num_layers = memory_fuser_num_layers self.memory_fuser_embed_dim = memory_fuser_embed_dim self.memory_fuser_intermediate_dim = memory_fuser_intermediate_dim self.memory_fuser_kernel_size = memory_fuser_kernel_size self.memory_fuser_padding = memory_fuser_padding self.memory_fuser_layer_scale_init_value = memory_fuser_layer_scale_init_value self.memory_fuser_hidden_act = memory_fuser_hidden_act class EdgeTamVideoLayerNorm(Sam2VideoLayerNorm): pass class EdgeTamVideoMemoryFuserCXBlock(Sam2VideoMemoryFuserCXBlock): pass class EdgeTamVideoVisionEncoderOutput(Sam2VideoVisionEncoderOutput): pass class EdgeTamVideoVisionRotaryEmbedding(Sam2VideoVisionRotaryEmbedding): def __init__(self, config: EdgeTamVideoConfig, end_x: int | None = None, end_y: int | None = None): nn.Module.__init__() self.dim = config.memory_attention_hidden_size // ( config.memory_attention_downsample_rate * config.memory_attention_num_attention_heads ) # Ensure even dimension for proper axial splitting if self.dim % 4 != 0: raise 
ValueError("Dimension must be divisible by 4 for axial RoPE") self.end_x, self.end_y = config.memory_attention_rope_feat_sizes if end_x is None else (end_x, end_y) self.memory_attention_rope_theta = config.memory_attention_rope_theta # directly register the cos and sin embeddings as we have a fixed feature shape inv_freq = self.create_inv_freq() self.register_buffer("rope_embeddings_cos", inv_freq.cos(), persistent=False) self.register_buffer("rope_embeddings_sin", inv_freq.sin(), persistent=False) class EdgeTamVideoAttention(Sam2VideoAttention): pass def apply_rotary_pos_emb_2d_self_attn( q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, ) -> tuple[torch.Tensor, torch.Tensor]: """ Apply rotary position embedding to query and key tensors for self-attention. Args: q: Query tensor of shape (..., seq_len, head_dim) k: Key tensor of shape (..., seq_len, head_dim) cos: Cosine position embedding of shape (seq_len, head_dim) sin: Sine position embedding of shape (seq_len, head_dim) Returns: Rotated (q, k) tensors """ # Apply RoPE to queries q_embed = q.float() # force upscale to float32 as in the original implementation q_embed = (q_embed * cos) + (rotate_pairwise(q_embed) * sin) # Apply RoPE to keys (same embeddings as queries for self-attention) k_embed = k.float() # force upscale to float32 as in the original implementation k_embed = (k_embed * cos) + (rotate_pairwise(k_embed) * sin) return q_embed.type_as(q), k_embed.type_as(k) def apply_rotary_pos_emb_2d_cross_attn( q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, cos_k: torch.Tensor, sin_k: torch.Tensor, num_k_exclude_rope: int = 0, repeat_freqs_k: int = 1, ) -> tuple[torch.Tensor, torch.Tensor]: """ Apply rotary position embedding to query and key tensors for cross-attention. 
Args: q: Query tensor of shape (..., seq_len, head_dim) k: Key tensor of shape (..., seq_len, head_dim) cos: Cosine position embedding of shape (seq_len, head_dim) sin: Sine position embedding of shape (seq_len, head_dim) cos_k: Cosine position embedding for keys of shape (seq_len, head_dim) sin_k: Sine position embedding for keys of shape (seq_len, head_dim) num_k_exclude_rope: Number of tokens at end of k to exclude from RoPE (e.g., object pointer tokens) repeat_freqs_k: Frequency repetition for keys in cross-attention (e.g., for spatial memory tokens) Returns: Rotated (q, k) tensors """ # Apply RoPE to queries (always straightforward) q_embed = q.float() q_embed = (q_embed * cos) + (rotate_pairwise(q_embed) * sin) # Split keys: RoPE tokens and excluded tokens (e.g., object pointers) num_total_k_tokens = k.shape[-2] k_for_rope = k[..., : num_total_k_tokens - num_k_exclude_rope, :] k_excluded = k[..., num_total_k_tokens - num_k_exclude_rope :, :] # Early return if no keys need RoPE if k_for_rope.shape[-2] == 0: return q_embed.type_as(q), k_excluded batch_size, num_heads, k_seq_len, channels_per_head = k_for_rope.shape # Handle temporal/spatial token structure for memory # Keys have temporal + spatial structure, only spatial tokens get RoPE tokens_per_group = k_seq_len // repeat_freqs_k spatial_tokens = cos_k.shape[-2] temporal_tokens = tokens_per_group - spatial_tokens # Reshape and separate temporal/spatial tokens k_grouped = k_for_rope.view(batch_size, num_heads, repeat_freqs_k, tokens_per_group, channels_per_head) k_temporal = k_grouped[..., :temporal_tokens, :].reshape(batch_size, num_heads, -1, channels_per_head) k_spatial = k_grouped[..., temporal_tokens:, :].reshape(batch_size, num_heads, -1, channels_per_head) # Only apply RoPE to spatial tokens k_rope_input = k_spatial # Prepare position embeddings for repeated groups if repeat_freqs_k > 1: cos_k = cos_k.repeat(1, 1, repeat_freqs_k, 1) sin_k = sin_k.repeat(1, 1, repeat_freqs_k, 1) # Apply RoPE to spatial 
tokens k_spatial_embed = k_rope_input.float() k_spatial_embed = (k_spatial_embed * cos_k) + (rotate_pairwise(k_spatial_embed) * sin_k) # Reconstruct: temporal + spatial tokens back to original structure k_spatial_reshaped = k_spatial_embed.view(batch_size, num_heads, repeat_freqs_k, -1, channels_per_head) k_temporal_reshaped = k_temporal.view(batch_size, num_heads, repeat_freqs_k, -1, channels_per_head) k_final = torch.cat([k_temporal_reshaped, k_spatial_reshaped], dim=3) k_final = k_final.view(batch_size, num_heads, k_seq_len, channels_per_head) # Combine RoPE-processed keys with excluded tokens k_embed = torch.cat([k_final.type_as(k), k_excluded], dim=-2) return q_embed.type_as(q), k_embed class EdgeTamVideoRoPESelfAttention(nn.Module): """Self-attention with rotary position encoding.""" def __init__(self, config: EdgeTamVideoConfig): super().__init__() self.config = config self.hidden_size = config.memory_attention_hidden_size self.internal_dim = self.hidden_size // config.memory_attention_downsample_rate self.num_attention_heads = config.memory_attention_num_attention_heads self.head_dim = self.internal_dim // config.memory_attention_num_attention_heads self.scaling = self.head_dim**-0.5 self.is_causal = False self.q_proj = nn.Linear(self.hidden_size, self.internal_dim) self.k_proj = nn.Linear(self.hidden_size, self.internal_dim) self.v_proj = nn.Linear(self.hidden_size, self.internal_dim) self.o_proj = nn.Linear(self.internal_dim, self.hidden_size) self.dropout_p = config.memory_attention_rope_dropout def forward( self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], **kwargs: Unpack[FlashAttentionKwargs], ) -> Tensor: # Input projections batch_size, point_batch_size = query.shape[:2] new_shape = (batch_size * point_batch_size, -1, self.num_attention_heads, self.head_dim) query = self.q_proj(query).view(*new_shape).transpose(1, 2) key = self.k_proj(key).view(*new_shape).transpose(1, 2) value 
= self.v_proj(value).view(*new_shape).transpose(1, 2)

        cos, sin = position_embeddings
        # Apply rotary position encoding for self-attention
        query, key = apply_rotary_pos_emb_2d_self_attn(query, key, cos=cos, sin=sin)

        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query,
            key,
            value,
            attention_mask=None,
            dropout=0.0 if not self.training else self.dropout_p,
            scaling=self.scaling,
            is_causal=self.is_causal,
            **kwargs,
        )
        # Merge heads back: (B*P, Heads, Seq, HeadDim) -> (B, P, Seq, Heads*HeadDim)
        attn_output = attn_output.reshape(
            batch_size, point_batch_size, -1, self.num_attention_heads * self.head_dim
        ).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class EdgeTamVideoRoPECrossAttention(nn.Module):
    """Cross-attention with rotary position encoding.

    Queries come from the current frame's features; keys/values come from the
    memory bank (hence the separate `kv_in_dim` input width for k/v projections).
    """

    def __init__(self, config: EdgeTamVideoConfig, kv_in_dim: int):
        super().__init__()
        self.config = config
        self.hidden_size = config.memory_attention_hidden_size
        # Attention runs in a downsampled internal dimension to save compute.
        self.internal_dim = self.hidden_size // config.memory_attention_downsample_rate
        self.num_attention_heads = config.memory_attention_num_attention_heads
        self.head_dim = self.internal_dim // config.memory_attention_num_attention_heads
        self.scaling = self.head_dim**-0.5
        self.is_causal = False
        self.kv_in_dim = kv_in_dim

        self.q_proj = nn.Linear(self.hidden_size, self.internal_dim)
        self.k_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
        self.v_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
        self.o_proj = nn.Linear(self.internal_dim, self.hidden_size)
        self.dropout_p = config.memory_attention_rope_dropout

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        position_embeddings_k: tuple[torch.Tensor, torch.Tensor],
        num_k_exclude_rope: int = 0,
        rope_k_repeat: int = 0,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tensor:
        """Cross-attend `query` to `key`/`value` with 2D RoPE applied to both sides.

        `num_k_exclude_rope` trailing key tokens (object pointers) are excluded
        from rotary encoding; `rope_k_repeat` repeats the key frequencies per
        spatial memory entry.
        """
        # Input projections
        batch_size, point_batch_size = query.shape[:2]
        # NOTE(review): the same `new_shape` is used for q, k and v — this assumes
        # query and key/value share sequence-collapsible shapes after projection; confirm.
        new_shape = (batch_size * point_batch_size, -1, self.num_attention_heads, self.head_dim)
        query = self.q_proj(query).view(*new_shape).transpose(1, 2)
        key = self.k_proj(key).view(*new_shape).transpose(1, 2)
        value = self.v_proj(value).view(*new_shape).transpose(1, 2)

        cos, sin = position_embeddings
        cos_k, sin_k = position_embeddings_k
        # Apply rotary position encoding for cross-attention
        query, key = apply_rotary_pos_emb_2d_cross_attn(
            query,
            key,
            cos=cos,
            sin=sin,
            cos_k=cos_k,
            sin_k=sin_k,
            repeat_freqs_k=rope_k_repeat,
            num_k_exclude_rope=num_k_exclude_rope,
        )

        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, attn_weights = attention_interface(
            self,
            query,
            key,
            value,
            attention_mask=None,
            dropout=0.0 if not self.training else self.dropout_p,
            scaling=self.scaling,
            is_causal=self.is_causal,
            **kwargs,
        )
        # Merge heads back and restore the (batch, point_batch) leading dims.
        attn_output = attn_output.reshape(
            batch_size, point_batch_size, -1, self.num_attention_heads * self.head_dim
        ).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class EdgeTamVideoTwoWayAttentionBlock(Sam2VideoTwoWayAttentionBlock):
    pass


class EdgeTamVideoPositionEmbeddingSine(Sam2VideoPositionEmbeddingSine):
    # maxsize=2 because we need to cache the forward method for both memory encoder and perceiver resampler
    @compile_compatible_method_lru_cache(maxsize=2)
    def forward(self, **super_kwargs):
        return super().forward(**super_kwargs)


class EdgeTamVideoMemoryEncoder(Sam2VideoMemoryEncoder):
    pass


class EdgeTamVideoFeedForward(Sam2VideoFeedForward):
    pass


class EdgeTamVideoPreTrainedModel(Sam2VideoPreTrainedModel):
    def _init_weights(self, module):
        # NOTE(review): parent is called without `module` — most _init_weights
        # implementations take the module as an argument; confirm the parent signature.
        super()._init_weights()
        if isinstance(module, EdgeTamVideoVisionRotaryEmbedding):
            # Rotary tables are buffers/parameters initialized from the inverse frequencies.
            inv_freq = module.create_inv_freq()
            init.copy_(module.rope_embeddings_cos, inv_freq.cos())
            init.copy_(module.rope_embeddings_sin, inv_freq.sin())


class EdgeTamVideoInferenceSession(Sam2VideoInferenceSession):
    pass


class EdgeTamVideoMemoryAttentionMLP(nn.Module):
    """Feed-forward block (up-proj -> activation -> dropout -> down-proj) for memory attention layers."""

    def __init__(self, config: EdgeTamVideoConfig):
        super().__init__()
        self.config = config
        self.hidden_size = config.memory_attention_hidden_size
        self.intermediate_size = config.memory_attention_mlp_hidden_size
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size)
        self.dropout = nn.Dropout(config.memory_attention_dropout)
        self.act_fn = ACT2FN[config.memory_attention_mlp_hidden_act]

    def forward(self, x):
        return self.down_proj(self.dropout(self.act_fn(self.up_proj(x))))


class EdgeTamVideoMemoryAttentionLayer(nn.Module):
    """One memory-attention layer: RoPE self-attention, RoPE cross-attention to memory, then MLP.

    Each sub-block is pre-norm with a residual connection and its own dropout.
    """

    def __init__(self, config: EdgeTamVideoConfig):
        super().__init__()
        hidden_size = config.memory_attention_hidden_size
        self.self_attn = EdgeTamVideoRoPESelfAttention(config)
        # kv_in_dim=64: memory features fed to cross-attention have 64 channels
        # (the perceiver-compressed memory width) — TODO confirm against the memory encoder output.
        self.cross_attn_image = EdgeTamVideoRoPECrossAttention(config, kv_in_dim=64)

        # MLP module
        self.mlp = EdgeTamVideoMemoryAttentionMLP(config)

        self.layer_norm1 = nn.LayerNorm(hidden_size)
        self.layer_norm2 = nn.LayerNorm(hidden_size)
        self.layer_norm3 = nn.LayerNorm(hidden_size)
        self.dropout1 = nn.Dropout(config.memory_attention_dropout)
        self.dropout2 = nn.Dropout(config.memory_attention_dropout)
        self.dropout3 = nn.Dropout(config.memory_attention_dropout)

    def forward(
        self,
        queries: Tensor,
        keys: Tensor,
        key_point_embedding: Tensor,
        rope_position_embeddings: tuple[Tensor, Tensor],
        rope_position_embeddings_k: tuple[Tensor, Tensor] | None = None,
        num_k_exclude_rope: int = 0,
        rope_k_repeat: int = 0,
    ) -> torch.Tensor:
        # Self-Attention
        query = self.layer_norm1(queries)
        query, _ = self.self_attn(query=query, key=query, value=query, position_embeddings=rope_position_embeddings)
        queries = queries + self.dropout1(query)

        # Cross-Attention (keys get their positional embedding added; values do not)
        query = self.layer_norm2(queries)
        query, _ = self.cross_attn_image(
            query=query,
            key=keys + key_point_embedding,
            value=keys,
            position_embeddings=rope_position_embeddings,
            position_embeddings_k=rope_position_embeddings_k,
            num_k_exclude_rope=num_k_exclude_rope,
            rope_k_repeat=rope_k_repeat,
        )
        queries = queries + self.dropout2(query)
        # MLP
        query = self.layer_norm3(queries)
        query = self.mlp(query)
        queries = queries + self.dropout3(query)
        return queries


class EdgeTamVideoMemoryAttention(Sam2VideoMemoryAttention):
    def __init__(self, config: EdgeTamVideoConfig):
        # NOTE(review): parent __init__ is called without `config` — confirm
        # Sam2VideoMemoryAttention.__init__ accepts no arguments here.
        super().__init__()
        # Separate rotary table for keys: memory features have their own spatial grid size.
        self.rotary_emb_k = EdgeTamVideoVisionRotaryEmbedding(
            config, end_x=config.memory_attention_rope_k_sizes[0], end_y=config.memory_attention_rope_k_sizes[1]
        )

    def forward(
        self,
        current_vision_features: torch.Tensor,
        memory: torch.Tensor,
        current_vision_position_embeddings: Tensor | None = None,
        memory_posision_embeddings: Tensor | None = None,
        num_object_pointer_tokens: int = 0,
        num_spatial_memory_tokens: int = -1,
    ):
        """
        Args:
            current_vision_features (`torch.FloatTensor`):
                The current vision features used for self-attention.
            memory (`torch.FloatTensor`):
                The memory features used for cross-attention.
            current_vision_position_embeddings (`torch.FloatTensor`, *optional*):
                The position embeddings for the current vision features.
            memory_posision_embeddings (`torch.FloatTensor`, *optional*):
                The position embeddings for the memory features.
            num_object_pointer_tokens (`int`, *optional*, defaults to 0):
                The number of object pointer tokens.
            num_spatial_memory_tokens (`int`, *optional*, defaults to -1):
                The number of spatial memory entries; used to repeat the key rotary frequencies.
        """
        output = current_vision_features
        if current_vision_position_embeddings is not None:
            # Small fixed weight on the positional signal, as in SAM 2's memory attention.
            output = output + 0.1 * current_vision_position_embeddings

        # Convert to batch first
        output = output.transpose(0, 1)
        memory = memory.transpose(0, 1).unsqueeze(1)
        memory_posision_embeddings = memory_posision_embeddings.transpose(0, 1).unsqueeze(1)

        rope_position_embeddings = self.rotary_emb()
        rope_position_embeddings_k = self.rotary_emb_k()

        for layer in self.layers:
            output = layer(
                queries=output.unsqueeze(1) if output.ndim == 3 else output,
                keys=memory,
                key_point_embedding=memory_posision_embeddings,
                rope_position_embeddings=rope_position_embeddings,
                rope_position_embeddings_k=rope_position_embeddings_k,
                num_k_exclude_rope=num_object_pointer_tokens,
                rope_k_repeat=num_spatial_memory_tokens,
            )
        normed_output = self.layer_norm(output)

        # Convert back to seq first
        normed_output = normed_output.transpose(0, 1)

        return normed_output


class EdgeTamVideoPerceiverMLP(nn.Module):
    """Pre-norm GELU MLP used inside the perceiver resampler."""

    def __init__(self, config: EdgeTamVideoConfig):
        super().__init__()
        self.hidden_size = config.perceiver_resampler_hidden_size
        self.intermediate_size = config.perceiver_resampler_mlp_intermediate_size
        self.layer_norm = nn.LayerNorm(self.hidden_size)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = nn.GELU()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.down_proj(self.act_fn(self.up_proj(hidden_states)))
        return hidden_states


class EdgeTamVideoPerceiverAttention(nn.Module):
    """Multi-head attention for the perceiver resampler (no layer norms; those live in the encoder layer)."""

    def __init__(self, config: EdgeTamVideoConfig):
        super().__init__()
        self.config = config
        self.hidden_size = config.perceiver_resampler_hidden_size
        self.num_attention_heads = config.perceiver_resampler_num_attention_heads
        self.head_dim = config.perceiver_resampler_attention_head_dim
        self.attention_dropout = 
config.perceiver_resampler_attention_dropout
        self.inner_dim = self.head_dim * self.num_attention_heads
        self.scaling = self.head_dim**-0.5
        self.is_causal = False

        self.q_proj = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
        self.k_proj = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
        self.v_proj = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
        self.o_proj = nn.Linear(self.inner_dim, self.hidden_size, bias=False)

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        positional_encoding: torch.Tensor | None = None,
        **kwargs,
    ) -> torch.Tensor:
        """Attend `query` to `key`/`value`; `positional_encoding` (if given) is added to keys AND values."""
        # Project queries, keys, and values
        query = self.q_proj(query)
        key = self.k_proj(key)
        value = self.v_proj(value)

        # Reshape for multi-head attention
        batch_size, seq_len_q = query.shape[:2]
        query = query.view(batch_size, seq_len_q, self.num_attention_heads, self.head_dim).transpose(1, 2)

        seq_len_kv = key.shape[1]
        key = key.view(batch_size, seq_len_kv, self.num_attention_heads, self.head_dim).transpose(1, 2)
        value = value.view(batch_size, seq_len_kv, self.num_attention_heads, self.head_dim).transpose(1, 2)

        # Add positional encoding if provided
        if positional_encoding is not None:
            # NOTE(review): position is added to values as well as keys — intentional
            # in the perceiver design here, but worth confirming against the reference impl.
            pos_encoding = positional_encoding.view(
                batch_size, seq_len_kv, self.num_attention_heads, self.head_dim
            ).transpose(1, 2)
            key = key + pos_encoding
            value = value + pos_encoding

        # Apply attention
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )
        attn_output, _ = attention_interface(
            self,
            query,
            key,
            value,
            attention_mask=None,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            is_causal=self.is_causal,
            **kwargs,
        )

        # Reshape output
        attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, seq_len_q, self.inner_dim)
        return self.o_proj(attn_output)


class EdgeTamVideoPerceiverEncoderLayer(nn.Module):
    """Perceiver layer: latents cross-attend to input features, then self-attend, each followed by an MLP."""

    def __init__(self, config: EdgeTamVideoConfig):
        super().__init__()
        self.cross_attention = EdgeTamVideoPerceiverAttention(config)
        self.mlp = EdgeTamVideoPerceiverMLP(config)
        self.dropout = nn.Dropout(config.perceiver_resampler_hidden_dropout)
        self.self_attention = EdgeTamVideoPerceiverAttention(config)
        self.self_mlp = EdgeTamVideoPerceiverMLP(config)

        # Layer norms moved from attention classes to here
        self.layer_norm_input = nn.LayerNorm(config.perceiver_resampler_hidden_size)
        self.layer_norm_latents = nn.LayerNorm(config.perceiver_resampler_hidden_size)
        self.layer_norm_self = nn.LayerNorm(config.perceiver_resampler_hidden_size)

    def forward(
        self,
        latents: torch.Tensor,
        input_features: torch.Tensor,
        positional_encoding: torch.Tensor | None = None,
    ) -> torch.Tensor:
        # Cross attention with layer norms
        normalized_latents = self.layer_norm_latents(latents)
        normalized_input = self.layer_norm_input(input_features)

        cross_attention_output = self.cross_attention(
            query=normalized_latents,
            key=normalized_input,
            value=normalized_input,
            positional_encoding=positional_encoding,
        )
        latents = latents + self.dropout(cross_attention_output)

        # NOTE(review): the MLPs and self-attention residuals here have no dropout,
        # unlike the cross-attention residual — presumably intentional; confirm.
        mlp_output = self.mlp(latents)
        latents = latents + mlp_output

        # Self attention with layer norm
        normalized_latents_self = self.layer_norm_self(latents)
        self_attention_output = self.self_attention(
            query=normalized_latents_self, key=normalized_latents_self, value=normalized_latents_self
        )
        latents = latents + self_attention_output

        self_mlp_output = self.self_mlp(latents)
        latents = latents + self_mlp_output

        return latents


class EdgeTamVideoPerceiverResampler(nn.Module):
    """Compresses memory-encoder feature maps into a small set of 1D and/or windowed 2D latent tokens."""

    def __init__(self, config: EdgeTamVideoConfig):
        super().__init__()
        self.config = config
        self.hidden_size = config.perceiver_resampler_hidden_size
        self.num_latents_1d = config.perceiver_resampler_num_latents
        self.num_latents_2d = config.perceiver_resampler_num_latents_2d
        self.num_layers = config.perceiver_resampler_num_layers

        # Learned latent queries; either pathway can be disabled via its count.
        if self.num_latents_1d > 0:
            self.latents_1d = nn.Parameter(torch.randn(self.num_latents_1d, self.hidden_size))
        if self.num_latents_2d > 0:
            self.latents_2d = nn.Parameter(torch.randn(self.num_latents_2d, self.hidden_size))

        self.positional_encoding = EdgeTamVideoPositionEmbeddingSine(
            num_pos_feats=self.hidden_size // 2, normalize=True
        )

        self.layers = nn.ModuleList([EdgeTamVideoPerceiverEncoderLayer(config) for _ in range(self.num_layers)])
        self.layer_norm = nn.LayerNorm(self.hidden_size)

    def forward(
        self,
        hidden_states: torch.Tensor,
        positional_encoding: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """Run the 1D and/or 2D latent pathways and concatenate their tokens along the sequence dim."""
        output_latents = []
        output_positional_encodings = []

        if self.num_latents_1d > 0:
            latents_1d, pos_1d = self._forward_1d(hidden_states, positional_encoding)
            output_latents.append(latents_1d)
            output_positional_encodings.append(pos_1d)

        if self.num_latents_2d > 0:
            latents_2d, pos_2d = self._forward_2d(hidden_states)
            output_latents.append(latents_2d)
            output_positional_encodings.append(pos_2d)

        combined_latents = torch.cat(output_latents, dim=1)

        combined_positional_encoding = None
        if positional_encoding is not None and output_positional_encodings:
            combined_positional_encoding = torch.cat(output_positional_encodings, dim=1)

        return combined_latents, combined_positional_encoding

    def _forward_1d(
        self,
        hidden_states: torch.Tensor,
        positional_encoding: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """Global pathway: all spatial positions are flattened and attended by the 1D latents."""
        batch_size = hidden_states.shape[0]
        latents = self.latents_1d.unsqueeze(0).expand(batch_size, -1, -1)

        # (B, C, H, W) -> (B, H*W, C)
        flattened_features = hidden_states.permute(0, 2, 3, 1).flatten(1, 2)

        positional_features = None
        if positional_encoding is not None:
            positional_features = positional_encoding.permute(0, 2, 3, 1).flatten(1, 2)

        for layer in self.layers:
            latents = layer(latents, flattened_features, positional_features)

        latents = self.layer_norm(latents)

        # 1D latents carry no spatial position; emit zeros so shapes match the 2D pathway.
        output_positional_encoding = None
        if positional_encoding is not None:
            output_positional_encoding = torch.zeros_like(latents)

        return latents, output_positional_encoding

    def _forward_2d(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Windowed pathway: one latent per spatial window, laid out on a sqrt(num_latents_2d) grid."""
        batch_size, channels, height, width = hidden_states.shape

        # One single-token latent per (batch, window) pair.
        latents_2d = self.latents_2d.unsqueeze(0).expand(batch_size, -1, -1).view(-1, 1, channels)

        # assumes num_latents_2d is a perfect square and divides the feature height — TODO confirm
        num_windows_per_dim = int(math.sqrt(self.num_latents_2d))
        window_size = height // num_windows_per_dim

        windowed_input = hidden_states.permute(0, 2, 3, 1)
        windowed_features, _ = window_partition(windowed_input, window_size)
        windowed_features = windowed_features.flatten(1, 2)

        for layer in self.layers:
            latents_2d = layer(latents_2d, windowed_features, positional_encoding=None)

        # Reassemble the per-window latents into a (B, C, grid, grid) map.
        latents_2d = latents_2d.view(batch_size, num_windows_per_dim, num_windows_per_dim, channels).permute(
            0, 3, 1, 2
        )

        positional_encoding_2d = self.positional_encoding(latents_2d.shape, latents_2d.device, latents_2d.dtype).to(
            dtype=hidden_states.dtype
        )
        positional_encoding_2d = positional_encoding_2d.permute(0, 2, 3, 1).flatten(1, 2)

        latents_2d = latents_2d.permute(0, 2, 3, 1).flatten(1, 2)
        latents_2d = self.layer_norm(latents_2d)

        return latents_2d, positional_encoding_2d


class EdgeTamVideoImageSegmentationOutput(Sam2VideoImageSegmentationOutput):
    pass


class EdgeTamVideoSegmentationOutput(Sam2VideoSegmentationOutput):
    pass


@auto_docstring
class EdgeTamVideoModel(Sam2VideoModel):
    _keys_to_ignore_on_load_unexpected = []
    _can_record_outputs = {"mask_decoder_attentions": OutputRecorder(EdgeTamVideoTwoWayAttentionBlock, index=2)}

    def __init__(self, config: EdgeTamVideoConfig):
        super().__init__(config)
        # EdgeTAM addition over SAM2-Video: perceiver resampler that compresses spatial memory.
        self.spatial_perceiver = EdgeTamVideoPerceiverResampler(config)

        self.post_init()

    def _build_memory_attention_inputs(
        self,
        temporal_positions_and_previous_outputs: list[tuple[int, dict]],
        device: torch.device,
    ) -> tuple[list[torch.Tensor], list[torch.Tensor]]:
        """
        Concatenate memory features and positional embeddings from previous frames.

        Returns:
            Tuple of (memories_to_concatenate, memory_positional_embeddings_to_concatenate).
""" memories_to_concatenate = [] memory_positional_embeddings_to_concatenate = [] for relative_temporal_offset, prev_output_data in temporal_positions_and_previous_outputs: if prev_output_data is None: continue # Skip if no output data for this temporal position (e.g., padding frames) # Load memory features (potentially from CPU to GPU) # Features are flattened: (Batch, Channels, H, W) -> (H*W, Batch, Channels) memory_features = prev_output_data["maskmem_features"].to(device, non_blocking=True) memories_to_concatenate.append(memory_features.permute(1, 0, 2)) # Spatial positional encoding (potentially from CPU to GPU) spatial_memory_pos_embed = prev_output_data["maskmem_pos_enc"].to(device, non_blocking=True) spatial_memory_pos_embed = spatial_memory_pos_embed.squeeze(1).permute(1, 0, 2) # Add temporal positional encoding # self.memory_temporal_positional_encoding shape: (NumMaskMem, 1, 1, MemDim) combined_memory_pos_embed = ( spatial_memory_pos_embed + self.memory_temporal_positional_encoding[relative_temporal_offset - 1] ) memory_positional_embeddings_to_concatenate.append(combined_memory_pos_embed) return memories_to_concatenate, memory_positional_embeddings_to_concatenate def _prepare_memory_conditioned_features( self, inference_session: EdgeTamVideoInferenceSession, frame_idx: int, obj_idx: int, is_initial_conditioning_frame: bool, current_vision_features: list[torch.Tensor], current_vision_positional_embeddings: list[torch.Tensor], num_total_frames: int, track_in_reverse_time: bool = False, streaming: bool = False, ) -> torch.Tensor: """ Fuse current frame's visual features with memory from previous frames for enhanced object tracking. This method conditions the current frame's visual features on temporal memory from previous frames, enabling consistent object tracking across video sequences. For initial conditioning frames, it uses no-memory embeddings. 
For subsequent frames, it retrieves and integrates memory features from both conditioning frames (user interactions) and non-conditioning frames (tracked results) via cross-attention. Args: inference_session (`EdgeTamVideoInferenceSession`): The video inference session object. frame_idx (`int`): Index of the current frame being processed. obj_idx (`int`): Index of the object being processed. is_initial_conditioning_frame (`bool`): Whether this is an initial conditioning frame with user inputs (True) or a subsequent tracking frame (False). current_vision_features (`torch.Tensor`): Highest-level vision features of shape `(seq_len, batch_size, channels)`. current_vision_positional_embeddings (`torch.Tensor`): Positional embedding tensors corresponding to the highest-level vision features. num_total_frames (`int`): Total number of frames in the video sequence. track_in_reverse_time (`bool`, *optional*, defaults to `False`): Whether tracking is performed in reverse temporal order. streaming (`bool`, *optional*, defaults to `False`): Whether this is streaming inference mode. Returns: `torch.Tensor`: Memory-conditioned feature tensor of shape `(batch_size, channels, height, width)` suitable for input to the SAM decoder. """ # Get dimensions from the highest-level (lowest-resolution) feature map batch_size = current_vision_features.size(1) num_channels = self.hidden_dim height, width = self.backbone_feature_sizes[-1] device = current_vision_features.device # If memory is disabled (e.g., for single image SAM), return current features directly. 
if self.num_maskmem == 0: # Permute (SeqLen, Batch, Channels) -> (Batch, Channels, SeqLen) then view as (Batch, Channels, Height, Width) # Assuming SeqLen = Height * Width for the last feature map current_feature_map = current_vision_features.permute(1, 2, 0).view( batch_size, num_channels, height, width ) return current_feature_map # Step 1: Handle initial conditioning frames if is_initial_conditioning_frame: # For initial conditioning frames, no prior memory is used directly in this block. # If configured, directly add a learnable "no memory" embedding. # current_vision_features has shape (SeqLen, Batch, Channels) conditioned_feature_map_flat = current_vision_features + self.no_memory_embedding # Reshape to (Batch, Channels, Height, Width) conditioned_feature_map = conditioned_feature_map_flat.permute(1, 2, 0).view( batch_size, num_channels, height, width ) return conditioned_feature_map # Step 2: Get memory frames and concatenate their features temporal_positions_and_previous_outputs = self._gather_memory_frame_outputs( inference_session, obj_idx, frame_idx, track_in_reverse_time ) memories_to_concatenate, memory_positional_embeddings_to_concatenate = self._build_memory_attention_inputs( temporal_positions_and_previous_outputs, device ) num_spatial_memory_tokens = len(memories_to_concatenate) # Step 3: Get and process object pointers temporal_offsets, pointer_tokens, max_object_pointers_to_use = self._get_object_pointers( inference_session, obj_idx, frame_idx, num_total_frames, device, track_in_reverse_time, streaming ) num_object_pointer_tokens = 0 if pointer_tokens: object_pointers, object_pointers_pos_embed = self._process_object_pointers( temporal_offsets, pointer_tokens, max_object_pointers_to_use, batch_size, num_channels, device ) if object_pointers is not None: memories_to_concatenate.append(object_pointers) memory_positional_embeddings_to_concatenate.append(object_pointers_pos_embed) num_object_pointer_tokens = object_pointers.shape[0] # Step 4: 
Concatenate all retrieved memories and their positional embeddings combined_memory = torch.cat(memories_to_concatenate, dim=0) combined_memory_positional_embeddings = torch.cat(memory_positional_embeddings_to_concatenate, dim=0) # Step 5: Forward through the memory attention mechanism conditioned_feature_map_flat = self.memory_attention( current_vision_features=current_vision_features, current_vision_position_embeddings=current_vision_positional_embeddings, memory=combined_memory, memory_posision_embeddings=combined_memory_positional_embeddings, # Corrected typo from API num_object_pointer_tokens=num_object_pointer_tokens, num_spatial_memory_tokens=num_spatial_memory_tokens, ) # Reshape from (Batch, H*W, Channels) to (Batch, Channels, Height, Width) conditioned_feature_map = ( conditioned_feature_map_flat.squeeze(1).permute(0, 2, 1).view(batch_size, num_channels, height, width) ) return conditioned_feature_map def _encode_new_memory( self, current_vision_feats: torch.Tensor, pred_masks_high_res: torch.Tensor, object_score_logits: torch.Tensor, is_mask_from_pts: bool, ) -> tuple[torch.Tensor, list[torch.Tensor]]: """Encode the current image and its prediction into a memory feature.""" batch_size = current_vision_feats.size(1) # batch size on this frame channels = self.hidden_dim height, width = self.backbone_feature_sizes[-1] # top-level (lowest-resolution) feature size # top-level feature, (HW)BC => BCHW pix_feat = current_vision_feats.permute(1, 2, 0).view(batch_size, channels, height, width) if is_mask_from_pts and not self.training: # binarize the mask logits mask_for_mem = (pred_masks_high_res > 0).to(pred_masks_high_res.dtype) else: # apply sigmoid on the raw mask logits to turn them into range (0, 1) mask_for_mem = torch.sigmoid(pred_masks_high_res) # apply scale and bias terms to the sigmoid probabilities mask_for_mem = mask_for_mem * self.config.sigmoid_scale_for_mem_enc mask_for_mem = mask_for_mem + self.config.sigmoid_bias_for_mem_enc maskmem_features, 
maskmem_pos_enc = self.memory_encoder( pix_feat, mask_for_mem, ) # add a no-object embedding to the spatial memory to indicate that the frame # is predicted to be occluded (i.e. no object is appearing in the frame) if self.occlusion_spatial_embedding_parameter is not None: is_obj_appearing = (object_score_logits > 0).float() maskmem_features += (1 - is_obj_appearing[..., None]) * self.occlusion_spatial_embedding_parameter[ ..., None, None ].expand(*maskmem_features.shape) maskmem_pos_enc = maskmem_pos_enc.to(pred_masks_high_res.dtype) maskmem_features, maskmem_pos_enc = self.spatial_perceiver(maskmem_features, maskmem_pos_enc) maskmem_features = maskmem_features.to(pred_masks_high_res.dtype) maskmem_pos_enc = maskmem_pos_enc.to(pred_masks_high_res.dtype) return maskmem_features, maskmem_pos_enc def forward( self, inference_session: EdgeTamVideoInferenceSession, frame_idx: int | None = None, frame: torch.Tensor | None = None, reverse: bool = False, **kwargs, ) -> EdgeTamVideoSegmentationOutput: r""" inference_session (`EdgeTamVideoInferenceSession`): The video inference session object. frame_idx (`int`, *optional*): The index of the frame on which to run inference. No need to provide when inferring on a new streamed frame. frame (`torch.Tensor`, *optional*): The frame to process. Provide when streaming. reverse (`bool`, *optional*, defaults to `False`): Whether to propagate in reverse. """ if frame is not None: frame_idx = inference_session.add_new_frame(frame, frame_idx) if frame is not None and inference_session.get_obj_num() == 0: raise ValueError("No objects are provided for tracking; please add inputs first.") num_objects = inference_session.get_obj_num() pred_masks_per_obj = [None] * num_objects object_score_logits_per_obj = [None] * num_objects # Note: We avoid batched inference here because per-object inputs (clicks/masks) # can differ across objects. 
        for obj_idx in range(num_objects):
            obj_id = inference_session.obj_idx_to_id(obj_idx)
            has_new_inputs = obj_id in inference_session.obj_with_new_inputs
            has_cond_output = frame_idx in inference_session.output_dict_per_obj[obj_idx]["cond_frame_outputs"]

            # If this object has no new inputs and this frame already has a
            # conditioning output, reuse the cached masks instead of recomputing.
            if (not has_new_inputs) and has_cond_output:
                pred_masks = inference_session.get_output(obj_idx, frame_idx, "pred_masks", is_conditioning_frame=True)
                object_score_logits = inference_session.get_output(
                    obj_idx, frame_idx, "object_score_logits", is_conditioning_frame=True
                )
                is_init_cond_frame = True
            else:
                # Defaults when there are no new inputs
                is_init_cond_frame = False
                point_inputs = None
                mask_inputs = None
                if has_new_inputs:
                    # A frame that was never tracked for this object becomes a conditioning frame.
                    is_init_cond_frame = frame_idx not in inference_session.frames_tracked_per_obj[obj_idx]
                    if is_init_cond_frame:
                        reverse = False
                    point_inputs = inference_session.point_inputs_per_obj[obj_idx].get(frame_idx, None)
                    mask_inputs = inference_session.mask_inputs_per_obj[obj_idx].get(frame_idx, None)
                    # Consume the pending-input flag once the prompts for this frame are picked up.
                    if point_inputs is not None or mask_inputs is not None:
                        inference_session.obj_with_new_inputs.remove(obj_id)

                current_out = self._run_single_frame_inference(
                    inference_session=inference_session,
                    obj_idx=obj_idx,
                    frame_idx=frame_idx,
                    batch_size=1,  # run on the slice of a single object
                    is_init_cond_frame=is_init_cond_frame,
                    point_inputs=point_inputs,
                    mask_inputs=mask_inputs,
                    reverse=reverse,
                    run_mem_encoder=True,
                    streaming=frame is not None,
                )
                inference_session.store_output(
                    obj_idx, frame_idx, output_value=current_out, is_conditioning_frame=is_init_cond_frame
                )

                pred_masks = current_out["pred_masks"]
                object_score_logits = current_out["object_score_logits"]

            pred_masks_per_obj[obj_idx] = pred_masks
            object_score_logits_per_obj[obj_idx] = object_score_logits.squeeze(-1)
            if not is_init_cond_frame:
                # only for tracked frames, not for initial conditioning frames
                inference_session.frames_tracked_per_obj[obj_idx][frame_idx] = {"reverse": reverse}

        # Resize the output mask to the original video resolution (we directly use
        # the mask scores on GPU for output to avoid any CPU conversion in between)
        if len(pred_masks_per_obj) > 1:
            all_pred_masks = torch.cat(pred_masks_per_obj, dim=0)
            all_object_score_logits = torch.cat(object_score_logits_per_obj, dim=0)
        else:
            all_pred_masks = pred_masks_per_obj[0]
            all_object_score_logits = object_score_logits_per_obj[0]

        return EdgeTamVideoSegmentationOutput(
            object_ids=inference_session.obj_ids.copy(),
            pred_masks=all_pred_masks,
            object_score_logits=all_object_score_logits,
            frame_idx=frame_idx,
        )

    def _use_mask_as_output(
        self,
        backbone_features: torch.Tensor,
        high_res_features: list[torch.Tensor],
        mask_inputs: torch.Tensor,
    ) -> EdgeTamVideoImageSegmentationOutput:
        """
        Directly turn binary `mask_inputs` into a output mask logits without using SAM.
        (same input and output shapes as in forward above).
        """
        # Use -10/+20 as logits for neg/pos pixels (very close to 0/1 in prob after sigmoid).
        out_scale, out_bias = 20.0, -10.0  # sigmoid(-10.0)=4.5398e-05
        mask_inputs_float = mask_inputs.to(backbone_features[0].dtype)
        high_res_masks = mask_inputs_float * out_scale + out_bias
        low_res_masks = F.interpolate(
            high_res_masks.float(),
            size=(high_res_masks.size(-2) // 4, high_res_masks.size(-1) // 4),
            align_corners=False,
            mode="bilinear",
            antialias=True,  # use antialias for downsampling
        ).to(backbone_features[0].dtype)
        # a dummy IoU prediction of all 1's under mask input
        iou_scores = mask_inputs.new_ones(mask_inputs.size(0), 1).to(backbone_features[0].dtype)
        # produce an object pointer using the SAM decoder from the mask input
        object_pointer = self._single_frame_forward(
            input_masks=self.mask_downsample(mask_inputs_float.to(backbone_features[0].dtype)),
            image_embeddings=high_res_features + [backbone_features],
        ).object_pointer
        # In this method, we are treating mask_input as output, e.g. using it directly to create spatial mem;
        # Below, we follow the same design axiom to use mask_input to decide if obj appears or not instead of relying
        # on the object_scores from the SAM decoder.
        is_obj_appearing = torch.any(mask_inputs.flatten(1).float() > 0.0, dim=1)
        is_obj_appearing = is_obj_appearing[..., None]
        lambda_is_obj_appearing = is_obj_appearing.to(backbone_features[0].dtype)
        object_score_logits = out_scale * lambda_is_obj_appearing + out_bias
        # Blend the pointer with the learned "no object" pointer when the mask is empty.
        object_pointer = lambda_is_obj_appearing * object_pointer
        object_pointer = object_pointer + (1 - lambda_is_obj_appearing) * self.no_object_pointer
        return EdgeTamVideoImageSegmentationOutput(
            iou_scores=iou_scores,
            pred_masks=low_res_masks,
            high_res_masks=high_res_masks,
            object_pointer=object_pointer,
            object_score_logits=object_score_logits,
            image_embeddings=high_res_features + [backbone_features],
        )

    def _run_single_frame_inference(
        self,
        inference_session: EdgeTamVideoInferenceSession,
        frame_idx: int,
        obj_idx: int,
        batch_size: int,
        is_init_cond_frame: bool,
        point_inputs: torch.Tensor | None,
        mask_inputs: torch.Tensor | None,
        reverse: bool,
        run_mem_encoder: bool,
        prev_sam_mask_logits: torch.Tensor | None = None,
        streaming: bool = False,
    ) -> dict[str, Any]:
        """
        Perform a single tracking step for video object segmentation.

        Args:
            inference_session (`EdgeTamVideoInferenceSession`):
                The video inference session object.
            frame_idx (`int`):
                Index of the current frame.
            obj_idx (`int`):
                Index of the current object.
            batch_size (`int`):
                Batch size of the current frame.
            is_init_cond_frame (`bool`):
                Whether this is an initial conditioning frame with user inputs.
            point_inputs (`dict`, *optional*):
                Point prompt inputs for the current frame.
            mask_inputs (`torch.Tensor`, *optional*):
                Mask prompt inputs for the current frame.
            reverse (`bool`, *optional*, defaults to `False`):
                Whether to track in reverse time order.
            run_mem_encoder (`bool`, *optional*, defaults to `True`):
                Whether to run the memory encoder on predicted masks.
            prev_sam_mask_logits (`torch.Tensor`, *optional*):
                Previously predicted SAM mask logits that can be fed with new clicks.
            streaming (`bool`, *optional*, defaults to `False`):
                Whether this is streaming inference.

        Returns:
            `dict`: Dictionary containing the tracking results for the current frame, including:
                - pred_masks: Predicted low-resolution masks.
                - object_pointer: Object pointer for memory.
                - object_score_logits: Object score logits (inference only).
                - maskmem_features: Memory features for future frames.
                - maskmem_pos_enc: Memory positional encodings.
        """
        # Retrieve correct image features
        current_vision_feats, current_vision_pos_embeds = self._prepare_vision_features(
            inference_session, frame_idx, batch_size
        )
        # point and mask should not appear as input simultaneously on the same frame
        if point_inputs is not None and mask_inputs is not None:
            raise ValueError(
                "point_inputs and mask_inputs should not appear as input simultaneously on the same frame"
            )

        # High-resolution feature maps for the SAM head, reshape (HW)BC => BCHW
        if len(current_vision_feats) > 1:
            high_res_features = [
                x.permute(1, 2, 0).view(x.size(1), x.size(2), *s)
                for x, s in zip(current_vision_feats[:-1], self.backbone_feature_sizes[:-1])
            ]
        else:
            high_res_features = None
        if mask_inputs is not None:
            # We directly output the mask input (see it as a GT mask) without using a SAM prompt encoder + mask decoder.
pix_feat = current_vision_feats[-1].permute(1, 2, 0) pix_feat = pix_feat.view(-1, self.hidden_dim, *self.backbone_feature_sizes[-1]) sam_outputs = self._use_mask_as_output(pix_feat, high_res_features, mask_inputs) else: # fused the visual feature with previous memory features in the memory bank pix_feat = self._prepare_memory_conditioned_features( inference_session=inference_session, frame_idx=frame_idx, obj_idx=obj_idx, is_initial_conditioning_frame=is_init_cond_frame, current_vision_features=current_vision_feats[-1], current_vision_positional_embeddings=current_vision_pos_embeds[-1], num_total_frames=inference_session.num_frames, track_in_reverse_time=reverse, streaming=streaming, ) # apply SAM-style segmentation head # here we might feed previously predicted low-res SAM mask logits into the SAM mask decoder, # e.g. in demo where such logits come from earlier interaction instead of correction sampling # (in this case, any `mask_inputs` shouldn't reach here as they are sent to _use_mask_as_output instead) if prev_sam_mask_logits is not None: mask_inputs = prev_sam_mask_logits multimask_output = self._use_multimask(is_init_cond_frame, point_inputs) sam_outputs = self._single_frame_forward( pixel_values=None, # Vision features already computed input_points=point_inputs["point_coords"] if point_inputs is not None else None, input_labels=point_inputs["point_labels"] if point_inputs is not None else None, input_masks=mask_inputs, image_embeddings=high_res_features + [pix_feat], multimask_output=multimask_output, ) # Finally run the memory encoder on the predicted mask to encode # it into a new memory feature (which will be used to condition vision features in future frames) maskmem_features = None maskmem_pos_enc = None if run_mem_encoder and self.num_maskmem > 0: maskmem_features, maskmem_pos_enc = self._encode_new_memory( current_vision_feats=current_vision_feats[-1], pred_masks_high_res=sam_outputs.high_res_masks, object_score_logits=sam_outputs.object_score_logits, 
is_mask_from_pts=(point_inputs is not None or mask_inputs is not None), ) current_out = { "pred_masks": sam_outputs.pred_masks, "object_pointer": sam_outputs.object_pointer, "maskmem_features": maskmem_features if maskmem_features is not None else None, "maskmem_pos_enc": maskmem_pos_enc, } if not self.training: current_out["object_score_logits"] = sam_outputs.object_score_logits return current_out def _batch_encode_memories(self): raise NotImplementedError("Batch memory encoding is not implemented for EdgeTamVideo yet.") # Todo, implement batch memory encoding for edgetam video __all__ = [ "EdgeTamVideoMaskDecoderConfig", "EdgeTamVideoPromptEncoderConfig", "EdgeTamVideoConfig", "EdgeTamVideoModel", "EdgeTamVideoInferenceSession", "EdgeTamVideoPreTrainedModel", ]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/edgetam_video/modular_edgetam_video.py", "license": "Apache License 2.0", "lines": 1311, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:tests/models/edgetam/test_modeling_edgetam.py
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch EDGETAM model.""" import copy import gc import unittest import requests from parameterized import parameterized from transformers import ( EdgeTamConfig, EdgeTamMaskDecoderConfig, EdgeTamPromptEncoderConfig, EdgeTamVisionConfig, Sam2Processor, pipeline, ) from transformers.testing_utils import ( backend_empty_cache, require_torch, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from transformers.video_utils import load_video from ...test_configuration_common import ConfigTester from ...test_modeling_common import TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION, ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoConfig, EdgeTamModel, Sam2Processor if is_vision_available(): from PIL import Image class EdgeTamPromptEncoderTester: def __init__( self, hidden_size=32, input_image_size=128, patch_size=16, mask_input_channels=8, num_point_embeddings=4, hidden_act="gelu", ): self.hidden_size = hidden_size self.input_image_size = input_image_size self.patch_size = patch_size self.mask_input_channels = mask_input_channels self.num_point_embeddings = num_point_embeddings self.hidden_act = hidden_act def get_config(self): return EdgeTamPromptEncoderConfig( image_size=self.input_image_size, 
patch_size=self.patch_size, mask_input_channels=self.mask_input_channels, hidden_size=self.hidden_size, num_point_embeddings=self.num_point_embeddings, hidden_act=self.hidden_act, ) def prepare_config_and_inputs(self): dummy_points = floats_tensor([self.batch_size, 3, 2]) config = self.get_config() return config, dummy_points class EdgeTamMaskDecoderTester: def __init__( self, hidden_size=32, hidden_act="relu", mlp_dim=64, num_hidden_layers=2, num_attention_heads=4, attention_downsample_rate=2, num_multimask_outputs=3, iou_head_depth=3, iou_head_hidden_dim=32, ): self.hidden_size = hidden_size self.hidden_act = hidden_act self.mlp_dim = mlp_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.attention_downsample_rate = attention_downsample_rate self.num_multimask_outputs = num_multimask_outputs self.iou_head_depth = iou_head_depth self.iou_head_hidden_dim = iou_head_hidden_dim def get_config(self): return EdgeTamMaskDecoderConfig( hidden_size=self.hidden_size, hidden_act=self.hidden_act, mlp_dim=self.mlp_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, attention_downsample_rate=self.attention_downsample_rate, num_multimask_outputs=self.num_multimask_outputs, iou_head_depth=self.iou_head_depth, iou_head_hidden_dim=self.iou_head_hidden_dim, ) def prepare_config_and_inputs(self): config = self.get_config() dummy_inputs = { "image_embedding": floats_tensor([self.batch_size, self.hidden_size]), } return config, dummy_inputs class EdgeTamModelTester: def __init__( self, parent, num_channels=3, image_size=128, hidden_size=12, patch_kernel_size=7, patch_stride=4, patch_padding=3, dim_mul=2.0, backbone_channel_list=[96, 48, 24, 12], backbone_feature_sizes=[[32, 32], [16, 16], [8, 8]], fpn_hidden_size=32, memory_encoder_hidden_size=32, batch_size=2, is_training=True, ): self.parent = parent self.image_size = image_size self.hidden_size = hidden_size self.patch_kernel_size = 
patch_kernel_size self.patch_stride = patch_stride self.patch_padding = patch_padding self.dim_mul = dim_mul self.backbone_channel_list = backbone_channel_list self.backbone_feature_sizes = backbone_feature_sizes self.fpn_hidden_size = fpn_hidden_size self.batch_size = batch_size self.num_channels = num_channels self.is_training = is_training self.memory_encoder_hidden_size = memory_encoder_hidden_size self.prompt_encoder_tester = EdgeTamPromptEncoderTester() self.mask_decoder_tester = EdgeTamMaskDecoderTester() def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): vision_config = EdgeTamVisionConfig( backbone_config=AutoConfig.from_pretrained( "timm/repvit_m1.dist_in1k", model_args={ "in_chans": 3, "features_only": True, "out_indices": (0, 1, 2, 3), "embed_dim": self.backbone_channel_list[::-1], }, ), backbone_channel_list=self.backbone_channel_list, backbone_feature_sizes=self.backbone_feature_sizes, fpn_hidden_size=self.fpn_hidden_size, ) prompt_encoder_config = self.prompt_encoder_tester.get_config() mask_decoder_config = self.mask_decoder_tester.get_config() return EdgeTamConfig( vision_config=vision_config, prompt_encoder_config=prompt_encoder_config, mask_decoder_config=mask_decoder_config, memory_attention_hidden_size=self.hidden_size, memory_encoder_hidden_size=self.memory_encoder_hidden_size, image_size=self.image_size, mask_downsampler_embed_dim=32, memory_fuser_embed_dim=32, memory_attention_num_layers=1, memory_attention_feed_forward_hidden_size=32, ) def create_and_check_model(self, config, pixel_values): model = EdgeTamModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) self.parent.assertEqual(result.iou_scores.shape, (self.batch_size, 1, 3)) self.parent.assertEqual(result.pred_masks.shape[:3], (self.batch_size, 1, 3)) def 
prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class EdgeTamModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as SAM's vision encoder does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (EdgeTamModel,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": EdgeTamModel, "mask-generation": EdgeTamModel} if is_torch_available() else {} ) test_resize_embeddings = False _is_composite = True def setUp(self): self.model_tester = EdgeTamModelTester(self) common_properties = ["initializer_range"] self.config_tester = ConfigTester( self, config_class=EdgeTamConfig, has_text_modality=False, common_properties=common_properties ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Timm model does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Can't get or set embeddings for Timm model") def test_model_get_set_embeddings(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) # Override as diffence slightly higher than the threshold # def test_batching_equivalence(self, atol=5e-4, rtol=5e-4): # super().test_batching_equivalence(atol=atol, rtol=rtol) @unittest.skip(reason="TimmWrapperModel does not support an attention implementation") def test_can_set_attention_dynamically_composite_model(self): pass @unittest.skip(reason="vision_hidden_states from TimmWrapperModel") def test_hidden_states_output(self): pass @unittest.skip( reason="TIMM's attention implementation is self configured and won't raise ValueError on global attention implementation." 
) def test_flash_attn_2_can_dispatch_composite_models(self): pass @unittest.skip("TimmWrapperModel cannot be tested with meta device") def test_can_be_initialized_on_meta(self): pass @unittest.skip("TimmWrapperModel cannot be tested with meta device") def test_can_load_with_meta_device_context_manager(self): pass ## Skip flash attention releated tests below ## correct configuration: ## from_pretrained(model_id, attn_implementation={"text_config": "flash_attention_2", "vision_config": "eager"} @unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.") def test_eager_matches_fa2_generate(self): pass @unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.") def test_eager_matches_fa3_generate(self): pass @unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.") def test_flash_attn_2_fp32_ln(self): pass @unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.") def test_flash_attn_2_from_config(self): pass @unittest.skip("SDPA test is not configured correctly as we need to configure vision/timm model to 'eager'.") def test_eager_matches_sdpa_generate_with_dynamic_cache(self): pass @unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.") def test_flash_attn_2_inference_equivalence_right_padding(self): pass @unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.") def test_flash_attn_3_inference_equivalence_right_padding(self): pass @unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.") def test_flash_attn_kernels_inference_equivalence(self): pass @unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.") def 
test_flash_attn_kernels_mps_inference_equivalence(self): pass @unittest.skip("SDPA test is not configured correctly as we need to configure vision/timm model to 'eager'.") def test_eager_matches_sdpa_generate(self): pass @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION) @unittest.skip("Test requires hidden_states in outputs, which is not available in EdgeTamModel.forward's output") def test_eager_matches_sdpa_inference( self, name, dtype, padding_side, use_attention_mask, output_attentions, enable_kernels ): pass @unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.") def test_flash_attn_2_inference_equivalence(self): pass @unittest.skip("EdgeTAM does not have language_model, vision_tower, multi_modal_projector.") def test_sdpa_can_dispatch_composite_models(self): pass @unittest.skip("Cannot set `output_attentions` for timm models.") def test_attention_outputs(self): pass @unittest.skip("Cannot set `output_attentions` for timm models.") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip("Cannot set `output_attentions` for timm models.") def test_generate_compilation_all_outputs(self): pass @unittest.skip("Cannot set `output_attentions` for timm models.") def test_capture_outputs_decorator(self): pass @slow def test_model_from_pretrained(self): model_name = "yonigozlan/EdgeTAM-hf" model = EdgeTamModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_sdpa_can_compile_dynamic(self): self.skipTest(reason="EDGETAM model can't be compiled dynamic yet") def test_model_outputs_equivalence(self): # Modified from upstream to remove output_hidden_states as the timm model doesn't support it config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, 
return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (list, tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return # model might return non-tensors objects (e.g. Cache class) elif isinstance(tuple_object, torch.Tensor): self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." 
), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(copy.deepcopy(config)) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) @unittest.skip("Cannot set `output_attentions` for timm models.") def test_get_image_features_attentions(self): pass @unittest.skip("Cannot set `output_hidden_states` for this timm model.") def test_get_image_features_hidden_states(self): pass def prepare_image(): img_url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/truck.jpg" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image def prepare_groceries_image(): img_url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/groceries.jpg" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image def prepare_dog_img(): img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/dog-sam.png" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image def prepare_video(): video_url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/bedroom.mp4" raw_video, _ = load_video(video_url) return raw_video @slow class EdgeTamModelIntegrationTest(unittest.TestCase): def setUp(self): super().setUp() self.model = EdgeTamModel.from_pretrained("yonigozlan/EdgeTAM-hf").to(torch.float32) self.processor = Sam2Processor.from_pretrained("yonigozlan/EdgeTAM-hf") self.model.to(torch_device) self.model.eval() def 
tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() backend_empty_cache(torch_device) def test_inference_mask_generation_one_point_multimask(self): raw_image = prepare_image() input_points = [[[[500, 375]]]] input_labels = [[[1]]] inputs = self.processor( images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = self.model(**inputs) self.assertEqual(outputs.iou_scores.shape, (1, 1, 3)) self.assertEqual(outputs.pred_masks.shape, (1, 1, 3, 256, 256)) sorted_indices = torch.argsort(outputs.iou_scores.squeeze(), descending=True) scores = outputs.iou_scores.squeeze()[sorted_indices] masks_logits = outputs.pred_masks.squeeze()[sorted_indices][0, :3, :3] torch.testing.assert_close( scores, torch.tensor([0.7621, 0.4859, 0.0461]).to(torch_device), atol=1e-4, rtol=1e-4 ) torch.testing.assert_close( masks_logits, torch.tensor( [[-19.5483, -22.3549, -26.0962], [-18.1821, -23.4761, -24.2262], [-20.3549, -24.5518, -22.7232]] ).to(torch_device), atol=1e-4, rtol=1e-4, ) def test_inference_mask_generation_one_point_no_multimask(self): raw_image = prepare_image() input_points = [[[[500, 375]]]] input_labels = [[[1]]] inputs = self.processor( images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = self.model(**inputs, multimask_output=False) self.assertEqual(outputs.iou_scores.shape, (1, 1, 1)) self.assertEqual(outputs.pred_masks.shape, (1, 1, 1, 256, 256)) scores = outputs.iou_scores.squeeze((0, 1)) masks_logits = outputs.pred_masks.squeeze((0, 1))[0, :3, :3] torch.testing.assert_close(scores, torch.tensor([0.7621]).to(torch_device), atol=1e-4, rtol=1e-4) torch.testing.assert_close( masks_logits, torch.tensor( [[-19.5483, -22.3549, -26.0962], [-18.1821, -23.4761, -24.2262], [-20.3549, -24.5518, -22.7232]] ).to(torch_device), atol=1e-4, rtol=1e-4, ) def 
test_inference_mask_generation_batched_images_multi_points(self): raw_image1 = prepare_image() raw_image2 = prepare_dog_img() input_points = [[[[500, 375]]], [[[770, 200], [730, 120]]]] input_labels = [[[1]], [[1, 0]]] inputs = self.processor( images=[raw_image1, raw_image2], input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = self.model(**inputs) self.assertEqual(outputs.iou_scores.shape, (2, 1, 3)) self.assertEqual(outputs.pred_masks.shape, (2, 1, 3, 256, 256)) sorted_indices = torch.argsort(outputs.iou_scores[0].squeeze(), descending=True) scores1 = outputs.iou_scores[0].squeeze()[sorted_indices] masks_logits1 = outputs.pred_masks[0].squeeze()[sorted_indices][0, :3, :3] sorted_indices = torch.argsort(outputs.iou_scores[1].squeeze(), descending=True) scores2 = outputs.iou_scores[1].squeeze()[sorted_indices] masks_logits2 = outputs.pred_masks[1].squeeze()[sorted_indices][0, :3, :3] torch.testing.assert_close( scores1, torch.tensor([0.7490, 0.4685, 0.0463]).to(torch_device), atol=1e-4, rtol=1e-4 ) torch.testing.assert_close( masks_logits1, torch.tensor( [[-19.1423, -21.6488, -25.6816], [-17.8018, -22.6512, -23.5699], [-19.9140, -23.6919, -22.3147]] ).to(torch_device), atol=1e-4, rtol=1e-4, ) torch.testing.assert_close( scores2, torch.tensor([0.7225, 0.6515, 0.6350]).to(torch_device), atol=1e-4, rtol=1e-4 ) torch.testing.assert_close( masks_logits2, torch.tensor([[-8.8259, -7.7961, -9.3665], [-8.2648, -8.7771, -9.1390], [-9.5951, -8.3995, -9.0599]]).to( torch_device ), atol=1e-4, rtol=1e-4, ) def test_inference_mask_generation_batched_images_batched_points_multi_points(self): raw_image1 = prepare_image() raw_image2 = prepare_groceries_image() input_points = [[[[500, 375]], [[650, 750]]], [[[400, 300]], [[630, 300], [550, 300]]]] input_labels = [[[1], [1]], [[1], [1, 1]]] inputs = self.processor( images=[raw_image1, raw_image2], input_points=input_points, input_labels=input_labels, 
return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = self.model(**inputs, multimask_output=False) self.assertEqual(outputs.iou_scores.shape, (2, 2, 1)) self.assertEqual(outputs.pred_masks.shape, (2, 2, 1, 256, 256)) torch.testing.assert_close( outputs.iou_scores, torch.tensor([[[0.7490], [0.9397]], [[0.7952], [0.8723]]]).to(torch_device), atol=1e-4, rtol=1e-4, ) torch.testing.assert_close( outputs.pred_masks[:, :, :, :2, :2], torch.tensor( [ [[[[-19.1423, -21.6488], [-17.8018, -22.6512]]], [[[-7.1591, -9.8201], [-7.4133, -9.2781]]]], [[[[-16.7645, -15.2790], [-16.1805, -16.2937]]], [[[-8.5934, -8.4215], [-8.1873, -8.3722]]]], ] ).to(torch_device), atol=1e-4, rtol=1e-4, ) def test_inference_batched_images_batched_boxes(self): raw_image1 = prepare_image() raw_image2 = prepare_groceries_image() input_boxes = [ [[75, 275, 1725, 850], [425, 600, 700, 875], [1375, 550, 1650, 800], [1240, 675, 1400, 750]], [[450, 170, 520, 350], [350, 190, 450, 350], [500, 170, 580, 350], [580, 170, 640, 350]], ] inputs = self.processor(images=[raw_image1, raw_image2], input_boxes=input_boxes, return_tensors="pt").to( torch_device ) with torch.no_grad(): outputs = self.model(**inputs, multimask_output=False) self.assertEqual(outputs.iou_scores.shape, (2, 4, 1)) self.assertEqual(outputs.pred_masks.shape, (2, 4, 1, 256, 256)) torch.testing.assert_close( outputs.iou_scores, torch.tensor([[[0.9773], [0.9415], [0.9683], [0.8792]], [[0.9721], [0.9852], [0.9812], [0.9760]]]).to( torch_device ), atol=1e-4, rtol=1e-4, ) torch.testing.assert_close( outputs.pred_masks[:, :, :, :2, :2], torch.tensor( [ [ [[[-12.6412, -12.0553], [-11.8415, -13.1696]]], [[[-16.0378, -19.9641], [-15.4939, -19.0260]]], [[[-18.8254, -23.6185], [-17.7889, -23.2116]]], [[[-25.7024, -29.8722], [-22.9264, -30.0557]]], ], [ [[[-19.0264, -17.0396], [-16.9458, -16.3287]]], [[[-20.9671, -19.2132], [-18.5827, -18.0511]]], [[[-22.4642, -19.7389], [-19.4541, -19.4717]]], [[[-21.9226, -18.6297], [-18.9272, 
-18.8151]]], ], ] ).to(torch_device), atol=1e-4, rtol=1e-4, ) def test_inference_mask_generation_from_existing_points_and_mask(self): raw_image = prepare_image() input_points = [[[[500, 375]]]] input_labels = [[[1]]] original_inputs = self.processor( images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = self.model(**original_inputs) # best mask to use as input for new points mask_input = outputs.pred_masks[:, :, torch.argmax(outputs.iou_scores)] new_input_points = [[[[500, 375], [1125, 625]]]] new_input_labels = [[[1, 1]]] inputs = self.processor( input_points=new_input_points, input_labels=new_input_labels, original_sizes=original_inputs["original_sizes"], return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = self.model( **inputs, input_masks=mask_input, image_embeddings=outputs.image_embeddings, multimask_output=False, ) self.assertEqual(outputs.iou_scores.shape, (1, 1, 1)) self.assertEqual(outputs.pred_masks.shape, (1, 1, 1, 256, 256)) scores = outputs.iou_scores.squeeze((0, 1)) masks_logits = outputs.pred_masks.squeeze((0, 1))[0, :3, :3] torch.testing.assert_close(scores, torch.tensor([0.9431]).to(torch_device), atol=1e-4, rtol=1e-4) torch.testing.assert_close( masks_logits, torch.tensor([[-4.1968, -4.9034, -6.0680], [-4.4053, -5.1200, -5.8580], [-4.3920, -5.5096, -5.8166]]).to( torch_device ), atol=1e-4, rtol=1e-4, ) # with negative point new_input_points = [[[[500, 375], [1125, 625]]]] new_input_labels = [[[1, 0]]] inputs = self.processor( input_points=new_input_points, input_labels=new_input_labels, original_sizes=original_inputs["original_sizes"], return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = self.model( **inputs, input_masks=mask_input, image_embeddings=outputs.image_embeddings, multimask_output=False, ) self.assertEqual(outputs.iou_scores.shape, (1, 1, 1)) self.assertEqual(outputs.pred_masks.shape, (1, 1, 1, 256, 256)) 
scores = outputs.iou_scores.squeeze((0, 1)) masks_logits = outputs.pred_masks.squeeze((0, 1))[0, :3, :3] torch.testing.assert_close(scores, torch.tensor([0.9695]).to(torch_device), atol=1e-4, rtol=1e-4) torch.testing.assert_close( masks_logits, torch.tensor( [[-14.3212, -15.4295, -17.4482], [-13.2246, -15.9468, -17.1341], [-15.1678, -16.4498, -14.7385]] ).to(torch_device), atol=1e-4, rtol=1e-4, ) def test_dummy_pipeline_generation(self): generator = pipeline("mask-generation", model="yonigozlan/EdgeTAM-hf", device=torch_device) raw_image = prepare_image() _ = generator(raw_image, points_per_batch=64)
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/edgetam/test_modeling_edgetam.py", "license": "Apache License 2.0", "lines": 616, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:tests/models/edgetam_video/test_modeling_edgetam_video.py
# Copyright 2025 the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch SAM2 model.""" import gc import unittest import requests from transformers.testing_utils import ( backend_empty_cache, is_torch_bf16_available_on_device, is_torch_fp16_available_on_device, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from transformers.video_utils import load_video if is_torch_available(): import torch from transformers import EdgeTamVideoModel, Sam2VideoProcessor if is_vision_available(): from PIL import Image def prepare_image(): img_url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/truck.jpg" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image def prepare_groceries_image(): img_url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/groceries.jpg" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image def prepare_dog_img(): img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/dog-sam.png" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image def prepare_video(): video_url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/bedroom.mp4" raw_video, _ = load_video(video_url) return raw_video @slow class 
EdgeTamVideoModelIntegrationTest(unittest.TestCase): def setUp(self): super().setUp() self.video_model = EdgeTamVideoModel.from_pretrained("yonigozlan/EdgeTAM-hf").to(torch.float32) self.processor = Sam2VideoProcessor.from_pretrained("yonigozlan/EdgeTAM-hf") self.video_model.to(torch_device) self.video_model.eval() def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() backend_empty_cache(torch_device) def test_inference_mask_generation_video_one_point(self): raw_video = prepare_video() inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device) ann_frame_idx = 0 # the frame index we interact with ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers) self.processor.add_inputs_to_inference_session( inference_session=inference_session, frame_idx=ann_frame_idx, obj_ids=ann_obj_id, input_points=[[[[210, 350]]]], input_labels=[[[1]]], ) outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx) low_res_masks = outputs.pred_masks self.assertEqual(low_res_masks.shape, (1, 1, 256, 256)) video_res_masks = self.processor.post_process_masks([low_res_masks], [raw_video.shape[-3:-1]], binarize=False)[ 0 ] self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2])) torch.testing.assert_close( video_res_masks[0, 0, :3, :3], torch.tensor( [[-28.3880, -28.3880, -27.9277], [-27.5260, -27.5260, -27.2455], [-25.5902, -25.5902, -25.7136]] ).to(torch_device), atol=1e-4, rtol=1e-4, ) # test propagate in video frames frames = [] for sam2_video_output in self.video_model.propagate_in_video_iterator( inference_session=inference_session, max_frame_num_to_track=2, ): video_res_masks = self.processor.post_process_masks( [sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False )[0] frames.append(video_res_masks) frames = torch.stack(frames, dim=0) self.assertEqual(frames.shape, (3, 1, 
1, raw_video.shape[-3], raw_video.shape[-2])) torch.testing.assert_close( frames[:3, :, :, :2, :2], torch.tensor( [ [[[[-28.3880, -28.3880], [-27.5260, -27.5260]]]], [[[[-15.3350, -15.3350], [-15.0002, -15.0002]]]], [[[[-14.8729, -14.8729], [-14.6724, -14.6724]]]], ], ).to(torch_device), atol=1e-4, rtol=1e-4, ) def test_inference_mask_generation_video_one_point_propagate_in_video_directly(self): raw_video = prepare_video() inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device) ann_frame_idx = 0 # the frame index we interact with ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers) self.processor.add_inputs_to_inference_session( inference_session=inference_session, frame_idx=ann_frame_idx, obj_ids=ann_obj_id, input_points=[[[[210, 350]]]], input_labels=[[[1]]], ) # test propagate in video frames frames = [] for sam2_video_output in self.video_model.propagate_in_video_iterator( inference_session=inference_session, start_frame_idx=ann_frame_idx, max_frame_num_to_track=2, ): video_res_masks = self.processor.post_process_masks( [sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False )[0] frames.append(video_res_masks) frames = torch.stack(frames, dim=0) self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2])) print(f"VIDEO_TEST2 - ACTUAL frames[:3, :, :, :2, :2]: {frames[:3, :, :, :2, :2]}") torch.testing.assert_close( frames[:3, :, :, :2, :2], torch.tensor( [ [[[[-28.3880, -28.3880], [-27.5260, -27.5260]]]], [[[[-15.3350, -15.3350], [-15.0002, -15.0002]]]], [[[[-14.8729, -14.8729], [-14.6724, -14.6724]]]], ] ).to(torch_device), atol=1e-4, rtol=1e-4, ) def test_inference_mask_generation_video_multi_points(self): raw_video = prepare_video() inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device) ann_frame_idx = 0 # the frame index we interact with ann_obj_id = 1 # give a unique id to each object we 
interact with (it can be any integers) self.processor.add_inputs_to_inference_session( inference_session=inference_session, frame_idx=ann_frame_idx, obj_ids=ann_obj_id, input_points=[[[[210, 350], [250, 220]]]], input_labels=[[[1, 1]]], ) outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx) low_res_masks = outputs.pred_masks video_res_masks = self.processor.post_process_masks( [outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False )[0] self.assertEqual(low_res_masks.shape, (1, 1, 256, 256)) self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2])) torch.testing.assert_close( video_res_masks[0, 0, :3, :3], torch.tensor( [[-17.3081, -17.3081, -16.9805], [-16.8430, -16.8430, -16.6766], [-15.7986, -15.7986, -15.9941]] ).to(torch_device), atol=1e-4, rtol=1e-4, ) # test propagate in video frames frames = [] for sam2_video_output in self.video_model.propagate_in_video_iterator( inference_session=inference_session, start_frame_idx=ann_frame_idx, max_frame_num_to_track=2, ): video_res_masks = self.processor.post_process_masks( [sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False )[0] frames.append(video_res_masks) frames = torch.stack(frames, dim=0) self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2])) # higher tolerance due to errors propagating from frame to frame torch.testing.assert_close( frames[:3, :, :, :2, :2], torch.tensor( [ [[[[-17.3081, -17.3081], [-16.8430, -16.8430]]]], [[[[-14.9302, -14.9302], [-14.8802, -14.8802]]]], [[[[-14.4372, -14.4372], [-14.3697, -14.3697]]]], ] ).to(torch_device), atol=1e-2, rtol=1e-2, ) def test_inference_mask_generation_video_one_bb(self): raw_video = prepare_video() inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device) ann_frame_idx = 0 # the frame index we interact with ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers) 
self.processor.add_inputs_to_inference_session( inference_session=inference_session, frame_idx=ann_frame_idx, obj_ids=ann_obj_id, input_boxes=[[[300, 0, 500, 400]]], ) outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx) low_res_masks = outputs.pred_masks video_res_masks = self.processor.post_process_masks( [outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False )[0] self.assertEqual(low_res_masks.shape, (1, 1, 256, 256)) self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2])) torch.testing.assert_close( video_res_masks[0, 0, :3, :3], torch.tensor( [[-17.3245, -17.3245, -16.9231], [-16.8773, -16.8773, -16.6082], [-15.8731, -15.8731, -15.9011]] ).to(torch_device), atol=1e-4, rtol=1e-4, ) # test propagate in video frames frames = [] for sam2_video_output in self.video_model.propagate_in_video_iterator( inference_session=inference_session, start_frame_idx=ann_frame_idx, max_frame_num_to_track=2, ): video_res_masks = self.processor.post_process_masks( [sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False )[0] frames.append(video_res_masks) frames = torch.stack(frames, dim=0) self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2])) # higher tolerance due to errors propagating from frame to frame torch.testing.assert_close( frames[:3, :, :, :2, :2], torch.tensor( [ [[[[-17.3245, -17.3245], [-16.8773, -16.8773]]]], [[[[-16.2826, -16.2826], [-15.9087, -15.9087]]]], [[[[-15.8716, -15.8716], [-15.3992, -15.3992]]]], ] ).to(torch_device), atol=1e-2, rtol=1e-2, ) def test_inference_mask_generation_video_one_point_one_bb(self): raw_video = prepare_video() inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device) ann_frame_idx = 0 # the frame index we interact with ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers) self.processor.add_inputs_to_inference_session( 
inference_session=inference_session, frame_idx=ann_frame_idx, obj_ids=ann_obj_id, input_boxes=[[[300, 0, 500, 400]]], input_points=[[[[460, 60]]]], input_labels=[[[1]]], ) outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx) low_res_masks = outputs.pred_masks video_res_masks = self.processor.post_process_masks( [outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False )[0] self.assertEqual(low_res_masks.shape, (1, 1, 256, 256)) self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2])) torch.testing.assert_close( video_res_masks[0, 0, :3, :3], torch.tensor( [[-13.9780, -13.9780, -13.7824], [-13.7642, -13.7642, -13.6000], [-13.2842, -13.2842, -13.1904]] ).to(torch_device), atol=1e-4, rtol=1e-4, ) # test propagate in video frames frames = [] for sam2_video_output in self.video_model.propagate_in_video_iterator( inference_session=inference_session, start_frame_idx=ann_frame_idx, max_frame_num_to_track=2, ): video_res_masks = self.processor.post_process_masks( [sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False )[0] frames.append(video_res_masks) frames = torch.stack(frames, dim=0) self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2])) # higher tolerance due to errors propagating from frame to frame torch.testing.assert_close( frames[:3, :, :, :2, :2], torch.tensor( [ [[[[-13.9780, -13.9780], [-13.7642, -13.7642]]]], [[[[-16.0142, -16.0142], [-15.5600, -15.5600]]]], [[[[-16.7568, -16.7568], [-16.2460, -16.2460]]]], ] ).to(torch_device), atol=1e-2, rtol=1e-2, ) def test_inference_mask_generation_video_multi_objects_multi_points(self): raw_video = prepare_video() inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device) ann_frame_idx = 0 # the frame index we interact with ann_obj_ids = [2, 3] # give a unique id to each object we interact with (it can be any integers) 
self.processor.add_inputs_to_inference_session( inference_session=inference_session, frame_idx=ann_frame_idx, obj_ids=ann_obj_ids, input_points=[[[[200, 300], [230, 250], [275, 175]], [[400, 150]]]], input_labels=[[[1, 1, 0], [1]]], ) outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx) low_res_masks = outputs.pred_masks video_res_masks = self.processor.post_process_masks( [outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False )[0] self.assertEqual(low_res_masks.shape, (2, 1, 256, 256)) self.assertEqual(video_res_masks.shape, (2, 1, raw_video.shape[-3], raw_video.shape[-2])) torch.testing.assert_close( video_res_masks[:, 0, :2, :2], # first object torch.tensor( [[[-12.6233, -12.6233], [-12.1809, -12.1809]], [[-13.4556, -13.4556], [-12.9549, -12.9549]]] ).to(torch_device), atol=1e-4, rtol=1e-4, ) # test propagate in video frames frames = [] for sam2_video_output in self.video_model.propagate_in_video_iterator( inference_session=inference_session, start_frame_idx=ann_frame_idx, max_frame_num_to_track=2, ): video_res_masks = self.processor.post_process_masks( [sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False )[0] frames.append(video_res_masks) frames = torch.stack(frames, dim=0) self.assertEqual(frames.shape, (3, 2, 1, raw_video.shape[-3], raw_video.shape[-2])) torch.testing.assert_close( frames[:3, :, :, :2, :2], torch.tensor( [ [[[[-12.6233, -12.6233], [-12.1809, -12.1809]]], [[[-13.4556, -13.4556], [-12.9549, -12.9549]]]], [[[[-12.5589, -12.5589], [-12.4450, -12.4450]]], [[[-12.2181, -12.2181], [-12.0188, -12.0188]]]], [[[[-15.3170, -15.3170], [-15.0254, -15.0254]]], [[[-11.4912, -11.4912], [-11.3171, -11.3171]]]], ] ).to(torch_device), atol=1e-4, rtol=1e-4, ) def test_inference_propagate_video_from_mask_input(self): raw_video = prepare_video() inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device) ann_frame_idx = 0 # the frame index we interact with 
ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers) # get input_mask self.processor.add_inputs_to_inference_session( inference_session=inference_session, frame_idx=ann_frame_idx, obj_ids=ann_obj_id, input_points=[[[[210, 350], [250, 220]]]], input_labels=[[[1, 1]]], ) sam2_video_output = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx) # set mask as input self.processor.add_inputs_to_inference_session( inference_session=inference_session, frame_idx=ann_frame_idx, obj_ids=ann_obj_id, input_masks=self.processor.post_process_masks( [sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False )[0], ) sam2_video_output = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx) low_res_masks = sam2_video_output.pred_masks self.assertEqual(low_res_masks.shape, (1, 1, 256, 256)) video_res_masks = self.processor.post_process_masks( [sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False )[0] self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2])) torch.testing.assert_close( video_res_masks[0, 0, :3, :3], torch.tensor( [[-10.0000, -10.0000, -10.0000], [-10.0000, -10.0000, -10.0000], [-10.0000, -10.0000, -10.0000]] ).to(torch_device), atol=1e-4, rtol=1e-4, ) # test propagate in video frames frames = [] for sam2_video_output in self.video_model.propagate_in_video_iterator( inference_session=inference_session, start_frame_idx=ann_frame_idx, max_frame_num_to_track=2, ): video_res_masks = self.processor.post_process_masks( [sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False )[0] frames.append(video_res_masks) frames = torch.stack(frames, dim=0) self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2])) torch.testing.assert_close( frames[:3, :, :, :2, :2], torch.tensor( [ [[[[-10.0000, -10.0000], [-10.0000, -10.0000]]]], [[[[-17.4083, -17.4083], [-17.2256, -17.2256]]]], [[[[-13.8533, 
-13.8533], [-13.7759, -13.7759]]]], ], ).to(torch_device), atol=1e-4, rtol=1e-4, ) def test_inference_propagate_on_streamed_video(self): raw_video = prepare_video() inference_session = self.processor.init_video_session(inference_device=torch_device) video_res_masks = [] max_frame_num_to_track = 3 for frame_idx, frame in enumerate(raw_video): if frame_idx >= max_frame_num_to_track: break inputs = self.processor(images=frame, device=torch_device, return_tensors="pt") if frame_idx == 0: self.processor.add_inputs_to_inference_session( inference_session, frame_idx=0, obj_ids=1, input_points=[[[[210, 350], [250, 220]]]], input_labels=[[[1, 1]]], original_size=inputs.original_sizes[0], ) sam2_video_output = self.video_model(inference_session=inference_session, frame=inputs.pixel_values[0]) video_res_masks.append( self.processor.post_process_masks( [sam2_video_output.pred_masks], inputs.original_sizes, binarize=False )[0] ) video_res_masks = torch.stack(video_res_masks, dim=0) self.assertEqual( video_res_masks.shape, (max_frame_num_to_track, 1, 1, raw_video.shape[-3], raw_video.shape[-2]) ) # higher tolerance due to errors propagating from frame to frame print(f"VIDEO_TEST8 - ACTUAL video_res_masks[:3, :, :, :2, :2]: {video_res_masks[:3, :, :, :2, :2]}") torch.testing.assert_close( video_res_masks[:3, :, :, :2, :2], torch.tensor( [ [[[[-17.3081, -17.3081], [-16.8430, -16.8430]]]], [[[[-14.9302, -14.9302], [-14.8802, -14.8802]]]], [[[[-14.4372, -14.4372], [-14.3697, -14.3697]]]], ] ).to(torch_device), atol=1e-2, rtol=1e-2, ) def test_inference_with_different_dtypes(self): """Test that inference works correctly for float32, bfloat16, and float16 dtypes.""" raw_video = prepare_video() dtypes_to_test = [ (torch.float32, None), # float32 is always available (torch.bfloat16, is_torch_bf16_available_on_device), (torch.float16, is_torch_fp16_available_on_device), ] for dtype, availability_check in dtypes_to_test: with self.subTest(dtype=dtype): # Skip if dtype is not available on 
device if availability_check is not None and not availability_check(torch_device): self.skipTest(f"{dtype} not supported on {torch_device}") # Load model with specific dtype video_model = EdgeTamVideoModel.from_pretrained("yonigozlan/EdgeTAM-hf", torch_dtype=dtype).to( torch_device ) video_model.eval() # Initialize inference session inference_session = self.processor.init_video_session( video=raw_video, inference_device=torch_device, dtype=dtype ) ann_frame_idx = 0 ann_obj_id = 1 # Add inputs self.processor.add_inputs_to_inference_session( inference_session=inference_session, frame_idx=ann_frame_idx, obj_ids=ann_obj_id, input_points=[[[[210, 350]]]], input_labels=[[[1]]], ) # Run inference on first frame outputs = video_model(inference_session=inference_session, frame_idx=ann_frame_idx) low_res_masks = outputs.pred_masks # Verify output shape and dtype self.assertEqual(low_res_masks.shape, (1, 1, 256, 256)) self.assertEqual(low_res_masks.dtype, dtype) # Post-process masks video_res_masks = self.processor.post_process_masks( [low_res_masks], [raw_video.shape[-3:-1]], binarize=False )[0] self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2])) # Test propagation across multiple frames to test memory handling frames = [] max_frame_num_to_track = 2 for sam2_video_output in video_model.propagate_in_video_iterator( inference_session=inference_session, start_frame_idx=ann_frame_idx, max_frame_num_to_track=max_frame_num_to_track, ): video_res_masks = self.processor.post_process_masks( [sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False )[0] frames.append(video_res_masks) # Verify dtype is maintained during propagation self.assertEqual(sam2_video_output.pred_masks.dtype, dtype) frames = torch.stack(frames, dim=0) # Verify we got the expected number of frames (initial frame + max_frame_num_to_track) self.assertEqual( frames.shape, (max_frame_num_to_track + 1, 1, 1, raw_video.shape[-3], raw_video.shape[-2]) ) # Verify 
dtype is maintained in stacked frames self.assertEqual(frames.dtype, dtype)
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/edgetam_video/test_modeling_edgetam_video.py", "license": "Apache License 2.0", "lines": 529, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:src/transformers/models/parakeet/configuration_parakeet.py
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Parakeet model configuration.""" from ...configuration_utils import PreTrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class ParakeetEncoderConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`ParakeetEncoder`]. It is used to instantiate a `ParakeetEncoder` model according to the specified arguments, defining the model architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 1024): Dimension of the layers and the hidden states. num_hidden_layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 4096): Dimension of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the encoder and pooler. attention_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in the attention layers. 
convolution_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in convolutions of the conformer's convolution module. conv_kernel_size (`int`, *optional*, defaults to 9): The kernel size of the convolution layers in the Conformer block. subsampling_factor (`int`, *optional*, defaults to 8): The factor by which the input sequence is subsampled. subsampling_conv_channels (`int`, *optional*, defaults to 256): The number of channels in the subsampling convolution layers. num_mel_bins (`int`, *optional*, defaults to 80): Number of mel features. subsampling_conv_kernel_size (`int`, *optional*, defaults to 3): The kernel size of the subsampling convolution layers. subsampling_conv_stride (`int`, *optional*, defaults to 2): The stride of the subsampling convolution layers. dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for all fully connected layers in the embeddings, encoder, and pooler. dropout_positions (`float`, *optional*, defaults to 0.0): The dropout ratio for the positions in the input sequence. layerdrop (`float`, *optional*, defaults to 0.1): The dropout ratio for the layers in the encoder. activation_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for activations inside the fully connected layer. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention layers. max_position_embeddings (`int`, *optional*, defaults to 5000): The maximum sequence length that this model might ever be used with. scale_input (`bool`, *optional*, defaults to `True`): Whether to scale the input embeddings. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
Example: ```python >>> from transformers import ParakeetEncoderModel, ParakeetEncoderConfig >>> # Initializing a `ParakeetEncoder` configuration >>> configuration = ParakeetEncoderConfig() >>> # Initializing a model from the configuration >>> model = ParakeetEncoderModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` This configuration class is based on the ParakeetEncoder architecture from NVIDIA NeMo. You can find more details and pre-trained models at [nvidia/parakeet-ctc-1.1b](https://huggingface.co/nvidia/parakeet-ctc-1.1b). """ model_type = "parakeet_encoder" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, hidden_size=1024, num_hidden_layers=24, num_attention_heads=8, intermediate_size=4096, hidden_act="silu", attention_bias=True, convolution_bias=True, conv_kernel_size=9, subsampling_factor=8, subsampling_conv_channels=256, num_mel_bins=80, subsampling_conv_kernel_size=3, subsampling_conv_stride=2, dropout=0.1, dropout_positions=0.0, layerdrop=0.1, activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=5000, scale_input=True, initializer_range=0.02, **kwargs, ): self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_attention_heads # LlamaAttention compatibility self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.attention_bias = attention_bias self.convolution_bias = convolution_bias self.conv_kernel_size = conv_kernel_size self.subsampling_conv_kernel_size = subsampling_conv_kernel_size self.subsampling_conv_stride = subsampling_conv_stride self.subsampling_factor = subsampling_factor self.subsampling_conv_channels = subsampling_conv_channels self.num_mel_bins = num_mel_bins self.dropout = dropout self.dropout_positions = dropout_positions self.layerdrop = layerdrop self.activation_dropout = activation_dropout self.attention_dropout = attention_dropout 
self.max_position_embeddings = max_position_embeddings self.scale_input = scale_input self.initializer_range = initializer_range super().__init__( **kwargs, ) class ParakeetCTCConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`ParakeetForCTC`]. It is used to instantiate a Parakeet CTC model according to the specified arguments, defining the model architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 1025): Vocabulary size of the model. ctc_loss_reduction (`str`, *optional*, defaults to `"mean"`): Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an instance of [`ParakeetForCTC`]. ctc_zero_infinity (`bool`, *optional*, defaults to `True`): Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance of [`ParakeetForCTC`]. encoder_config (`Union[dict, ParakeetEncoderConfig]`, *optional*): The config object or dictionary of the encoder. pad_token_id (`int`, *optional*, defaults to 1024): Padding token id. Also used as blank token id. Example: ```python >>> from transformers import ParakeetForCTC, ParakeetCTCConfig >>> # Initializing a Parakeet configuration >>> configuration = ParakeetCTCConfig() >>> # Initializing a model from the configuration >>> model = ParakeetForCTC(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` This configuration class is based on the Parakeet CTC architecture from NVIDIA NeMo. You can find more details and pre-trained models at [nvidia/parakeet-ctc-1.1b](https://huggingface.co/nvidia/parakeet-ctc-1.1b). 
""" model_type = "parakeet_ctc" sub_configs = {"encoder_config": ParakeetEncoderConfig} def __init__( self, vocab_size=1025, ctc_loss_reduction="mean", ctc_zero_infinity=True, encoder_config: dict | ParakeetEncoderConfig = None, pad_token_id=1024, **kwargs, ): self.vocab_size = vocab_size self.ctc_loss_reduction = ctc_loss_reduction self.ctc_zero_infinity = ctc_zero_infinity if isinstance(encoder_config, dict): self.encoder_config = ParakeetEncoderConfig(**encoder_config) elif encoder_config is None: self.encoder_config = ParakeetEncoderConfig() self.encoder_config = self.encoder_config self.initializer_range = self.encoder_config.initializer_range self.pad_token_id = pad_token_id super().__init__(**kwargs) @classmethod def from_encoder_config(cls, encoder_config: ParakeetEncoderConfig, **kwargs): r""" Instantiate a [`ParakeetCTCConfig`] (or a derived class) from parakeet encoder model configuration. Returns: [`ParakeetCTCConfig`]: An instance of a configuration object """ return cls(encoder_config=encoder_config.to_dict(), **kwargs) __all__ = ["ParakeetCTCConfig", "ParakeetEncoderConfig"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/parakeet/configuration_parakeet.py", "license": "Apache License 2.0", "lines": 195, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/parakeet/convert_nemo_to_hf.py
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import gc import os import re import tarfile import torch import yaml from tokenizers import AddedToken from transformers import ( ParakeetCTCConfig, ParakeetEncoder, ParakeetEncoderConfig, ParakeetFeatureExtractor, ParakeetForCTC, ParakeetProcessor, ParakeetTokenizer, ) from transformers.convert_slow_tokenizer import ParakeetConverter from transformers.utils.hub import cached_file NEMO_TO_HF_WEIGHT_MAPPING = { r"encoder\.pre_encode\.conv\.": r"encoder.subsampling.layers.", r"encoder\.pre_encode\.out\.": r"encoder.subsampling.linear.", r"encoder\.pos_enc\.": r"encoder.encode_positions.", r"encoder\.layers\.(\d+)\.conv\.batch_norm\.": r"encoder.layers.\1.conv.norm.", r"decoder\.decoder_layers\.0\.(weight|bias)": r"ctc_head.\1", r"linear_([kv])": r"\1_proj", r"linear_out": r"o_proj", r"linear_q": r"q_proj", r"pos_bias_([uv])": r"bias_\1", r"linear_pos": r"relative_k_proj", } def convert_key(key, mapping): for pattern, replacement in mapping.items(): key = re.sub(pattern, replacement, key) return key def extract_nemo_archive(nemo_file_path: str, extract_dir: str) -> dict[str, str]: """ Extract .nemo file (tar archive) and return paths to important files. Args: nemo_file_path: Path to .nemo file extract_dir: Directory to extract to Returns: Dictionary with paths to model.pt, model_config.yaml, etc. 
""" print(f"Extracting NeMo archive: {nemo_file_path}") with tarfile.open(nemo_file_path, "r", encoding="utf-8") as tar: tar.extractall(extract_dir) # Log all extracted files for debugging all_files = [] for root, dirs, files in os.walk(extract_dir): for file in files: file_path = os.path.join(root, file) all_files.append(file_path) print(f"All extracted files: {[os.path.basename(f) for f in all_files]}") # Find important files with more robust detection model_files = {} for root, dirs, files in os.walk(extract_dir): for file in files: file_path = os.path.join(root, file) file_lower = file.lower() # Look for model weights with various common names if ( file.endswith(".pt") or file.endswith(".pth") or file.endswith(".ckpt") or file.endswith(".bin") or "model" in file_lower and ("weight" in file_lower or "state" in file_lower) or file_lower == "model.pt" or file_lower == "pytorch_model.bin" or file_lower == "model_weights.ckpt" ): model_files["model_weights"] = file_path print(f"Found model weights: {file}") # Look for config files elif ( file == "model_config.yaml" or file == "config.yaml" or (file.endswith(".yaml") and "config" in file_lower) ): if "model_config" not in model_files: # Prefer model_config.yaml model_files["model_config"] = file_path print(f"Found config file: {file}") if file == "model_config.yaml": model_files["model_config"] = file_path # Override with preferred name # Look for vocabulary files elif ( file.endswith(".vocab") or file.endswith(".model") or file.endswith(".txt") or ("tokenizer" in file_lower and (file.endswith(".vocab") or file.endswith(".model"))) ): # Prefer .vocab files over others if "tokenizer_model_file" not in model_files or file.endswith(".model"): model_files["tokenizer_model_file"] = file_path print(f"Found tokenizer model file: {file}") else: print(f"Found additional vocabulary file (using existing): {file}") print(f"Found model files: {list(model_files.keys())}") # Validate that we found the required files if 
"model_weights" not in model_files: raise FileNotFoundError( f"Could not find model weights file in {nemo_file_path}. " f"Expected files with extensions: .pt, .pth, .ckpt, .bin. " f"Found files: {[os.path.basename(f) for f in all_files]}" ) if "model_config" not in model_files: raise FileNotFoundError( f"Could not find model config file in {nemo_file_path}. " f"Expected: model_config.yaml or config.yaml. " f"Found files: {[os.path.basename(f) for f in all_files]}" ) return model_files def write_processor(nemo_config: dict, model_files, output_dir, push_to_repo_id=None): tokenizer_converted = ParakeetConverter(model_files["tokenizer_model_file"]).converted() tokenizer_converted_fast = ParakeetTokenizer( tokenizer_object=tokenizer_converted, clean_up_tokenization_spaces=False, ) tokenizer_converted_fast.add_tokens( [AddedToken("<unk>", normalized=False, special=True), AddedToken("<pad>", normalized=False, special=True)] ) tokenizer_converted_fast.add_special_tokens( { "pad_token": AddedToken("<pad>", normalized=False, special=True), "unk_token": AddedToken("<unk>", normalized=False, special=True), } ) feature_extractor_keys_to_ignore = ["_target_", "pad_to", "frame_splicing", "dither", "normalize", "window", "log"] feature_extractor_config_keys_mapping = { "sample_rate": "sampling_rate", "window_size": "win_length", "window_stride": "hop_length", "window": "window", "n_fft": "n_fft", "log": "log", "features": "feature_size", "dither": "dither", "pad_to": "pad_to", "pad_value": "padding_value", "frame_splicing": "frame_splicing", "preemphasis": "preemphasis", "hop_length": "hop_length", } converted_feature_extractor_config = {} for key, value in nemo_config["preprocessor"].items(): if key in feature_extractor_keys_to_ignore: continue if key in feature_extractor_config_keys_mapping: if key in ["window_size", "window_stride"]: value = int(value * nemo_config["preprocessor"]["sample_rate"]) converted_feature_extractor_config[feature_extractor_config_keys_mapping[key]] = 
value else: raise ValueError(f"Key {key} not found in feature_extractor_keys_mapping") feature_extractor = ParakeetFeatureExtractor(**converted_feature_extractor_config) processor = ParakeetProcessor( feature_extractor=feature_extractor, tokenizer=tokenizer_converted_fast, ) processor.save_pretrained(output_dir) if push_to_repo_id: processor.push_to_hub(push_to_repo_id) def convert_encoder_config(nemo_config): """Convert NeMo encoder config to HF encoder config.""" encoder_keys_to_ignore = [ "att_context_size", "causal_downsampling", "stochastic_depth_start_layer", "feat_out", "stochastic_depth_drop_prob", "_target_", "ff_expansion_factor", "untie_biases", "att_context_style", "self_attention_model", "conv_norm_type", "subsampling", "stochastic_depth_mode", "conv_context_size", "dropout_pre_encoder", "reduction", "reduction_factor", "reduction_position", ] encoder_config_keys_mapping = { "d_model": "hidden_size", "n_heads": "num_attention_heads", "n_layers": "num_hidden_layers", "feat_in": "num_mel_bins", "conv_kernel_size": "conv_kernel_size", "subsampling_factor": "subsampling_factor", "subsampling_conv_channels": "subsampling_conv_channels", "pos_emb_max_len": "max_position_embeddings", "dropout": "dropout", "dropout_emb": "dropout_positions", "dropout_att": "attention_dropout", "xscaling": "scale_input", "use_bias": "attention_bias", } converted_encoder_config = {} for key, value in nemo_config["encoder"].items(): if key in encoder_keys_to_ignore: continue if key in encoder_config_keys_mapping: converted_encoder_config[encoder_config_keys_mapping[key]] = value # NeMo uses 'use_bias' for both attention and convolution bias, but HF separates them if key == "use_bias": converted_encoder_config["convolution_bias"] = value else: raise ValueError(f"Key {key} not found in encoder_config_keys_mapping") return ParakeetEncoderConfig(**converted_encoder_config) def load_and_convert_state_dict(model_files): """Load NeMo state dict and convert keys to HF format.""" 
state_dict = torch.load(model_files["model_weights"], map_location="cpu", weights_only=True) converted_state_dict = {} for key, value in state_dict.items(): # Skip preprocessing weights (featurizer components) if key.endswith("featurizer.window") or key.endswith("featurizer.fb"): print(f"Skipping preprocessing weight: {key}") continue converted_key = convert_key(key, NEMO_TO_HF_WEIGHT_MAPPING) converted_state_dict[converted_key] = value return converted_state_dict def write_ctc_model(encoder_config, converted_state_dict, output_dir, push_to_repo_id=None): """Write CTC model using encoder config and converted state dict.""" model_config = ParakeetCTCConfig.from_encoder_config(encoder_config) print("Loading the checkpoint in a Parakeet CTC model.") with torch.device("meta"): model = ParakeetForCTC(model_config) model.load_state_dict(converted_state_dict, strict=True, assign=True) print("Checkpoint loaded successfully.") del model.config._name_or_path print("Saving the model.") model.save_pretrained(output_dir) if push_to_repo_id: model.push_to_hub(push_to_repo_id) del model # Safety check: reload the converted model gc.collect() print("Reloading the model to check if it's saved correctly.") ParakeetForCTC.from_pretrained(output_dir, dtype=torch.bfloat16, device_map="auto") print("Model reloaded successfully.") def write_encoder_model(encoder_config, converted_state_dict, output_dir, push_to_repo_id=None): """Write encoder model using encoder config and converted state dict.""" # Filter to only encoder weights (exclude CTC head if present) encoder_state_dict = { k.replace("encoder.", "", 1) if k.startswith("encoder.") else k: v for k, v in converted_state_dict.items() if k.startswith("encoder.") } print("Loading the checkpoint in a Parakeet Encoder model (for TDT).") with torch.device("meta"): model = ParakeetEncoder(encoder_config) model.load_state_dict(encoder_state_dict, strict=True, assign=True) print("Checkpoint loaded successfully.") del 
model.config._name_or_path print("Saving the model.") model.save_pretrained(output_dir) if push_to_repo_id: model.push_to_hub(push_to_repo_id) del model # Safety check: reload the converted model gc.collect() print("Reloading the model to check if it's saved correctly.") ParakeetEncoder.from_pretrained(output_dir, dtype=torch.bfloat16, device_map="auto") print("Model reloaded successfully.") def write_model(nemo_config, model_files, model_type, output_dir, push_to_repo_id=None): """Main model conversion function.""" # Step 1: Convert encoder config (shared across all model types) encoder_config = convert_encoder_config(nemo_config) print(f"Converted encoder config: {encoder_config}") # Step 2: Load and convert state dict (shared across all model types) converted_state_dict = load_and_convert_state_dict(model_files) # Step 3: Write model based on type if model_type == "encoder": write_encoder_model(encoder_config, converted_state_dict, output_dir, push_to_repo_id) elif model_type == "ctc": write_ctc_model(encoder_config, converted_state_dict, output_dir, push_to_repo_id) else: raise ValueError(f"Model type {model_type} not supported.") def main( hf_repo_id, output_dir, model_type, push_to_repo_id=None, ): nemo_filename = f"{hf_repo_id.split('/')[-1]}.nemo" filepath = cached_file(hf_repo_id, nemo_filename) model_files = extract_nemo_archive(filepath, os.path.dirname(filepath)) nemo_config = yaml.load(open(model_files["model_config"], "r"), Loader=yaml.FullLoader) write_processor(nemo_config, model_files, output_dir, push_to_repo_id) write_model(nemo_config, model_files, model_type, output_dir, push_to_repo_id) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--hf_repo_id", required=True, help="Model repo on huggingface.co") parser.add_argument( "--model_type", required=True, choices=["encoder", "ctc"], help="Model type (`encoder`, `ctc`)" ) parser.add_argument("--output_dir", required=True, help="Output directory for HuggingFace 
model") parser.add_argument("--push_to_repo_id", help="Repository ID to push the model to on the Hub") args = parser.parse_args() main( args.hf_repo_id, args.output_dir, args.model_type, args.push_to_repo_id, )
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/parakeet/convert_nemo_to_hf.py", "license": "Apache License 2.0", "lines": 320, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/parakeet/feature_extraction_parakeet.py
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, is_librosa_available, logging from ...utils.import_utils import requires if is_librosa_available(): import librosa EPSILON = 1e-5 LOG_ZERO_GUARD_VALUE = 2**-24 logger = logging.get_logger(__name__) @requires(backends=("torch", "librosa")) class ParakeetFeatureExtractor(SequenceFeatureExtractor): r""" Constructs a Parakeet feature extractor. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. This class extracts mel-filter bank features from raw speech using a custom numpy implementation of the `Short Time Fourier Transform` which should match pytorch's `torch.stft` equivalent. Args: feature_size (`int`, *optional*, defaults to 80): The feature dimension of the extracted features. sampling_rate (`int`, *optional*, defaults to 16000): The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). hop_length (`int`, *optional*, defaults to 160): Length of the overlapping windows for the STFT used to obtain the Mel Frequency coefficients. 
n_fft (`int`, *optional*, defaults to 512): Size of the Fourier transform. win_length (`int`, *optional*, defaults to 400): The window length for the STFT computation. preemphasis (`float`, *optional*, defaults to 0.97): A preemphasis filter coefficient. 0.0 means no preemphasis filter. padding_value (`float`, *optional*, defaults to 0.0): Padding value used to pad the audio. Should correspond to silences. """ model_input_names = ["input_features", "attention_mask"] def __init__( self, feature_size=80, sampling_rate=16000, hop_length=160, n_fft=512, win_length=400, preemphasis=0.97, padding_value=0.0, **kwargs, ): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs) self.hop_length = hop_length self.n_fft = n_fft self.win_length = win_length self.preemphasis = preemphasis # TODO: @eustlb, for now we use librosa to compute the mel filters # indeed mel_filter_bank uses np.float64 (while librosa uses np.float32), giving numerical differences # self.mel_filters = mel_filter_bank( # num_frequency_bins=n_fft // 2 + 1, # num_mel_filters=feature_size, # min_frequency=0.0, # max_frequency=sampling_rate / 2, # sampling_rate=sampling_rate, # norm="slaney", # mel_scale="slaney", # ) mel_filters = librosa.filters.mel( sr=sampling_rate, n_fft=n_fft, n_mels=feature_size, fmin=0.0, fmax=sampling_rate / 2, norm="slaney" ) self.mel_filters = torch.from_numpy(mel_filters).to(torch.float32) def _torch_extract_fbank_features(self, waveform, device="cpu"): # spectrogram window = torch.hann_window(self.win_length, periodic=False, device=device) stft = torch.stft( waveform, self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window=window, return_complex=True, pad_mode="constant", ) # Let's math original implementation # magnitudes = torch.abs(stft) ** 2 magnitudes = torch.view_as_real(stft) magnitudes = torch.sqrt(magnitudes.pow(2).sum(-1)) magnitudes = magnitudes.pow(2) # log mel spectrogram mel_filters = 
self.mel_filters.to(device) mel_spec = mel_filters @ magnitudes mel_spec = torch.log(mel_spec + LOG_ZERO_GUARD_VALUE) # (batch_size, num_mel_filters, num_frames) -> (batch_size, num_frames, num_mel_filters) mel_spec = mel_spec.permute(0, 2, 1) return mel_spec def __call__( self, raw_speech: np.ndarray | list[float] | list[np.ndarray] | list[list[float]], truncation: bool = False, pad_to_multiple_of: int | None = None, return_tensors: str | TensorType | None = None, return_attention_mask: bool | None = None, padding: str | None = "longest", max_length: int | None = None, sampling_rate: int | None = None, do_normalize: bool | None = None, device: str | None = "cpu", return_token_timestamps: bool | None = None, **kwargs, ) -> BatchFeature: """ Main method to featurize and prepare for the model one or several sequence(s). Implementation uses PyTorch for the STFT computation if available, otherwise a slower NumPy based one. Args: raw_speech (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`): The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not stereo, i.e. single float per timestep. truncation (`bool`, *optional*, default to `True`): Activates truncation to cut input sequences longer than *max_length* to *max_length*. pad_to_multiple_of (`int`, *optional*, defaults to None): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific feature_extractor's default. 
[What are attention masks?](../glossary#attention-mask) <Tip> For Parakeet models, `attention_mask` should always be passed for batched inference, to avoid subtle bugs. </Tip> return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. sampling_rate (`int`, *optional*): The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition pipeline. padding_value (`float`, *optional*, defaults to 0.0): The value that is used to fill the padding values / vectors. do_normalize (`bool`, *optional*, defaults to `False`): Whether or not to zero-mean unit-variance normalize the input. Normalizing can help to significantly improve the performance of the model. device (`str`, *optional*, defaults to `'cpu'`): Specifies the device for computation of the log-mel spectrogram of audio signals in the `_torch_extract_fbank_features` method. (e.g., "cpu", "cuda") return_token_timestamps (`bool`, *optional*, defaults to `None`): Deprecated. Use `return_attention_mask` instead from which the number of frames can be inferred. Whether or not to return the number of frames of the input raw_speech. These num_frames can be used by the model to compute word level timestamps. """ if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" f" was sampled with {self.sampling_rate} and not {sampling_rate}." 
) else: logger.warning( f"It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. " "Failing to do so can result in silent errors that might be hard to debug." ) # Convert to torch tensor if isinstance(raw_speech, np.ndarray): raw_speech = torch.tensor(raw_speech) elif isinstance(raw_speech, (list, tuple)) and isinstance(raw_speech[0], np.ndarray): raw_speech = [torch.tensor(speech) for speech in raw_speech] is_batched_torch = isinstance(raw_speech, torch.Tensor) and len(raw_speech.shape) > 1 if is_batched_torch and len(raw_speech.shape) > 2: logger.warning( f"Only mono-channel audio is supported for input to {self.__class__.__name__}. " "We will take the mean of the channels to convert to mono." ) raw_speech = raw_speech.mean(-1) is_batched_sequence = isinstance(raw_speech, (list, tuple)) if is_batched_sequence: for speech in raw_speech: if len(speech.shape) > 1: logger.warning( f"Only mono-channel audio is supported for input to {self.__class__.__name__}. " "We will take the mean of the channels to convert to mono." 
) speech = speech.mean(-1) if is_batched_torch or is_batched_sequence: raw_speech = [speech[:, None].to(torch.float32) for speech in raw_speech] else: raw_speech = [raw_speech[:, None].to(torch.float32)] audio_lengths = [len(speech) for speech in raw_speech] batched_speech = BatchFeature({"input_features": raw_speech, "audio_lengths": audio_lengths}) padded_inputs = self.pad( batched_speech, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ) input_features = padded_inputs.input_features.squeeze(-1) # preemphasis if self.preemphasis is not None: timemask = torch.arange(input_features.shape[1], device=input_features.device).unsqueeze( 0 ) < padded_inputs.audio_lengths.unsqueeze(1) input_features = torch.cat( [input_features[:, :1], input_features[:, 1:] - self.preemphasis * input_features[:, :-1]], dim=1 ) input_features = input_features.masked_fill(~timemask, 0.0) input_features = self._torch_extract_fbank_features(input_features, device) features_lengths = torch.floor_divide( padded_inputs.audio_lengths + self.n_fft // 2 * 2 - self.n_fft, self.hop_length ) attention_mask = torch.arange(input_features.shape[1], device=device)[None, :] < features_lengths[:, None] # normalize mel features, ignoring padding mask = attention_mask.unsqueeze(-1) input_features_masked = input_features * mask mean = input_features_masked.sum(dim=1) / features_lengths.unsqueeze(-1) mean = mean.unsqueeze(1) variance = ((input_features_masked - mean) ** 2 * mask).sum(dim=1) / (features_lengths - 1).unsqueeze(-1) std = torch.sqrt(variance).unsqueeze(1) input_features = (input_features - mean) / (std + EPSILON) input_features *= mask return BatchFeature( data={ "input_features": input_features, "attention_mask": attention_mask, }, tensor_type=return_tensors, ) __all__ = ["ParakeetFeatureExtractor"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/parakeet/feature_extraction_parakeet.py", "license": "Apache License 2.0", "lines": 242, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/parakeet/modular_parakeet.py
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Parakeet model.""" import math from collections.abc import Callable from dataclasses import dataclass import torch from torch import nn from ... import initialization as init from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, CausalLMOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple from ...utils.generic import maybe_autocast, merge_with_config_defaults from ...utils.output_capturing import capture_outputs from ..fastspeech2_conformer.modeling_fastspeech2_conformer import FastSpeech2ConformerConvolutionModule from ..llama.modeling_llama import LlamaAttention, eager_attention_forward from .configuration_parakeet import ParakeetCTCConfig, ParakeetEncoderConfig @dataclass @auto_docstring( custom_intro=""" Extends [~modeling_outputs.BaseModelOutput] to include the output attention mask since sequence length is not preserved in the model's forward. 
""" ) class ParakeetEncoderModelOutput(BaseModelOutput): attention_mask: torch.Tensor | None = None class ParakeetEncoderRelPositionalEncoding(nn.Module): """Relative positional encoding for Parakeet.""" inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, config: ParakeetEncoderConfig, device=None): super().__init__() self.max_position_embeddings = config.max_position_embeddings base = 10000.0 inv_freq = 1.0 / ( base ** ( torch.arange(0, config.hidden_size, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / config.hidden_size ) ) self.register_buffer("inv_freq", inv_freq, persistent=False) @torch.no_grad() def forward(self, hidden_states: torch.Tensor): seq_length = hidden_states.shape[1] if seq_length > self.max_position_embeddings: raise ValueError( f"Sequence Length: {seq_length} has to be less or equal than " f"config.max_position_embeddings {self.max_position_embeddings}." ) position_ids = torch.arange(seq_length - 1, -seq_length, -1, device=hidden_states.device) inv_freq_expanded = ( self.inv_freq[None, :, None].float().expand(hidden_states.shape[0], -1, 1).to(hidden_states.device) ) position_ids_expanded = position_ids[None, None, :].float() device_type = ( hidden_states.device.type if isinstance(hidden_states.device.type, str) and hidden_states.device.type != "mps" else "cpu" ) with maybe_autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) sin = freqs.sin() cos = freqs.cos() # interleave sin and cos pos_embed = torch.stack([sin, cos], dim=-1) pos_embed = pos_embed.reshape(*pos_embed.shape[:-2], -1) return pos_embed.to(dtype=hidden_states.dtype) class ParakeetEncoderFeedForward(nn.Module): def __init__(self, config: ParakeetEncoderConfig): super().__init__() self.linear1 = nn.Linear(config.hidden_size, config.intermediate_size, bias=config.attention_bias) self.activation = ACT2FN[config.hidden_act] self.linear2 = 
nn.Linear(config.intermediate_size, config.hidden_size, bias=config.attention_bias) self.activation_dropout = config.activation_dropout def forward(self, hidden_states): hidden_states = self.activation(self.linear1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.linear2(hidden_states) return hidden_states class ParakeetEncoderConvolutionModule(FastSpeech2ConformerConvolutionModule): def __init__(self, config: ParakeetEncoderConfig, module_config=None): super().__init__(config, module_config) class ParakeetEncoderAttention(LlamaAttention): """Multi-head attention with relative positional encoding. See section 3.3 of https://huggingface.co/papers/1901.02860.""" def __init__(self, config: ParakeetEncoderConfig, layer_idx: int): super().__init__(config, layer_idx=layer_idx) self.is_causal = False # W_{k,R} projection self.relative_k_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False) # global content bias self.bias_u = nn.Parameter(torch.zeros(config.num_attention_heads, self.head_dim)) # global positional bias self.bias_v = nn.Parameter(torch.zeros(config.num_attention_heads, self.head_dim)) def forward( self, hidden_states: torch.Tensor, position_embeddings: torch.Tensor | None, attention_mask: torch.Tensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, torch.Tensor]: input_shape = hidden_states.shape[:-1] batch_size, seq_length = input_shape hidden_shape = (batch_size, seq_length, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface( self.config._attn_implementation, eager_attention_forward ) query_states_with_bias_u = query_states + 
self.bias_u.view( 1, self.config.num_attention_heads, 1, self.head_dim ) query_states_with_bias_v = query_states + self.bias_v.view( 1, self.config.num_attention_heads, 1, self.head_dim ) relative_key_states = self.relative_k_proj(position_embeddings) relative_key_states = relative_key_states.view(batch_size, -1, self.config.num_attention_heads, self.head_dim) # terms (b) and (d) matrix_bd = query_states_with_bias_v @ relative_key_states.permute(0, 2, 3, 1) matrix_bd = self._rel_shift(matrix_bd) matrix_bd = matrix_bd[..., :seq_length] matrix_bd = matrix_bd * self.scaling if attention_mask is not None: # here the original codebase uses -10000.0 rather than float("-inf") and then manual masked fill with 0.0s # see: https://github.com/NVIDIA-NeMo/NeMo/blob/8cfedd7203462cb251a914e700e5605444277561/nemo/collections/asr/parts/submodules/multi_head_attention.py#L320-L340 # we rather went for a straight-forward approach with float("-inf") matrix_bd = matrix_bd.masked_fill_(attention_mask.logical_not(), float("-inf")) # will compute matrix_ac - terms (a) and (c) - and add matrix_bd attn_output, attn_weights = attention_interface( self, query=query_states_with_bias_u, key=key_states, value=value_states, attention_mask=matrix_bd, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights def _rel_shift(self, attention_scores): """Relative position shift for Shaw et al. style attention. 
See appendix B of https://huggingface.co/papers/1901.02860.""" batch_size, num_heads, query_length, position_length = attention_scores.shape attention_scores = nn.functional.pad(attention_scores, pad=(1, 0)) attention_scores = attention_scores.view(batch_size, num_heads, -1, query_length) attention_scores = attention_scores[:, :, 1:].view(batch_size, num_heads, query_length, position_length) return attention_scores class ParakeetEncoderSubsamplingConv2D(nn.Module): def __init__(self, config: ParakeetEncoderConfig): super().__init__() self.kernel_size = config.subsampling_conv_kernel_size self.stride = config.subsampling_conv_stride self.channels = config.subsampling_conv_channels self.padding = (self.kernel_size - 1) // 2 self.num_layers = int(math.log2(config.subsampling_factor)) # define layers self.layers = nn.ModuleList() self.layers.append( nn.Conv2d(1, self.channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding) ) self.layers.append(nn.ReLU()) for i in range(self.num_layers - 1): # depthwise conv self.layers.append( nn.Conv2d( self.channels, self.channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, groups=self.channels, ) ) # pointwise conv self.layers.append(nn.Conv2d(self.channels, self.channels, kernel_size=1)) # activation self.layers.append(nn.ReLU()) out_length = config.num_mel_bins // (self.stride**self.num_layers) self.linear = nn.Linear(config.subsampling_conv_channels * out_length, config.hidden_size, bias=True) def _get_output_length(self, input_lengths: torch.Tensor, conv_layer: nn.Conv2d): if hasattr(conv_layer, "stride") and conv_layer.stride != (1, 1): padding = conv_layer.padding kernel_size = conv_layer.kernel_size[0] stride = conv_layer.stride[0] output_lengths = (input_lengths + padding[0] + padding[1] - kernel_size) // stride + 1 return output_lengths return input_lengths def forward(self, input_features: torch.Tensor, attention_mask: torch.Tensor = None): hidden_states = 
input_features.unsqueeze(1) current_lengths = attention_mask.sum(-1) if attention_mask is not None else None for layer in self.layers: hidden_states = layer(hidden_states) # mask the hidden states if isinstance(layer, nn.Conv2d) and attention_mask is not None: current_lengths = self._get_output_length(current_lengths, layer) current_seq_length = hidden_states.shape[2] channel_mask = ( torch.arange(current_seq_length, device=attention_mask.device) < current_lengths[:, None] ) hidden_states *= channel_mask[:, None, :, None] hidden_states = hidden_states.transpose(1, 2).reshape(hidden_states.shape[0], hidden_states.shape[2], -1) hidden_states = self.linear(hidden_states) return hidden_states class ParakeetEncoderBlock(GradientCheckpointingLayer): def __init__(self, config: ParakeetEncoderConfig, layer_idx: int | None = None): super().__init__() self.gradient_checkpointing = False self.feed_forward1 = ParakeetEncoderFeedForward(config) self.self_attn = ParakeetEncoderAttention(config, layer_idx) self.conv = ParakeetEncoderConvolutionModule(config) self.feed_forward2 = ParakeetEncoderFeedForward(config) self.norm_feed_forward1 = nn.LayerNorm(config.hidden_size) self.norm_self_att = nn.LayerNorm(config.hidden_size) self.norm_conv = nn.LayerNorm(config.hidden_size) self.norm_feed_forward2 = nn.LayerNorm(config.hidden_size) self.norm_out = nn.LayerNorm(config.hidden_size) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor | None = None, position_embeddings: torch.Tensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> torch.Tensor: residual = hidden_states hidden_states = self.feed_forward1(self.norm_feed_forward1(hidden_states)) hidden_states = residual + 0.5 * hidden_states # the conformer architecture uses a factor of 0.5 normalized_hidden_states = self.norm_self_att(hidden_states) attn_output, _ = self.self_attn( hidden_states=normalized_hidden_states, attention_mask=attention_mask, position_embeddings=position_embeddings, **kwargs, ) 
hidden_states = hidden_states + attn_output conv_output = self.conv(self.norm_conv(hidden_states), attention_mask=attention_mask) hidden_states = hidden_states + conv_output ff2_output = self.feed_forward2(self.norm_feed_forward2(hidden_states)) hidden_states = hidden_states + 0.5 * ff2_output # the conformer architecture uses a factor of 0.5 hidden_states = self.norm_out(hidden_states) return hidden_states @auto_docstring class ParakeetPreTrainedModel(PreTrainedModel): config: ParakeetCTCConfig base_model_prefix = "model" main_input_name = "input_features" input_modalities = "audio" supports_gradient_checkpointing = True _no_split_modules = ["ParakeetEncoderBlock"] _supports_flat_attention_mask = True _supports_sdpa = True _supports_flex_attn = True # TODO: @eustlb, add support when flash attention supports custom attention bias _supports_flash_attn = False _can_compile_fullgraph = True _supports_attention_backend = True _can_record_outputs = { "hidden_states": ParakeetEncoderBlock, "attentions": ParakeetEncoderAttention, } @torch.no_grad() def _init_weights(self, module): super()._init_weights(module) if hasattr(self.config, "initializer_range"): std = self.config.initializer_range else: # 0.02 is the standard default value across the library std = getattr(self.config.get_text_config(), "initializer_range", 0.02) if isinstance(module, ParakeetEncoderAttention): # Initialize positional bias parameters init.normal_(module.bias_u, mean=0.0, std=std) init.normal_(module.bias_v, mean=0.0, std=std) elif isinstance(module, ParakeetEncoderRelPositionalEncoding): inv_freq = 1.0 / ( 10000.0 ** (torch.arange(0, self.config.hidden_size, 2, dtype=torch.int64) / self.config.hidden_size) ) init.copy_(module.inv_freq, inv_freq) def _get_subsampling_output_length(self, input_lengths: torch.Tensor): encoder_config = self.config.encoder_config if isinstance(self.config, ParakeetCTCConfig) else self.config kernel_size = encoder_config.subsampling_conv_kernel_size stride = 
encoder_config.subsampling_conv_stride num_layers = int(math.log2(encoder_config.subsampling_factor)) all_paddings = (kernel_size - 1) // 2 * 2 add_pad = all_paddings - kernel_size lengths = input_lengths for _ in range(num_layers): lengths = torch.div(lengths.to(dtype=torch.float) + add_pad, stride) + 1.0 lengths = torch.floor(lengths) return lengths.to(dtype=torch.int) def _get_output_attention_mask(self, attention_mask: torch.Tensor, target_length: int | None = None): """ Convert the input attention mask to its subsampled form. `target_length` sets the desired output length, useful when the attention mask length differs from `sum(-1).max()` (i.e., when the longest sequence in the batch is padded) """ output_lengths = self._get_subsampling_output_length(attention_mask.sum(-1)) # Use target_length if provided, otherwise use max length in batch max_length = target_length if target_length is not None else output_lengths.max() attention_mask = torch.arange(max_length, device=attention_mask.device) < output_lengths[:, None] return attention_mask @auto_docstring( custom_intro=""" The Parakeet Encoder model, based on the [Fast Conformer architecture](https://huggingface.co/papers/2305.05084). 
""" ) class ParakeetEncoder(ParakeetPreTrainedModel): config: ParakeetEncoderConfig base_model_prefix = "encoder" def __init__(self, config: ParakeetEncoderConfig): super().__init__(config) self.config = config self.gradient_checkpointing = False self.dropout = config.dropout self.dropout_positions = config.dropout_positions self.layerdrop = config.layerdrop self.input_scale = math.sqrt(config.hidden_size) if config.scale_input else 1.0 self.subsampling = ParakeetEncoderSubsamplingConv2D(config) self.encode_positions = ParakeetEncoderRelPositionalEncoding(config) self.layers = nn.ModuleList( [ParakeetEncoderBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.post_init() @auto_docstring @merge_with_config_defaults @capture_outputs @can_return_tuple def forward( self, input_features: torch.Tensor, attention_mask: torch.Tensor | None = None, output_attention_mask: bool | None = None, **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutput: r""" output_attention_mask (`bool`, *optional*): Whether to return the output attention mask. 
Example: ```python >>> from transformers import AutoProcessor, ParakeetEncoder >>> from datasets import load_dataset, Audio >>> model_id = "nvidia/parakeet-ctc-1.1b" >>> processor = AutoProcessor.from_pretrained(model_id) >>> encoder = ParakeetEncoder.from_pretrained(model_id) >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.cast_column("audio", Audio(sampling_rate=processor.feature_extractor.sampling_rate)) >>> inputs = processor(ds[0]["audio"]["array"]) >>> encoder_outputs = encoder(**inputs) >>> print(encoder_outputs.last_hidden_state.shape) ``` """ hidden_states = self.subsampling(input_features, attention_mask) hidden_states = hidden_states * self.input_scale position_embeddings = self.encode_positions(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) position_embeddings = nn.functional.dropout( position_embeddings, p=self.dropout_positions, training=self.training ) if attention_mask is not None: output_mask = self._get_output_attention_mask(attention_mask, target_length=hidden_states.shape[1]) attention_mask = output_mask.unsqueeze(1).expand(-1, hidden_states.shape[1], -1) attention_mask = attention_mask & attention_mask.transpose(1, 2) attention_mask = attention_mask.unsqueeze(1) for encoder_layer in self.layers: # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: # skip the layer to_drop = True if not to_drop: hidden_states = encoder_layer( hidden_states, attention_mask=attention_mask, position_embeddings=position_embeddings, **kwargs, ) return ParakeetEncoderModelOutput( last_hidden_state=hidden_states, attention_mask=output_mask.int() if output_attention_mask else None ) @dataclass class ParakeetGenerateOutput(ModelOutput): """ Outputs of Parakeet models. 
Args: sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. logits (`tuple(torch.FloatTensor)` *optional*, returned when `output_logits=True`): Unprocessed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), with each tensor of shape `(batch_size, config.vocab_size)`. attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True`): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of `torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`. """ sequences: torch.LongTensor logits: tuple[torch.FloatTensor] | None = None attentions: tuple[tuple[torch.FloatTensor]] | None = None hidden_states: tuple[tuple[torch.FloatTensor]] | None = None @auto_docstring( custom_intro=""" Parakeet Encoder with a Connectionist Temporal Classification (CTC) head. 
""" ) class ParakeetForCTC(ParakeetPreTrainedModel): config: ParakeetCTCConfig def __init__(self, config: ParakeetCTCConfig): super().__init__(config) self.encoder = ParakeetEncoder(config.encoder_config) # Conv rather than linear to be consistent with NeMO decoding layer self.ctc_head = nn.Conv1d(config.encoder_config.hidden_size, config.vocab_size, kernel_size=1) self.post_init() @auto_docstring @can_return_tuple def forward( self, input_features: torch.Tensor, attention_mask: torch.Tensor | None = None, labels: torch.Tensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> CausalLMOutput: r""" Example: ```python >>> from transformers import AutoProcessor, ParakeetForCTC >>> from datasets import load_dataset, Audio >>> model_id = "nvidia/parakeet-ctc-1.1b" >>> processor = AutoProcessor.from_pretrained(model_id) >>> model = ParakeetForCTC.from_pretrained(model_id) >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.cast_column("audio", Audio(sampling_rate=processor.feature_extractor.sampling_rate)) >>> inputs = processor(ds[0]["audio"]["array"], text=ds[0]["text"]) >>> outputs = model(**inputs) >>> print(outputs.loss) ```""" encoder_outputs = self.encoder( input_features=input_features, attention_mask=attention_mask, **kwargs, ) hidden_states = encoder_outputs.last_hidden_state logits = self.ctc_head(hidden_states.transpose(1, 2)).transpose(1, 2) loss = None if labels is not None: # retrieve loss input_lengths from attention_mask attention_mask = ( attention_mask if attention_mask is not None else torch.ones_like(input_features, dtype=torch.long) ) input_lengths = self._get_subsampling_output_length(attention_mask.sum(-1)) # assuming that padded tokens are filled with -100 # when not being attended to labels_mask = labels != self.config.pad_token_id target_lengths = labels_mask.sum(-1) flattened_targets = labels.masked_select(labels_mask) # ctc_loss doesn't support fp16 log_probs = 
nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1) with torch.backends.cudnn.flags(enabled=False): loss = nn.functional.ctc_loss( log_probs, flattened_targets, input_lengths, target_lengths, blank=self.config.pad_token_id, reduction=self.config.ctc_loss_reduction, zero_infinity=self.config.ctc_zero_infinity, ) return CausalLMOutput( loss=loss, logits=logits, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @torch.no_grad() def generate( self, input_features: torch.Tensor, attention_mask: torch.Tensor | None = None, return_dict_in_generate: bool = False, **kwargs: Unpack[TransformersKwargs], ) -> ParakeetGenerateOutput | torch.LongTensor: r""" Example: ```python >>> from transformers import AutoProcessor, ParakeetForCTC >>> from datasets import load_dataset, Audio >>> model_id = "nvidia/parakeet-ctc-1.1b" >>> processor = AutoProcessor.from_pretrained(model_id) >>> model = ParakeetForCTC.from_pretrained(model_id) >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.cast_column("audio", Audio(sampling_rate=processor.feature_extractor.sampling_rate)) >>> inputs = processor(ds[0]["audio"]["array"], text=ds[0]["text"]) >>> predicted_ids = model.generate(**inputs) >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True) >>> print(transcription) ``` """ kwargs["return_dict"] = True outputs: CausalLMOutput = self.forward( input_features=input_features, attention_mask=attention_mask, **kwargs, ) # greedy decoding sequences = outputs.logits.argmax(dim=-1) # mask out padded tokens if attention_mask is not None: attention_mask = self._get_output_attention_mask(attention_mask, target_length=sequences.shape[1]) sequences[~attention_mask] = self.config.pad_token_id if return_dict_in_generate: return ParakeetGenerateOutput( sequences=sequences, logits=outputs.logits, attentions=outputs.attentions, hidden_states=outputs.hidden_states, ) 
return sequences __all__ = ["ParakeetForCTC", "ParakeetEncoder", "ParakeetPreTrainedModel"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/parakeet/modular_parakeet.py", "license": "Apache License 2.0", "lines": 532, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/parakeet/processing_parakeet.py
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ...audio_utils import AudioInput, make_list_of_audio from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack from ...tokenization_utils_base import PreTokenizedInput, TextInput from ...utils import auto_docstring, logging logger = logging.get_logger(__name__) class ParakeetProcessorKwargs(ProcessingKwargs, total=False): _defaults = { "audio_kwargs": { "sampling_rate": 16000, "padding": "longest", "return_attention_mask": True, }, "text_kwargs": { "padding": True, "padding_side": "right", "add_special_tokens": False, }, "common_kwargs": {"return_tensors": "pt"}, } @auto_docstring class ParakeetProcessor(ProcessorMixin): def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) @auto_docstring def __call__( self, audio: AudioInput, text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] | None = None, sampling_rate: int | None = None, **kwargs: Unpack[ParakeetProcessorKwargs], ): r""" sampling_rate (`int`, *optional*): The sampling rate of the input audio in Hz. This should match the sampling rate expected by the feature extractor (defaults to 16000 Hz). If provided, it will be validated against the processor's expected sampling rate, and an error will be raised if they don't match. If not provided, a warning will be issued and the default sampling rate will be assumed. 
""" audio = make_list_of_audio(audio) output_kwargs = self._merge_kwargs( ParakeetProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) if sampling_rate is None: logger.warning_once( f"You've provided audio without specifying the sampling rate. It will be assumed to be {output_kwargs['audio_kwargs']['sampling_rate']}, which can result in silent errors." ) elif sampling_rate != output_kwargs["audio_kwargs"]["sampling_rate"]: raise ValueError( f"The sampling rate of the audio ({sampling_rate}) does not match the sampling rate of the processor ({output_kwargs['audio_kwargs']['sampling_rate']}). Please provide resampled the audio to the expected sampling rate." ) if audio is not None: inputs = self.feature_extractor(audio, **output_kwargs["audio_kwargs"]) if text is not None: encodings = self.tokenizer(text, **output_kwargs["text_kwargs"]) if text is None: return inputs else: inputs["labels"] = encodings["input_ids"] return inputs @property def model_input_names(self): feature_extractor_input_names = self.feature_extractor.model_input_names return feature_extractor_input_names + ["labels"] __all__ = ["ParakeetProcessor"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/parakeet/processing_parakeet.py", "license": "Apache License 2.0", "lines": 79, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:tests/models/parakeet/test_feature_extraction_parakeet.py
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the Parakeet feature extraction.""" import itertools import random import unittest import numpy as np from transformers import ParakeetFeatureExtractor from transformers.testing_utils import require_torch from transformers.utils import is_datasets_available, is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset global_rng = random.Random() def floats_list(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values class ParakeetFeatureExtractionTester: def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=80, hop_length=160, win_length=400, n_fft=512, sampling_rate=16000, padding_value=0.0, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.feature_size = feature_size self.hop_length = hop_length self.win_length = win_length self.n_fft = n_fft self.sampling_rate = sampling_rate 
self.padding_value = padding_value def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "hop_length": self.hop_length, "win_length": self.win_length, "n_fft": self.n_fft, "sampling_rate": self.sampling_rate, "padding_value": self.padding_value, } # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTester.prepare_inputs_for_common def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)] else: # make sure that inputs increase in size speech_inputs = [ floats_list((x, self.feature_size)) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs class ParakeetFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = ParakeetFeatureExtractor def setUp(self): self.feat_extract_tester = ParakeetFeatureExtractionTester(self) def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id")[:num_samples]["audio"] return [x["array"] for x in speech_samples] @require_torch def test_torch_integration(self): """ reproducer: https://gist.github.com/eustlb/c4a0999e54466b7e8d8b040d8e0900df """ # fmt: off EXPECTED_INPUT_FEATURES = torch.tensor( [ 0.60935932, 1.18187428, 1.29877627, 1.36461377, 1.09311509, 1.39821815, 1.63753450, 1.37100816, 1.26510608, 1.70332706, 1.69067430, 1.28770995, 1.52999651, 1.77962756, 1.71420062, 1.21944094, 1.30884087, 1.44343364, 1.17694926, 1.42690814, 1.78877723, 1.68655288, 1.27155364, 1.66103351, 1.75820673, 1.41575801, 1.40622294, 1.70603478, 1.63117850, 1.13353217, ] ) # fmt: on 
input_speech = self._load_datasamples(1) feature_extractor = ParakeetFeatureExtractor() inputs = feature_extractor(input_speech, return_tensors="pt") self.assertEqual(inputs.input_features.shape, (1, 586, 80)) torch.testing.assert_close(inputs.input_features[0, 100, :30], EXPECTED_INPUT_FEATURES, atol=1e-4, rtol=1e-4) self.assertEqual(inputs.attention_mask.shape, (1, 586)) # last frame should be masked self.assertEqual(inputs.attention_mask.sum(), 585) @require_torch def test_torch_integration_batch(self): """ reproducer: https://gist.github.com/eustlb/c4a0999e54466b7e8d8b040d8e0900df """ # fmt: off EXPECTED_INPUT_FEATURES = torch.tensor( [ [ 0.60935932, 1.18187428, 1.29877627, 1.36461377, 1.09311533, 1.39821827, 1.63753450, 1.37100816, 1.26510608, 1.70332706, 1.69067478, 1.28770995, 1.52999651, 1.77962780, 1.71420062, 1.21944094, 1.30884087, 1.44343400, 1.17694926, 1.42690814, 1.78877664, 1.68655288, 1.27155364, 1.66103351, 1.75820673, 1.41575801, 1.40622294, 1.70603478, 1.63117862, 1.13353217], [ 0.58339858, 0.54317272, 0.46222782, 0.34154415, 0.17806509, 0.32182255, 0.28909618, 0.02141305, -0.09710173, -0.35818669, -0.48172510, -0.52942866, -0.58029658, -0.70519227, -0.67929971, -0.54698551, -0.28611183, -0.24780270, -0.31363955, -0.41913241, -0.32394424, -0.44897896, -0.68657434, -0.62047797, -0.46886450, -0.65987164, -1.02435589, -0.58527517, -0.56095684, -0.73582536], [-0.91937613, -0.97933632, -1.06843162, -1.02642107, -0.94232899, -0.83840621, -0.82306921, -0.45763230, -0.45182887, -0.75917768, -0.42541453, -0.28512970, -0.39637473, -0.66478080, -0.68004298, -0.49690303, -0.31799242, -0.12917191, 0.13149273, 0.10163058, -0.40041649, 0.05001565, 0.23906317, 0.28816083, 0.14308788, -0.29588422, -0.05428466, 0.14418560, 0.28865972, -0.12138986], [ 0.73217624, 0.84484011, 0.79323846, 0.66315967, 0.41556871, 0.88633078, 0.90718138, 0.91268104, 1.15920067, 1.26141894, 1.10222173, 0.92990804, 0.96352047, 0.88142169, 0.56635213, 0.71491158, 0.81301254, 0.67301887, 
0.74780160, 0.64429688, 0.22885245, 0.47035533, 0.46498337, 0.17544533, 0.44458991, 0.79245001, 0.57207537, 0.85768145, 1.00491571, 0.93360955], [ 1.40496337, 1.32492661, 1.16519547, 0.98379827, 0.77614164, 0.95871657, 0.81910741, 1.23010278, 1.33011520, 1.16538525, 1.28319681, 1.45041633, 1.33421600, 0.91677380, 0.67107433, 0.52890682, 0.82009870, 1.15821445, 1.15343642, 1.10958862, 1.44962490, 1.44485891, 1.46043479, 1.90800595, 1.95863307, 1.63670933, 1.49021459, 1.18701911, 0.74906683, 0.84700620] ] ) # fmt: on input_speech = self._load_datasamples(5) feature_extractor = ParakeetFeatureExtractor() inputs = feature_extractor(input_speech, return_tensors="pt") self.assertEqual(inputs.input_features.shape, (5, 2941, 80)) torch.testing.assert_close(inputs.input_features[:, 100, :30], EXPECTED_INPUT_FEATURES, atol=1e-4, rtol=1e-4) self.assertEqual(inputs.attention_mask.shape, (5, 2941)) self.assertTrue(inputs.attention_mask.sum(dim=-1).tolist(), [585, 481, 1248, 990, 2940])
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/parakeet/test_feature_extraction_parakeet.py", "license": "Apache License 2.0", "lines": 167, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:tests/models/parakeet/test_modeling_parakeet.py
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Parakeet model.""" import json import tempfile import unittest from pathlib import Path from transformers import is_datasets_available, is_torch_available from transformers.testing_utils import cleanup, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_datasets_available(): from datasets import Audio, load_dataset if is_torch_available(): import torch from transformers import ( AutoProcessor, ParakeetCTCConfig, ParakeetEncoder, ParakeetEncoderConfig, ParakeetForCTC, ) class ParakeetEncoderModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, is_training=True, hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=256, hidden_act="silu", dropout=0, # so gradient checkpointing doesn't fail conv_kernel_size=9, subsampling_factor=8, subsampling_conv_channels=32, use_bias=True, num_mel_bins=80, scale_input=True, ): # testing suite parameters self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.num_mel_bins = num_mel_bins self.is_training = is_training # config parameters self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = 
intermediate_size self.hidden_act = hidden_act self.dropout = dropout self.conv_kernel_size = conv_kernel_size self.subsampling_factor = subsampling_factor self.subsampling_conv_channels = subsampling_conv_channels self.use_bias = use_bias self.num_mel_bins = num_mel_bins self.scale_input = scale_input # Calculate output sequence length after subsampling self.output_seq_length = seq_length // subsampling_factor self.encoder_seq_length = self.output_seq_length self.key_length = self.output_seq_length def prepare_config_and_inputs(self): input_features = floats_tensor([self.batch_size, self.seq_length, self.num_mel_bins]) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() return config, input_features, attention_mask def get_config(self): return ParakeetEncoderConfig( hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.dropout, dropout_positions=self.dropout, layerdrop=self.dropout, activation_dropout=self.dropout, attention_dropout=self.dropout, conv_kernel_size=self.conv_kernel_size, subsampling_factor=self.subsampling_factor, subsampling_conv_channels=self.subsampling_conv_channels, use_bias=self.use_bias, num_mel_bins=self.num_mel_bins, scale_input=self.scale_input, ) def create_and_check_model(self, config, input_features, attention_mask): model = ParakeetEncoder(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_features, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, config.hidden_size) ) def prepare_config_and_inputs_for_common(self): config, input_features, attention_mask = self.prepare_config_and_inputs() inputs_dict = { "input_features": input_features, "attention_mask": attention_mask, } return config, inputs_dict def 
check_ctc_loss(self, config, input_values, *args): model = ParakeetForCTC(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) self.parent.assertTrue(isinstance(mean_loss, float)) @require_torch class ParakeetEncoderModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (ParakeetEncoder,) if is_torch_available() else () test_resize_embeddings = False def setUp(self): self.model_tester = ParakeetEncoderModelTester(self) self.config_tester = ConfigTester(self, config_class=ParakeetEncoderConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="ParakeetEncoder does not use inputs_embeds") def test_model_get_set_embeddings(self): pass class ParakeetForCTCModelTester: def __init__(self, parent, encoder_kwargs=None, is_training=True, vocab_size=128, pad_token_id=0): if encoder_kwargs is None: encoder_kwargs = {} self.parent = parent self.encoder_model_tester = ParakeetEncoderModelTester(parent, **encoder_kwargs) 
self.is_training = is_training self.batch_size = self.encoder_model_tester.batch_size self.output_seq_length = self.encoder_model_tester.output_seq_length self.num_hidden_layers = self.encoder_model_tester.num_hidden_layers self.seq_length = vocab_size self.hidden_size = self.encoder_model_tester.hidden_size self.vocab_size = vocab_size self.pad_token_id = pad_token_id def prepare_config_and_inputs(self): _, input_features, attention_mask = self.encoder_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_features, attention_mask def get_config(self): return ParakeetCTCConfig.from_encoder_config( encoder_config=self.encoder_model_tester.get_config(), vocab_size=self.vocab_size, pad_token_id=self.pad_token_id, ) def create_and_check_model(self, config, input_features, attention_mask): model = ParakeetForCTC(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_features, attention_mask=attention_mask) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.output_seq_length, self.vocab_size)) def prepare_config_and_inputs_for_common(self): config, input_features, attention_mask = self.prepare_config_and_inputs() inputs_dict = { "input_features": input_features, "attention_mask": attention_mask, } return config, inputs_dict def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.encoder_model_tester.check_ctc_loss(*config_and_inputs) @require_torch class ParakeetForCTCModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (ParakeetForCTC,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": ParakeetEncoder, "automatic-speech-recognition": ParakeetForCTC, } if is_torch_available() else {} ) test_attention_outputs = False test_resize_embeddings = False _is_composite = True def setUp(self): self.model_tester = ParakeetForCTCModelTester(self) self.config_tester = ConfigTester(self, 
config_class=ParakeetCTCConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="ParakeetEncoder does not use inputs_embeds") def test_model_get_set_embeddings(self): pass # Original function assumes vision+text model, so overwrite since Parakeet is audio+text # Below is modified from `tests/models/granite_speech/test_modeling_granite_speech.py` def test_sdpa_can_dispatch_composite_models(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not self._is_composite: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname) model_sdpa = model_sdpa.eval().to(torch_device) model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager") model_eager = model_eager.eval().to(torch_device) self.assertTrue(model_eager.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: raise ValueError("The eager model should not have SDPA attention layers") @require_torch class ParakeetForCTCIntegrationTest(unittest.TestCase): _dataset = None @classmethod def setUp(cls): cls.checkpoint_name = "nvidia/parakeet-ctc-1.1b" cls.dtype = torch.bfloat16 cls.processor = AutoProcessor.from_pretrained("nvidia/parakeet-ctc-1.1b") def tearDown(self): cleanup(torch_device, gc_collect=True) @classmethod def _load_dataset(cls): # Lazy loading of the dataset. 
Because it is a class method, it will only be loaded once per pytest process. if cls._dataset is None: cls._dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") cls._dataset = cls._dataset.cast_column( "audio", Audio(sampling_rate=cls.processor.feature_extractor.sampling_rate) ) def _load_datasamples(self, num_samples): self._load_dataset() ds = self._dataset speech_samples = ds.sort("id")[:num_samples]["audio"] return [x["array"] for x in speech_samples] @slow def test_1b_model_integration(self): """ bezzam reproducer (creates JSON directly in repo): https://gist.github.com/ebezzam/6382bdabfc64bb2541ca9f77deb7678d#file-reproducer_single-py eustlb reproducer: https://gist.github.com/eustlb/6e9e3aa85de3f7c340ec3c36e65f2fe6 """ RESULTS_PATH = Path(__file__).parent.parent.parent / "fixtures/parakeet/expected_results_single.json" with open(RESULTS_PATH, "r") as f: raw_data = json.load(f) EXPECTED_TOKEN_IDS = torch.tensor(raw_data["token_ids"]) EXPECTED_TRANSCRIPTIONS = raw_data["transcriptions"] samples = self._load_datasamples(1) model = ParakeetForCTC.from_pretrained(self.checkpoint_name, torch_dtype=self.dtype, device_map=torch_device) model.eval() model.to(torch_device) # -- apply inputs = self.processor(samples) inputs.to(torch_device, dtype=self.dtype) predicted_ids = model.generate(**inputs) torch.testing.assert_close(predicted_ids.cpu(), EXPECTED_TOKEN_IDS) predicted_transcripts = self.processor.batch_decode(predicted_ids, skip_special_tokens=True) self.assertListEqual(predicted_transcripts, EXPECTED_TRANSCRIPTIONS) @slow def test_1b_model_integration_batched(self): """ bezzam reproducer (creates JSON directly in repo): https://gist.github.com/ebezzam/6382bdabfc64bb2541ca9f77deb7678d#file-reproducer_batched-py eustlb reproducer: https://gist.github.com/eustlb/575b5da58de34a70116a1955b1183596 """ RESULTS_PATH = Path(__file__).parent.parent.parent / "fixtures/parakeet/expected_results_batch.json" with open(RESULTS_PATH, 
"r") as f: raw_data = json.load(f) EXPECTED_TOKEN_IDS = torch.tensor(raw_data["token_ids"]) EXPECTED_TRANSCRIPTIONS = raw_data["transcriptions"] samples = self._load_datasamples(5) model = ParakeetForCTC.from_pretrained(self.checkpoint_name, torch_dtype=self.dtype, device_map=torch_device) model.eval() model.to(torch_device) # -- apply inputs = self.processor(samples) inputs.to(torch_device, dtype=self.dtype) predicted_ids = model.generate(**inputs) torch.testing.assert_close(predicted_ids.cpu(), EXPECTED_TOKEN_IDS) predicted_transcripts = self.processor.batch_decode(predicted_ids, skip_special_tokens=True) self.assertListEqual(predicted_transcripts, EXPECTED_TRANSCRIPTIONS)
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/parakeet/test_modeling_parakeet.py", "license": "Apache License 2.0", "lines": 305, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:tests/models/parakeet/test_processing_parakeet.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ParakeetProcessor from transformers.testing_utils import require_torch, require_torchaudio from ...test_processing_common import ProcessorTesterMixin @require_torch @require_torchaudio class ParakeetProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = ParakeetProcessor text_input_name = "labels" model_id = "nvidia/parakeet-ctc-1.1b"
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/parakeet/test_processing_parakeet.py", "license": "Apache License 2.0", "lines": 23, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:tests/models/parakeet/test_tokenization_parakeet.py
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the ParakeetCTC tokenizer.""" import unittest from transformers.models.parakeet import ParakeetTokenizer from ...test_tokenization_common import TokenizerTesterMixin class ParakeetTokenizationTest(TokenizerTesterMixin, unittest.TestCase): slow_tokenizer_class = None rust_tokenizer_class = ParakeetTokenizer tokenizer_class = ParakeetTokenizer test_slow_tokenizer = False test_rust_tokenizer = True from_pretrained_id = "nvidia/parakeet-ctc-1.1b" @classmethod def setUpClass(cls): super().setUpClass() tokenizer = ParakeetTokenizer.from_pretrained("nvidia/parakeet-ctc-1.1b") tokenizer.save_pretrained(cls.tmpdirname) @unittest.skip(reason="This test does not apply to ParakeetTokenizer. More details in the test docstring itself.") def test_added_tokens_do_lower_case(self): """ Precompiled normalization from sentencepiece is `nmt_nfkc_cf` that includes lowercasing. Yet, ParakeetTokenizer does not have a do_lower_case attribute. This result in the test failing. """ pass @unittest.skip(reason="This needs a slow tokenizer. Parakeet does not have one!") def test_encode_decode_with_spaces(self): return @unittest.skip(reason="ParakeetTokenizer doesn't have tokenizer_file in its signature.") def test_rust_tokenizer_signature(self): pass
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/parakeet/test_tokenization_parakeet.py", "license": "Apache License 2.0", "lines": 42, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:utils/get_test_reports.py
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This util provides a way to manually run the tests of the transformers repo as they would be run by the CI. It was mainly used for models tests, so if you find features missing for another suite, do not hesitate to open a PR. Functionnalities: - Running specific test suite (models, tokenizers, etc.) - Parallel execution across multiple processes (each has to be launched separately with different `--processes` argument) - GPU/CPU test filtering and slow tests filter - Temporary cache management for isolated test runs - Resume functionality for interrupted test runs - Important models subset testing Example usages are below. 
""" import argparse import contextlib import os import subprocess import tempfile from pathlib import Path import torch from .important_files import IMPORTANT_MODELS def is_valid_test_dir(path: Path) -> bool: """Check if a given path represents a valid test dir: the path must point to a dir, not start with '__' or '.'""" return path.is_dir() and not path.name.startswith("__") and not path.name.startswith(".") def run_pytest( suite: str, subdir: Path, root_test_dir: Path, machine_type: str, dry_run: bool, tmp_cache: str, cpu_tests: bool ) -> None: """ Execute pytest on a specific test directory with configured options: - suite (str): name of the test suite being run (e.g., 'models', 'tokenizers') - subdir (Path): the specific directory containing tests to run - root_test_dir (Path): the root directory of all tests, used for relative paths - machine_type (str): type of machine/environment (e.g., 'cpu', 'single-gpu', 'multi-gpu') - dry_run (bool): if True, only print the command without executing it - tmp_cache (str): prefix for temporary cache directory. 
If empty, no temp cache is used - cpu_tests (bool): if True, include CPU-only tests; if False, exclude non-device tests """ relative_path = subdir.relative_to(root_test_dir) report_name = f"{machine_type}_{suite}_{relative_path}_test_reports" print(f"Suite: {suite} | Running on: {relative_path}") cmd = ["python3", "-m", "pytest", "-rsfE", "-v", f"--make-reports={report_name}", str(subdir)] if not cpu_tests: cmd = cmd + ["-m", "not not_device_test"] ctx_manager = tempfile.TemporaryDirectory(prefix=tmp_cache) if tmp_cache else contextlib.nullcontext() with ctx_manager as tmp_dir: env = os.environ.copy() if tmp_cache: env["HUGGINGFACE_HUB_CACHE"] = tmp_dir print(f"Using temporary cache located at {tmp_dir = }") print("Command:", " ".join(cmd)) if not dry_run: subprocess.run(cmd, check=False, env=env) def handle_suite( suite: str, test_root: Path, machine_type: str, dry_run: bool, tmp_cache: str = "", resume_at: str | None = None, only_in: list[str] | None = None, cpu_tests: bool = False, process_id: int = 1, total_processes: int = 1, ) -> None: """ Handle execution of a complete test suite with advanced filtering and process distribution. Args: - suite (str): Name of the test suite to run (corresponds to a directory under test_root). - test_root (Path): Root directory containing all test suites. - machine_type (str): Machine/environment type for report naming and identification. - dry_run (bool): If True, only print commands without executing them. - tmp_cache (str, optional): Prefix for temporary cache directories. If empty, no temp cache is used. - resume_at (str, optional): Resume execution starting from this subdirectory name. Useful for restarting interrupted test runs. Defaults to None (run from the beginning). - only_in (list[str], optional): Only run tests in these specific subdirectories. Can include special values like IMPORTANT_MODELS. Defaults to None (run all tests). - cpu_tests (bool, optional): Whether to include CPU-only tests. Defaults to False. 
- process_id (int, optional): Current process ID for parallel execution (1-indexed). Defaults to 1. - total_processes (int, optional): Total number of parallel processes. Defaults to 1. """ # Check path to suite full_path = test_root / suite if not full_path.exists(): print(f"Test folder does not exist: {full_path}") return # Establish the list of subdir to go through subdirs = sorted(full_path.iterdir()) subdirs = [s for s in subdirs if is_valid_test_dir(s)] if resume_at is not None: subdirs = [s for s in subdirs if s.name >= resume_at] if only_in is not None: subdirs = [s for s in subdirs if s.name in only_in] if subdirs and total_processes > 1: # This interleaves the subdirs / files. For instance for subdirs = [A, B, C, D, E] and 2 processes: # - script launcehd with `--processes 0 2` will run A, C, E # - script launcehd with `--processes 1 2` will run B, D subdirs = subdirs[process_id::total_processes] # If the subdir list is not empty, go through each if subdirs: for subdir in subdirs: run_pytest(suite, subdir, test_root, machine_type, dry_run, tmp_cache, cpu_tests) # Otherwise, launch pytest from the full path else: run_pytest(suite, full_path, test_root, machine_type, dry_run, tmp_cache, cpu_tests) if __name__ == "__main__": """Command-line interface for running test suite with comprehensive reporting. Check handle_suite for more details. 
Command-line Arguments: folder: Path to the root test directory (required) --suite: Test suite name to run (default: "models") --cpu-tests: Include CPU-only tests in addition to device tests --run-slow: Execute slow tests instead of skipping them --resume-at: Resume execution from a specific subdirectory --only-in: Run tests only in specified subdirectories (supports IMPORTANT_MODELS) --processes: Process distribution as "process_id total_processes" --dry-run: Print commands without executing them --tmp-cache: Use temporary cache directories for isolated runs --machine-type: Override automatic machine type detection Machine Type Detection: - 'cpu': No CUDA available - 'single-gpu': CUDA available with 1 GPU - 'multi-gpu': CUDA available with multiple GPUs Process Distribution: Use --processes to split work across multiple parallel processes: --processes 0 4 # This is process 0 of 4 total processes --processes 1 4 # This is process 1 of 4 total processes ... Usage Examples: # Basic model testing python3 -m utils.get_test_reports tests/ --suite models # Run slow tests for important models only python3 -m utils.get_test_reports tests/ --suite models --run-slow --only-in IMPORTANT_MODELS # Parallel execution across 4 processes, second process to launch (processes are 0-indexed) python3 -m utils.get_test_reports tests/ --suite models --processes 1 4 # Resume interrupted run from 'bert' subdirectory with a tmp cache python3 -m utils.get_test_reports tests/ --suite models --resume-at bert --tmp-cache /tmp/ # Run specific models with CPU tests python3 -m utils.get_test_reports tests/ --suite models --only-in bert gpt2 --cpu-tests # Run slow tests for only important models with a tmp cache python3 -m utils.get_test_reports tests/ --suite models --run-slow --only-in IMPORTANT_MODELS --tmp-cache /tmp/ """ parser = argparse.ArgumentParser() parser.add_argument("folder", help="Path to test root folder (e.g., ./tests)") # Choose which tests to run (broad picture) 
parser.add_argument("--suite", type=str, default="models", help="Test suit to run") parser.add_argument("--cpu-tests", action="store_true", help="Also runs non-device tests") parser.add_argument("--run-slow", action="store_true", help="Run slow tests instead of skipping them") parser.add_argument("--collect-outputs", action="store_true", help="Collect outputs of the tests") # Fine-grain control over the tests to run parser.add_argument("--resume-at", type=str, default=None, help="Resume at a specific subdir / file in the suite") parser.add_argument( "--only-in", type=str, nargs="+", help="Only run tests in the given subdirs / file. Use IMPORTANT_MODELS to run only the important models tests.", ) # How to run the test suite: is the work divided among processes, do a try run, use temp cache? parser.add_argument( "--processes", type=int, nargs="+", help="Inform each CI process as to the work to do: format as `process_id total_processes`. " "In order to run with multiple (eg. 3) processes, you need to run the script multiple times (eg. 
3 times).", ) parser.add_argument("--dry-run", action="store_true", help="Only print commands without running them") parser.add_argument("--tmp-cache", type=str, help="Change HUGGINGFACE_HUB_CACHE to a tmp dir for each test") # This is a purely decorative argument, but it can be useful to distinguish between runs parser.add_argument( "--machine-type", type=str, default="", help="Machine type, automatically inferred if not provided" ) args = parser.parse_args() # Handle run slow if args.run_slow: os.environ["RUN_SLOW"] = "yes" print("[WARNING] Running slow tests.") else: print("[WARNING] Skipping slow tests.") # Handle multiple CI processes if args.processes is None: process_id, total_processes = 1, 1 elif len(args.processes) == 2: process_id, total_processes = args.processes else: raise ValueError(f"Invalid processes argument: {args.processes}") # Assert test root exists test_root = Path(args.folder).resolve() if not test_root.exists(): print(f"Root test folder not found: {test_root}") exit(1) # Handle collection of outputs if args.collect_outputs: os.environ["PATCH_TESTING_METHODS_TO_COLLECT_OUTPUTS"] = "yes" reports_dir = test_root.parent / "reports" os.environ["_PATCHED_TESTING_METHODS_OUTPUT_DIR"] = str(reports_dir) # Infer machine type if not provided if args.machine_type == "": if not torch.cuda.is_available(): machine_type = "cpu" else: machine_type = "multi-gpu" if torch.cuda.device_count() > 1 else "single-gpu" else: machine_type = args.machine_type # Reduce the scope for models if necessary only_in = args.only_in if args.only_in else None if only_in == ["IMPORTANT_MODELS"]: only_in = IMPORTANT_MODELS # Launch suite handle_suite( suite=args.suite, test_root=test_root, machine_type=machine_type, dry_run=args.dry_run, tmp_cache=args.tmp_cache, resume_at=args.resume_at, only_in=only_in, cpu_tests=args.cpu_tests, process_id=process_id, total_processes=total_processes, )
{ "repo_id": "huggingface/transformers", "file_path": "utils/get_test_reports.py", "license": "Apache License 2.0", "lines": 230, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:tests/models/qwen3_omni_moe/test_modeling_qwen3_omni_moe.py
# Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Qwen2.5-Omni model.""" import tempfile import unittest from io import BytesIO from urllib.request import urlopen import librosa import pytest import requests from transformers import ( AutoProcessor, Qwen3OmniMoeForConditionalGeneration, Qwen3OmniMoeThinkerConfig, Qwen3OmniMoeThinkerForConditionalGeneration, is_torch_available, is_vision_available, ) from transformers.models.qwen3_omni_moe.configuration_qwen3_omni_moe import Qwen3OmniMoeTalkerCodePredictorConfig from transformers.testing_utils import ( Expectations, cleanup, require_flash_attn, require_torch, require_torch_accelerator, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, floats_tensor, ids_tensor, ) if is_torch_available(): import torch if is_vision_available(): from PIL import Image class Qwen3OmniMoeThinkerForConditionalGenerationTester: def __init__( self, parent, batch_size=3, feat_seq_length=30, num_channels=3, image_size=16, seq_length=39, audio_token_id=1, image_token_id=2, video_token_id=3, position_id_per_seconds=13, seconds_per_chunk=2, audio_start_token_id=4, audio_end_token_id=5, user_token_id=6, vision_start_token_id=7, vision_end_token_id=8, initializer_range=0.02, ): self.parent = 
parent self.vision_config = { "depth": 2, "embed_dim": 32, "hidden_act": "quick_gelu", "hidden_size": 32, "out_hidden_size": 32, "intermediate_size": 24, "mlp_ratio": 4, "num_heads": 4, "patch_size": 16, "spatial_merge_size": 1, "temporal_patch_size": 2, "initializer_range": 0.02, "deepstack_visual_indexes": [1], } self.audio_config = { "model_type": "qwen_omni_thinker_audio_encoder", "d_model": 32, "encoder_attention_heads": 4, "encoder_ffn_dim": 32, "encoder_layers": 2, "num_mel_bins": 20, "max_source_positions": 1500, "initializer_range": 0.02, "n_window": 50, "output_dim": 32, "n_window_infer": 100, } self.text_config = { "rope_parameters": { "mrope_section": [1, 1, 2], "rope_type": "default", "type": "default", "interleaved": True, }, "vocab_size": 99, "hidden_size": 32, "intermediate_size": 37, "num_hidden_layers": 4, "num_attention_heads": 4, "num_key_value_heads": 2, "hidden_act": "silu", "max_position_embeddings": 1024, "rms_norm_eps": 1e-06, "use_cache": True, "tie_word_embeddings": False, "rope_theta": 1000000.0, "use_sliding_window": False, "sliding_window": 50, "max_window_layers": 3, "attention_dropout": 0.0, "pad_token_id": 0, "initializer_range": 0.02, "moe_intermediate_size": 32, "num_experts_per_tok": 2, "num_experts": 8, "decoder_sparse_step": 1, } self.audio_token_id = audio_token_id self.image_token_id = image_token_id self.video_token_id = video_token_id self.position_id_per_seconds = position_id_per_seconds self.seconds_per_chunk = seconds_per_chunk self.audio_start_token_id = audio_start_token_id self.audio_end_token_id = audio_end_token_id self.vision_start_token_id = vision_start_token_id self.vision_end_token_id = vision_end_token_id self.user_token_id = user_token_id self.initializer_range = initializer_range self.batch_size = batch_size self.feat_seq_length = feat_seq_length self.num_channels = num_channels self.image_size = image_size self.seq_length = seq_length self.is_training = False # Used from `self.model_tester` by common model 
tests self.num_hidden_layers = self.text_config["num_hidden_layers"] self.hidden_size = self.text_config["hidden_size"] self.num_attention_heads = self.text_config["num_attention_heads"] self.vocab_size = self.text_config["vocab_size"] def get_config(self): return Qwen3OmniMoeThinkerConfig( audio_config=self.audio_config, vision_config=self.vision_config, text_config=self.text_config, audio_token_id=self.audio_token_id, image_token_id=self.image_token_id, video_token_id=self.video_token_id, position_id_per_seconds=self.position_id_per_seconds, seconds_per_chunk=self.seconds_per_chunk, audio_start_token_id=self.audio_start_token_id, audio_end_token_id=self.audio_end_token_id, vision_start_token_id=self.vision_start_token_id, vision_end_token_id=self.vision_end_token_id, user_token_id=self.user_token_id, initializer_range=self.initializer_range, ) def prepare_config_and_inputs(self): config = self.get_config() patch_size = config.vision_config.patch_size temporal_patch_size = config.vision_config.temporal_patch_size pixel_values = floats_tensor( [ self.batch_size * (self.image_size**2) // (patch_size**2), self.num_channels * (patch_size**2) * temporal_patch_size, ] ) pixel_grid_thw = torch.LongTensor( [[1, self.image_size / patch_size, self.image_size / patch_size]] * self.batch_size ).to(pixel_values.device) input_features_values = floats_tensor( [self.batch_size, self.audio_config["num_mel_bins"], self.feat_seq_length] ) feature_attention_mask = torch.ones([self.batch_size, self.feat_seq_length], dtype=torch.long).to(torch_device) return config, pixel_values, pixel_grid_thw, input_features_values, feature_attention_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, pixel_grid_thw, input_features_values, feature_attention_mask = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], config.get_text_config().vocab_size - 3) + 3 attention_mask = 
torch.ones(input_ids.shape, dtype=torch.long).to(torch_device) # Make sure no other tokens are set to special, to prevetn flakiness tokens_to_replace = torch.tensor( [ config.image_token_id, config.audio_token_id, config.audio_start_token_id, config.audio_end_token_id, config.vision_start_token_id, config.vision_end_token_id, ], device=input_ids.device, ) input_ids[torch.isin(input_ids, tokens_to_replace)] = config.text_config.pad_token_id attention_mask[:, :1] = 0 # Audio token placeholders should be wrapped in start and end token ids audio_feat_length = (((self.feat_seq_length - 1) // 2 + 1 - 1) // 2 + 1 - 1) // 2 + 1 input_ids[:, 1] = config.audio_start_token_id input_ids[:, 2 : (2 + audio_feat_length)] = config.audio_token_id input_ids[:, 2 + audio_feat_length] = config.audio_end_token_id # Image token placeholders should be wrapped in start and end token ids input_ids[:, -4:-1] = torch.tensor( [config.vision_start_token_id, config.image_token_id, config.vision_end_token_id] ) inputs_dict = { "input_features": input_features_values, "feature_attention_mask": feature_attention_mask, "input_ids": input_ids, "attention_mask": attention_mask, "image_grid_thw": pixel_grid_thw, "pixel_values": pixel_values, } return config, inputs_dict def create_and_check_qwenomnithinker_model_fp16_forward(self, config, input_ids, pixel_values, attention_mask): model = Qwen3OmniMoeThinkerForConditionalGeneration(config=config) model.to(torch_device) model.eval() with torch.autocast(device_type=torch_device, dtype=torch.float16): logits = model( input_ids=input_ids, attention_mask=attention_mask, pixel_values=pixel_values.to(torch.bfloat16), return_dict=True, )["logits"] self.parent.assertFalse(torch.isnan(logits).any().item()) @require_torch class Qwen3OmniMoeThinkerForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): """ Model tester for `Qwen3OmniMoeThinkerForConditionalGeneration`. 
""" all_model_classes = (Qwen3OmniMoeThinkerForConditionalGeneration,) if is_torch_available() else () all_generative_model_classes = (Qwen3OmniMoeThinkerForConditionalGeneration,) if is_torch_available() else () skip_test_audio_features_output_shape = True # Qwen3OmniMoe merges batch_size and audio_output_lengths in index 0 _is_composite = True model_split_percents = [0.5, 0.9] def setUp(self): self.model_tester = Qwen3OmniMoeThinkerForConditionalGenerationTester(self) self.config_tester = ConfigTester(self, config_class=Qwen3OmniMoeThinkerConfig, has_text_modality=False) @unittest.skip(reason="Cpu not yet supported because in QwenOmniThinker models") def test_disk_offload_bin(self): pass @unittest.skip(reason="Disk offload bin not yet supported because in QwenOmniThinker models") def test_cpu_offload(self): pass @unittest.skip(reason="Disk offload safetensors not yet supported because in QwenOmniThinker models") def test_disk_offload_safetensors(self): pass @unittest.skip(reason="Correct missing keys not yet supported because in QwenOmniThinker models") def test_correct_missing_keys(self): pass @unittest.skip(reason="Compile not yet supported because in QwenOmniThinker models") @pytest.mark.torch_compile_test def test_sdpa_can_compile_dynamic(self): pass @unittest.skip(reason="Sdpa dispatch not yet supported because in QwenOmniThinker models") def test_sdpa_can_dispatch_on_flash(self): pass @unittest.skip(reason="QwenOmniThinker does not support output_hidden_states test") def test_model_outputs_equivalence(self): pass @unittest.skip(reason="Don't have time to investigate at time of merge") def test_eager_padding_matches_padding_free_with_position_ids(self): pass def test_sdpa_can_dispatch_composite_models(self): # overwrite because Qwen2 is audio+text model (not vision+text) if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not self._is_composite: self.skipTest(f"{self.all_model_classes[0].__name__} does not 
support SDPA") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname) model_sdpa = model_sdpa.eval().to(torch_device) text_attn = "sdpa" if model.model._supports_sdpa else "eager" audio_attn = "sdpa" if model.audio_tower._supports_sdpa else "eager" vision_attn = "sdpa" if model.visual._supports_sdpa else "eager" # `None` as it is the requested one which will be assigned to each sub-config # Sub-model will dispatch to SDPA if it can (checked below that `SDPA` layers are present) self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") self.assertTrue(model.model.config._attn_implementation == text_attn) self.assertTrue(model.audio_tower.config._attn_implementation == audio_attn) self.assertTrue(model.visual.config._attn_implementation == vision_attn) model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager") model_eager = model_eager.eval().to(torch_device) self.assertTrue(model_eager.config._attn_implementation == "eager") self.assertTrue(model_eager.model.config._attn_implementation == "eager") self.assertTrue(model_eager.audio_tower.config._attn_implementation == "eager") self.assertTrue(model_eager.visual.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: raise ValueError("The eager model should not have SDPA attention layers") def attention_mask_padding_matches_padding_free_with_position_ids( self, attn_implementation: str, fa_kwargs: bool = False ): max_new_tokens = 30 for model_class in self.all_generative_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() dummy_input = 
inputs_dict[model_class.main_input_name] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) # make sure that all models have enough positions for generation if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1 model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) if 0 in inputs_dict["attention_mask"][:, -1]: inputs_dict["attention_mask"] = inputs_dict["attention_mask"].flip(1) dummy_attention_mask = inputs_dict["attention_mask"] inputs_dict["input_ids"][~dummy_attention_mask.bool()] = config.get_text_config().pad_token_id model = ( model_class.from_pretrained( tmpdirname, dtype=torch.bfloat16, attn_implementation=attn_implementation, ) .to(torch_device) .eval() ) # flatten padfree_inputs_dict = { "input_features": inputs_dict["input_features"], "feature_attention_mask": inputs_dict["feature_attention_mask"], "pixel_values": inputs_dict["pixel_values"], "image_grid_thw": inputs_dict["image_grid_thw"], "input_ids": inputs_dict["input_ids"][dummy_attention_mask.bool()].unsqueeze(0), } # add position_ids vision_position_ids, deltas = model.get_rope_index( input_ids=inputs_dict["input_ids"], image_grid_thw=inputs_dict["image_grid_thw"], attention_mask=inputs_dict["attention_mask"], audio_seqlens=torch.sum(inputs_dict["feature_attention_mask"], dim=1), ) # [3, bs, padded-seq-len] vision_padfree_positions = vision_position_ids[:, dummy_attention_mask.bool()].view( 3, -1 ) # [3, bs*padfree-len] text_padfree_positions = torch.cat( [torch.arange(length) for length in dummy_attention_mask.sum(1).tolist()] ) # [1, bs*padfree-len] text_padfree_positions = text_padfree_positions.long().unsqueeze(0).to(torch_device) padfree_inputs_dict["position_ids"] = torch.cat([text_padfree_positions, vision_padfree_positions])[ :, None, : ] if fa_kwargs: cu_seq_lens = [0] + dummy_attention_mask.sum(1).tolist() 
cu_seq_lens = torch.tensor(cu_seq_lens, device=torch_device) max_length = cu_seq_lens.diff().max().item() padfree_inputs_dict.update( { "cu_seq_lens_q": cu_seq_lens.cumsum(-1).to(dtype=torch.int32), "cu_seq_lens_k": cu_seq_lens.cumsum(-1).to(dtype=torch.int32), "max_length_q": max_length, "max_length_k": max_length, } ) res_padded = model(**inputs_dict, use_cache=False) res_padfree = model(**padfree_inputs_dict, use_cache=False) logits_padded = res_padded.logits[inputs_dict["attention_mask"].bool()] logits_padfree = res_padfree.logits[0] # acceptable numerical instability tol = torch.finfo(torch.bfloat16).eps torch.testing.assert_close(logits_padded, logits_padfree, rtol=tol, atol=tol) @unittest.skip("Cannot do contrastive generation, has custom `generate()`") def test_contrastive_generate(self): pass @unittest.skip("Cannot do contrastive generation, has custom `generate()`") def test_contrastive_generate_dict_outputs_use_cache(self): pass @unittest.skip("Cannot do contrastive generation, has custom `generate()`") def test_contrastive_generate_low_memory(self): pass @unittest.skip("Cannot generate from inputs embeds") def test_generate_from_inputs_embeds_with_static_cache(self): pass # TODO (joao, raushan): there are multiple standardization issues in this model that prevent this test from # passing, fix me @unittest.skip("Cannot handle 4D attention mask") @pytest.mark.torch_compile_test def test_generate_compile_model_forward_fullgraph(self): pass @unittest.skip( "There seems to be something wrong with the config, that does not play well with this test. 
TODO fix me" ) def test_save_load(self): pass @unittest.skip("Cannot handle 4D attention mask") def test_generate_compilation_all_outputs(self): pass @unittest.skip("In a rush to merge, cannot investigate now") def test_sdpa_padding_matches_padding_free_with_position_ids(self): pass @unittest.skip("Cannot handle 4D attention mask") def test_generate_with_static_cache(self): pass @unittest.skip("Cannot handle 4D attention mask") def test_custom_4d_attention_mask(self): pass @unittest.skip("We don't really care about this one, test is not that slow") def test_model_is_small(self): pass @unittest.skip("Qwen3Omni has no base model, model architecture is special") def test_model_base_model_prefix(self): pass @unittest.skip("FIXME this is important, but in a rush to merge, cannot investigate now") def test_get_rope_index_video_with_audio(self): image_grid_thw = torch.empty((0, 3), dtype=torch.long) # 3 * 2 * 2 = 12 video tokens video_grid_thw = torch.tensor([[3, 2, 2]], dtype=torch.long) # num_audio_tokens = ((audio_seqlen - 1) // 2 + 1 - 2) // 2 + 1 # i.e.: 300 audio_seqlen -> 75 audio tokens audio_seqlens = torch.tensor([300], dtype=torch.long) second_per_grids = torch.tensor([1.0], dtype=torch.float) use_audio_in_video = True # fmt: off expected_position_ids = torch.tensor([ [[ 0, 1, # text 2, 2, # vision_bos + audio_bos # video chunk 3, 3, 3, 3, 28, 28, 28, 28, # audio chunk 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, # video chunk 53, 53, 53, 53, # audio chunk 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 78, # audio_eos + vision_eos 79, 80, # text ]], [[ 0, 1, # text 2, 2, # vision_bos + audio_bos # video chunk 3, 3, 4, 4, 3, 3, 4, 4, # audio chunk 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 
33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, # video chunk 3, 3, 4, 4, # audio chunk 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 78, # audio_eos + vision_eos 79, 80, # text ]], [[ 0, 1, # text 2, 2, # vision_bos + audio_bos # video chunk 3, 4, 3, 4, 3, 4, 3, 4, # audio chunk 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, # video chunk 3, 4, 3, 4, # audio chunk 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 78, # audio_eos + vision_eos 79, 80, # text ]], ], dtype=torch.long) # fmt: on for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = torch.tensor( [ [ 100, 101, ] + [ config.vision_start_token_id, config.audio_start_token_id, ] # 1st chunk: 8 video tokens, 50 audio tokens + [config.video_token_id] * 2 * 2 * 2 + [config.audio_token_id] * 50 + # 2nd chunk: 4 video tokens, 25 audio tokens [config.video_token_id] * 1 * 2 * 2 + [config.audio_token_id] * 25 + [ config.audio_end_token_id, config.vision_end_token_id, ] + [ 102, 103, ] ], dtype=torch.long, ) model = model_class(config) position_ids, mrope_position_deltas = model.get_rope_index( input_ids=input_ids, image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, attention_mask=None, use_audio_in_video=use_audio_in_video, audio_seqlens=audio_seqlens, second_per_grids=second_per_grids, ) self.assertTrue(torch.equal(position_ids, expected_position_ids)) def _image_features_get_expected_num_attentions(self, model_tester=None): if model_tester is None: model_tester = self.model_tester return model_tester.vision_config["depth"] def _image_features_get_expected_num_hidden_states(self, model_tester=None): if model_tester is None: model_tester = 
self.model_tester return model_tester.vision_config["depth"] + 1 def _audio_features_get_expected_num_attentions(self, model_tester=None): if model_tester is None: model_tester = self.model_tester return model_tester.audio_config["encoder_layers"] def _audio_features_get_expected_num_hidden_states(self, model_tester=None): if model_tester is None: model_tester = self.model_tester return model_tester.audio_config["encoder_layers"] + 1 def _video_features_get_expected_num_attentions(self, model_tester=None): if model_tester is None: model_tester = self.model_tester return model_tester.vision_config["depth"] def _video_features_get_expected_num_hidden_states(self, model_tester=None): if model_tester is None: model_tester = self.model_tester return model_tester.vision_config["depth"] + 1 def test_code_predictor_config_init(self): """ Test that Qwen3OmniMoeTalkerCodePredictorConfig initializes correctly and accepts max_window_layers while removing use_sliding_window. """ config = Qwen3OmniMoeTalkerCodePredictorConfig( vocab_size=100, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, max_window_layers=28, sliding_window=2048, ) # 1. Check max_window_layers is present self.assertEqual(config.max_window_layers, 28) # 2. Check sliding_window is present self.assertEqual(config.sliding_window, 2048) # 3. 
Check use_sliding_window is removed with self.assertRaises(AttributeError): _ = config.use_sliding_window @require_torch class Qwen3OmniModelIntegrationTest(unittest.TestCase): def setUp(self): self.processor = AutoProcessor.from_pretrained( "Qwen/Qwen3-Omni-30B-A3B-Instruct", min_pixels=28 * 28, max_pixels=56 * 56 ) self.audio_url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3" self.audio_url_additional = ( "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/f2641_0_throatclearing.wav" ) self.image_url = "https://qianwen-res.oss-accelerate-overseas.aliyuncs.com/Qwen2-VL/demo_small.jpg" self.messages = [ { "role": "user", "content": [ {"type": "audio", "audio_url": self.audio_url}, {"type": "image", "image_url": self.image_url}, {"type": "text", "text": "What's that sound and what kind of dog is this?"}, ], } ] self.raw_audio, _ = librosa.load( BytesIO(urlopen(self.audio_url).read()), sr=self.processor.feature_extractor.sampling_rate ) self.raw_audio_additional, _ = librosa.load( BytesIO(urlopen(self.audio_url_additional).read()), sr=self.processor.feature_extractor.sampling_rate ) self.raw_image = Image.open(requests.get(self.image_url, stream=True).raw) def tearDown(self): cleanup(torch_device, gc_collect=True) @slow def test_small_model_integration_test(self): model = Qwen3OmniMoeForConditionalGeneration.from_pretrained( "Qwen/Qwen3-Omni-30B-A3B-Instruct", dtype=torch.bfloat16, device_map="auto" ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor( text=text, audio=[self.raw_audio], images=[self.raw_image], return_tensors="pt", padding=True ).to(torch.bfloat16) expected_input_ids = torch.tensor( [ 151644, 872, 198, 151669, 151675, 151675, 151675, 151675, 151675, 151675, 151675, 151675, 151675, 151675, 151675, 151675, 151675, ] ) torch.allclose(expected_input_ids, inputs.input_ids[0][:17], atol=3e-3) expected_pixel_slice = 
torch.tensor( [ [0.5234, 0.6016, 0.6562], [0.9297, 0.9375, 0.9453], [0.4902, 0.5078, 0.4902], [0.8438, 0.8438, 0.8359], [0.9688, 0.9688, 0.9688], [0.9609, 0.9531, 0.9531], ], dtype=torch.bfloat16, device="cpu", ) assert torch.allclose(expected_pixel_slice, inputs.pixel_values[:6, :3], atol=3e-3) # verify generation inputs = inputs.to(torch_device) output = model.generate( **inputs, thinker_temperature=0, thinker_do_sample=False, return_audio=False, thinker_max_new_tokens=20 ) EXPECTED_DECODED_TEXT = Expectations({ ("cuda", (8, 6)): "user\nWhat's that sound and what kind of dog is this?\nassistant\nBased on the audio and visual information, here is a breakdown of what you're hearing and seeing:-", ("rocm", (9, 4)): "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.", }).get_expectation() # fmt: skip decoded_text = self.processor.decode(output[0], skip_special_tokens=True) self.assertEqual(decoded_text, EXPECTED_DECODED_TEXT) @slow def test_small_model_integration_test_batch(self): model = Qwen3OmniMoeForConditionalGeneration.from_pretrained( "Qwen/Qwen3-Omni-30B-A3B-Instruct", dtype=torch.bfloat16, device_map="auto" ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor( text=[text] * 2, audio=[self.raw_audio, self.raw_audio], images=[self.raw_image, self.raw_image], return_tensors="pt", padding=True, ).to(torch_device, dtype=torch.bfloat16) output = model.generate( **inputs, thinker_temperature=0, thinker_do_sample=False, return_audio=False, thinker_max_new_tokens=20 ) EXPECTED_DECODED_TEXTS = Expectations( { ("cuda", 7) : [ "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is of glass shattering, and the dog in the picture is a Labrador Retriever", "system\nYou are a helpful assistant.\nuser\nWhat's that sound 
and what kind of dog is this?\nassistant\nThe sound is of glass shattering, and the dog in the picture is a Labrador Retriever", ], ("cuda", 8): [ "user\nWhat's that sound and what kind of dog is this?\nassistant\nBased on the audio and visual information, here is a breakdown of what you're hearing and seeing:\n\n", "user\nWhat's that sound and what kind of dog is this?\nassistant\nBased on the audio and visual information, here is a breakdown of what you're hearing and seeing:\n\n" ], ("rocm", (9, 4)): [ "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.", "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.", ], } ).get_expectation() # fmt: skip decoded_texts = self.processor.batch_decode(output, skip_special_tokens=True) self.assertEqual(decoded_texts, EXPECTED_DECODED_TEXTS) @slow def test_small_model_integration_test_multiturn(self): model = Qwen3OmniMoeForConditionalGeneration.from_pretrained( "Qwen/Qwen3-Omni-30B-A3B-Instruct", dtype=torch.bfloat16, device_map="auto" ) messages = [ self.messages[0], { "role": "assistant", "content": [ { "type": "text", "text": "The sound is glass shattering, and the dog appears to be a Labrador Retriever.", } ], }, { "role": "user", "content": [ {"type": "audio", "audio_url": self.audio_url_additional}, {"type": "text", "text": "How about this one?"}, ], }, ] text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) inputs = self.processor( text=text, audio=[self.raw_audio, self.raw_audio_additional], images=[self.raw_image], return_tensors="pt", padding=True, ).to(torch_device, dtype=torch.bfloat16) output = model.generate( **inputs, thinker_temperature=0, thinker_do_sample=False, return_audio=False, thinker_max_new_tokens=20 ) EXPECTED_DECODED_TEXT 
= "user\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog appears to be a Labrador Retriever.\nuser\nHow about this one?\nassistant\nThe sound is a person coughing." self.assertEqual( self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_w_audio(self): model = Qwen3OmniMoeForConditionalGeneration.from_pretrained( "Qwen/Qwen3-Omni-30B-A3B-Instruct", dtype=torch.bfloat16, device_map="auto" ) audio_url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/guess_age_gender.wav" messages = [ { "role": "system", "content": [ { "type": "text", "text": "You are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech.", } ], }, { "role": "user", "content": [{"type": "audio", "audio": audio_url}], }, ] audio, _ = librosa.load(BytesIO(urlopen(audio_url).read()), sr=self.processor.feature_extractor.sampling_rate) text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=text, audio=[audio], return_tensors="pt", padding=True).to( torch_device, dtype=torch.bfloat16 ) output = model.generate( **inputs, thinker_temperature=0, thinker_do_sample=False, thinker_max_new_tokens=20, talker_max_new_tokens=10, ) EXPECTED_DECODED_TEXTS = Expectations( { ("cuda", 7): "system\nYou are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech.\nuser\n\nassistant\nWell, I can try. But it's not always that accurate. 
I might be able to make", ("cuda", 8): "'system\nYou are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech.\nuser\n\nassistant\nYes, I can analyze audio inputs to understand spoken content, and I can also make inferences about'", } ) # fmt: skip EXPECTED_DECODED_TEXT = EXPECTED_DECODED_TEXTS.get_expectation() self.assertEqual( self.processor.decode(output[0][0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) self.assertFalse(torch.isnan(output[1]).any().item()) @slow @require_flash_attn @require_torch_accelerator @pytest.mark.flash_attn_test def test_small_model_integration_test_batch_flashatt2(self): model = Qwen3OmniMoeForConditionalGeneration.from_pretrained( "Qwen/Qwen3-Omni-30B-A3B-Instruct", dtype=torch.bfloat16, attn_implementation="flash_attention_2", device_map="auto", ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor( text=[text, text], audio=[self.raw_audio, self.raw_audio], images=[self.raw_image, self.raw_image], return_tensors="pt", padding=True, ).to(torch_device) output = model.generate(**inputs, thinker_temperature=0, thinker_do_sample=False, return_audio=False) EXPECTED_DECODED_TEXT = Expectations({ ("cuda", None): "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog appears to be a Labrador Retriever.", ("cuda", (8, 6)): "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.", ("rocm", (9, 4)): "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.", }).get_expectation() # fmt: skip decoded_texts = self.processor.batch_decode(output, 
skip_special_tokens=True) self.assertEqual(decoded_texts[0], EXPECTED_DECODED_TEXT) self.assertEqual(decoded_texts[1], EXPECTED_DECODED_TEXT)
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/qwen3_omni_moe/test_modeling_qwen3_omni_moe.py", "license": "Apache License 2.0", "lines": 808, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:tests/models/qwen3_omni_moe/test_processing_qwen3_omni_moe.py
# Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from parameterized import parameterized from transformers import ( Qwen3OmniMoeProcessor, ) from transformers.testing_utils import ( require_av, require_librosa, require_torch, require_torchaudio, require_torchvision, require_vision, ) from transformers.utils import is_torch_available from ...test_processing_common import ProcessorTesterMixin, url_to_local_path if is_torch_available(): import torch @require_vision @require_torch @require_torchaudio @require_torchvision class Qwen3OmniMoeProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = Qwen3OmniMoeProcessor model_id = "Qwen/Qwen2.5-Omni-7B" @classmethod def _setup_image_processor(cls): image_processor_class = cls._get_component_class_from_processor("image_processor") return image_processor_class.from_pretrained( cls.model_id, size={"shortest_edge": 28 * 28, "longest_edge": 56 * 56} ) @classmethod def _setup_video_processor(cls): video_processor_class = cls._get_component_class_from_processor("video_processor") return video_processor_class.from_pretrained( cls.model_id, size={"shortest_edge": 28 * 28, "longest_edge": 56 * 56} ) def prepare_audio_inputs(self, batch_size: int = 3): """This function prepares a list of numpy audios.""" audio_inputs = [np.random.rand(160000) 
* 2 - 1] * batch_size return audio_inputs @require_torch def _test_apply_chat_template( self, modality: str, batch_size: int, return_tensors: str, input_name: str, processor_name: str, input_data: list[str], ): processor = self.get_processor() if processor.chat_template is None: self.skipTest("Processor has no chat template") if processor_name not in self.processor_class.get_attributes(): self.skipTest(f"{processor_name} attribute not present in {self.processor_class}") batch_messages = [ [ { "role": "user", "content": [{"type": "text", "text": "Describe this."}], }, ] ] * batch_size # Test that jinja can be applied formatted_prompt = processor.apply_chat_template(batch_messages, add_generation_prompt=True, tokenize=False) self.assertEqual(len(formatted_prompt), batch_size) # Test that tokenizing with template and directly with `self.tokenizer` gives same output formatted_prompt_tokenized = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, return_tensors=return_tensors ) add_special_tokens = True if processor.tokenizer.bos_token is not None and formatted_prompt[0].startswith(processor.tokenizer.bos_token): add_special_tokens = False tok_output = processor.tokenizer( formatted_prompt, return_tensors=return_tensors, add_special_tokens=add_special_tokens ) expected_output = tok_output.input_ids self.assertListEqual(expected_output.tolist(), formatted_prompt_tokenized.tolist()) # Test that kwargs passed to processor's `__call__` are actually used tokenized_prompt_100 = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, padding="max_length", truncation=True, return_tensors=return_tensors, max_length=100, ) self.assertEqual(len(tokenized_prompt_100[0]), 100) # Test that `return_dict=True` returns text related inputs in the dict out_dict_text = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors=return_tensors, ) 
self.assertTrue(all(key in out_dict_text for key in ["input_ids", "attention_mask"])) self.assertEqual(len(out_dict_text["input_ids"]), batch_size) self.assertEqual(len(out_dict_text["attention_mask"]), batch_size) # Test that with modality URLs and `return_dict=True`, we get modality inputs in the dict for idx, url in enumerate(input_data[:batch_size]): batch_messages[idx][0]["content"] = [batch_messages[idx][0]["content"][0], {"type": modality, "url": url}] out_dict = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors=return_tensors, num_frames=2, # by default no more than 2 frames, otherwise too slow ) input_name = getattr(self, input_name) self.assertTrue(input_name in out_dict) self.assertEqual(len(out_dict["input_ids"]), batch_size) self.assertEqual(len(out_dict["attention_mask"]), batch_size) if modality == "video": # qwen pixels don't scale with bs same way as other models, calculate expected video token count based on video_grid_thw expected_video_token_count = 0 for thw in out_dict["video_grid_thw"]: expected_video_token_count += thw[0] * thw[1] * thw[2] mm_len = expected_video_token_count elif modality == "audio": mm_len = batch_size else: mm_len = batch_size * 1200 self.assertEqual(len(out_dict[input_name]), mm_len) return_tensor_to_type = {"pt": torch.Tensor, "np": np.ndarray, None: list} for k in out_dict: self.assertIsInstance(out_dict[k], return_tensor_to_type[return_tensors]) @unittest.skip("Skipping but this one is important, should be fixed ASAP") @parameterized.expand([(1, "pt"), (2, "pt")]) def test_apply_chat_template_image(self, batch_size: int, return_tensors: str): pass @require_av def test_apply_chat_template_video_frame_sampling(self): processor = self.get_processor() if processor.chat_template is None: self.skipTest("Processor has no chat template") signature = inspect.signature(processor.__call__) if "videos" not in {*signature.parameters.keys()} or ( 
signature.parameters.get("videos") is not None and signature.parameters["videos"].annotation == inspect._empty ): self.skipTest("Processor doesn't accept videos at input") messages = [ [ { "role": "user", "content": [ {"type": "text", "text": "What is shown in this video?"}, ], }, ] ] formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False) self.assertEqual(len(formatted_prompt), 1) formatted_prompt_tokenized = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True) expected_output = processor.tokenizer(formatted_prompt, return_tensors=None).input_ids self.assertListEqual(expected_output, formatted_prompt_tokenized) out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True) self.assertListEqual(list(out_dict.keys()), ["input_ids", "attention_mask"]) # Add video URL for return dict and load with `num_frames` arg messages[0][0]["content"].append( { "type": "video", "url": url_to_local_path( "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/tiny_video.mp4" ), } ) num_frames = 3 out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, num_frames=num_frames, ) self.assertTrue(self.videos_input_name in out_dict_with_video) self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 7728) # Load with `fps` arg fps = 1 out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, fps=fps, ) self.assertTrue(self.videos_input_name in out_dict_with_video) self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 7728) # Load with `fps` and `num_frames` args, should raise an error with self.assertRaises(ValueError): out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, fps=fps, num_frames=num_frames, ) # Load 
without any arg should load the whole video out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, ) self.assertTrue(self.videos_input_name in out_dict_with_video) self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 23184) # Load video as a list of frames (i.e. images). NOTE: each frame should have same size # because we assume they come from one video messages[0][0]["content"][-1] = { "type": "video", "url": [ "https://www.ilankelman.org/stopsigns/australia.jpg", "https://www.ilankelman.org/stopsigns/australia.jpg", ], } out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, ) self.assertTrue(self.videos_input_name in out_dict_with_video) self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 7600) # When the inputs are frame URLs/paths we expect that those are already # sampled and will raise an error is asked to sample again. 
with self.assertRaises(ValueError): out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, do_sample_frames=True, num_frames=num_frames, ) @require_librosa @require_av def test_chat_template_audio_from_video(self): processor = self.get_processor() if processor.chat_template is None: self.skipTest("Processor has no chat template") signature = inspect.signature(processor.__call__) if "videos" not in {*signature.parameters.keys()} or ( signature.parameters.get("videos") is not None and signature.parameters["videos"].annotation == inspect._empty ): self.skipTest(f"{self.processor_class} does not support video inputs") if "feature_extractor" not in self.processor_class.get_attributes(): self.skipTest(f"feature_extractor attribute not present in {self.processor_class}") video_file_path = hf_hub_download( repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset" ) messages = [ { "role": "user", "content": [ {"type": "video", "path": video_file_path}, {"type": "text", "text": "Which of these animals is making the sound?"}, ], }, { "role": "assistant", "content": [{"type": "text", "text": "It is a cow."}], }, { "role": "user", "content": [ {"type": "text", "text": "Tell me all about this animal."}, ], }, ] formatted_prompt = processor.apply_chat_template([messages], add_generation_prompt=True, tokenize=False) self.assertEqual(len(formatted_prompt), 1) # batch size=1 out_dict = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt", load_audio_from_video=True, ) self.assertTrue(self.audio_input_name in out_dict) self.assertTrue(self.videos_input_name in out_dict) # should always have input_ids and attention_mask self.assertEqual(len(out_dict["input_ids"]), 1) # batch-size=1 self.assertEqual(len(out_dict["attention_mask"]), 1) # batch-size=1 self.assertEqual(len(out_dict[self.audio_input_name]), 1) # 1 audio 
in the conversation self.assertEqual(len(out_dict[self.videos_input_name]), 145912) # 1 video in the conversation
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/qwen3_omni_moe/test_processing_qwen3_omni_moe.py", "license": "Apache License 2.0", "lines": 308, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:src/transformers/models/blt/configuration_blt.py
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Blt model configuration""" from ...configuration_utils import PreTrainedConfig from ...modeling_rope_utils import RopeParameters from ...utils import logging logger = logging.get_logger(__name__) class BltLocalEncoderConfig(PreTrainedConfig): """ Configuration class for the Blt Local Encoder component. """ model_type = "blt_local_encoder" default_theta = 500000.0 def __init__( self, vocab_size: int | None = 260, cross_attn_all_layers: bool | None = False, cross_attn_k: int | None = 2, hidden_size_global: int | None = 2048, hidden_size: int | None = 1024, num_attention_heads: int | None = 16, num_key_value_heads: int | None = None, num_hidden_layers: int | None = 1, rms_norm_eps: float | None = 1e-5, dropout: float | None = 0.0, max_position_embeddings: int | None = 24576, rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None, hidden_act: str | None = "silu", intermediate_size: int | None = 2816, initializer_range: float | None = 0.02, **kwargs, ): self.vocab_size = vocab_size self.cross_attn_all_layers = cross_attn_all_layers self.cross_attn_k = cross_attn_k self.hidden_size_global = hidden_size_global self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads or num_attention_heads self.head_dim = hidden_size // num_attention_heads self.intermediate_size = intermediate_size or 
int(8 * hidden_size / 3) self.num_hidden_layers = num_hidden_layers self.rms_norm_eps = rms_norm_eps self.dropout = dropout self.max_position_embeddings = max_position_embeddings self.hidden_act = hidden_act self.initializer_range = initializer_range self.rope_parameters = rope_parameters # Remove tie_word_embeddings from kwargs to avoid duplicate parameter error kwargs.pop("tie_word_embeddings", None) super().__init__(**kwargs, tie_word_embeddings=False) class BltLocalDecoderConfig(PreTrainedConfig): """ Configuration class for the Blt Local Decoder component. """ model_type = "blt_local_decoder" default_theta = 500000.0 def __init__( self, vocab_size: int | None = 260, cross_attn_all_layers: bool | None = True, cross_attn_k: int | None = 2, hidden_size_global: int | None = 2048, hidden_size: int | None = 1024, num_attention_heads: int | None = 16, num_key_value_heads: int | None = None, num_hidden_layers: int | None = 9, rms_norm_eps: float | None = 1e-5, dropout: float | None = 0.0, max_position_embeddings: int | None = 24576, rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None, hidden_act: str | None = "silu", intermediate_size: int | None = 2816, initializer_range: float | None = 0.02, pad_token_id: int | None = None, bos_token_id: int | None = None, eos_token_id: int | None = None, tie_word_embeddings: bool | None = False, **kwargs, ): self.vocab_size = vocab_size self.cross_attn_all_layers = cross_attn_all_layers self.cross_attn_k = cross_attn_k self.hidden_size_global = hidden_size_global self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads or num_attention_heads self.head_dim = hidden_size // num_attention_heads self.intermediate_size = intermediate_size or int(8 * hidden_size / 3) self.num_hidden_layers = num_hidden_layers self.rms_norm_eps = rms_norm_eps self.dropout = dropout self.max_position_embeddings = max_position_embeddings self.hidden_act = hidden_act 
self.initializer_range = initializer_range self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.tie_word_embeddings = False # Force-set to False for BC self.rope_parameters = rope_parameters super().__init__(**kwargs) class BltGlobalTransformerConfig(PreTrainedConfig): """ Configuration class for the Blt Global Transformer component. """ model_type = "blt_global_transformer" default_theta = 500000.0 def __init__( self, hidden_size: int | None = 2048, num_attention_heads: int | None = 16, num_key_value_heads: int | None = None, num_hidden_layers: int | None = 25, rms_norm_eps: float | None = 1e-5, dropout: float | None = 0.0, max_position_embeddings: int | None = 4096, rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None, hidden_act: str | None = "silu", intermediate_size: int | None = 5632, initializer_range: float | None = 0.02, tie_word_embeddings: bool | None = False, **kwargs, ): self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads or num_attention_heads self.head_dim = hidden_size // num_attention_heads self.intermediate_size = intermediate_size or int(8 * hidden_size / 3) self.num_hidden_layers = num_hidden_layers self.rms_norm_eps = rms_norm_eps self.dropout = dropout self.max_position_embeddings = max_position_embeddings self.hidden_act = hidden_act self.initializer_range = initializer_range self.tie_word_embeddings = False self.rope_parameters = rope_parameters super().__init__(**kwargs) class BltPatcherConfig(PreTrainedConfig): r""" Configuration class for the Blt Patcher/Entropy model component. Args: vocab_size (`int`, *optional*, defaults to 260): Vocabulary size of the Blt patcher model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling the patcher model. hidden_size (`int`, *optional*, defaults to 768): Dimension of the hidden representations. 
num_hidden_layers (`int`, *optional*, defaults to 14): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. max_position_embeddings (`int`, *optional*, defaults to 8192): The maximum sequence length that this model might ever be used with. rms_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the rms normalization layers. dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. intermediate_size (`int`, *optional*, defaults to 2048): Dimension of the MLP representations. rope_parameters (`RopeParameters`, *optional*): Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with longer `max_position_embeddings`. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
""" model_type = "blt_patcher" def __init__( self, vocab_size: int | None = 260, hidden_size: int | None = 768, num_hidden_layers: int | None = 14, num_attention_heads: int | None = 12, num_key_value_heads: int | None = None, max_position_embeddings: int | None = 8192, rms_norm_eps: float | None = 1e-5, dropout: float | None = 0.0, intermediate_size: int | None = 2048, rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None, initializer_range: float | None = 0.02, tie_word_embeddings: bool | None = False, **kwargs, ): self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.head_dim = hidden_size // num_attention_heads self.num_key_value_heads = num_key_value_heads if num_key_value_heads is not None else num_attention_heads self.max_position_embeddings = max_position_embeddings self.rms_norm_eps = rms_norm_eps self.dropout = dropout self.hidden_act = "silu" # Blt uses silu activation self.intermediate_size = intermediate_size or int(8 * self.hidden_size / 3) self.initializer_range = initializer_range self.rope_parameters = rope_parameters self.tie_word_embeddings = False super().__init__(**kwargs) class BltConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`BltModel`]. It is used to instantiate a Blt model according to the specified arguments, defining the model architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 260): Vocabulary size of the Blt model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`BltModel`]. max_position_embeddings (`int`, *optional*, defaults to 4096): The maximum sequence length that this model might ever be used with. 
patch_in_forward (`bool`, *optional*, defaults to `True`): Whether to perform patching during the forward pass. patch_size (`int`, *optional*, defaults to 4): Size of the patches used in the patching mechanism. patching_mode (`str`, *optional*, defaults to `"entropy"`): The mode used for patching, such as entropy-based patching. patching_threshold (`float`, *optional*, defaults to 1.34): Threshold value used for determining when to apply patches. patching_batch_size (`int`, *optional*, defaults to 1): Batch size used during the patching process. max_patch_length (`int`, *optional*): Maximum length of patches that can be generated. cross_attn_k (`int`, *optional*, defaults to 2): Number of cross-attention heads used in the model. encoder_hash_byte_group_size (`list`, *optional*): List of byte group sizes used in the encoder hash function. encoder_hash_byte_group_vocab (`int`, *optional*, defaults to 500002): Vocabulary size for the encoder hash byte groups. encoder_hash_byte_group_nb_functions (`int`, *optional*, defaults to 1): Number of hash functions used in the encoder byte grouping. patcher_config (`BltPatcherConfig`, *optional*): Configuration for the patcher component of the model. encoder_config (`BltLocalEncoderConfig`, *optional*): Configuration for the local encoder component of the model. decoder_config (`BltLocalDecoderConfig`, *optional*): Configuration for the local decoder component of the model. global_config (`BltGlobalTransformerConfig`, *optional*): Configuration for the global transformer component of the model. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rope_parameters (`RopeParameters`, *optional*): Dictionary containing the configuration parameters for the RoPE embeddings. 
The dictionary should contain a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with longer `max_position_embeddings`. ```python >>> from transformers import BltModel, BltConfig >>> # Initializing a Blt configuration >>> configuration = BltConfig() >>> # Initializing a model from the configuration >>> model = BltModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` Checkpoint: [facebook/blt](https://huggingface.co/facebook/blt) """ model_type = "blt" keys_to_ignore_at_inference = ["past_key_values"] default_theta = 500000.0 sub_configs = { "patcher_config": BltPatcherConfig, "encoder_config": BltLocalEncoderConfig, "decoder_config": BltLocalDecoderConfig, "global_config": BltGlobalTransformerConfig, } def __init__( self, vocab_size: int | None = 260, max_position_embeddings: int | None = 4096, patch_in_forward: bool | None = True, patch_size: int | None = 4, patching_mode: str | None = "entropy", patching_threshold: float | None = 1.335442066192627, patching_batch_size: int | None = 1, max_patch_length: int | None = None, cross_attn_k: int | None = 2, encoder_hash_byte_group_size: int | None = None, encoder_hash_byte_group_vocab: int | None = 500002, encoder_hash_byte_group_nb_functions: int | None = 1, patcher_config: dict | None = None, encoder_config: dict | None = None, decoder_config: dict | None = None, global_config: dict | None = None, tie_word_embeddings: bool | None = False, pad_token_id: int | None = None, bos_token_id: int | None = None, eos_token_id: int | None = None, initializer_range: float | None = 0.02, rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None, **kwargs, ): # Basic model configuration self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range # Patching configuration self.patch_in_forward = patch_in_forward self.patch_size = patch_size self.patching_mode = 
patching_mode self.patching_threshold = patching_threshold self.patching_batch_size = patching_batch_size self.max_patch_length = max_patch_length self.patching_device = kwargs.get("patching_device", "cuda") self.realtime_patching = kwargs.get("realtime_patching", True) self.patching_threshold_add = kwargs.get("patching_threshold_add") self.monotonicity = kwargs.get("monotonicity", False) # Cross attention configurations self.cross_attn_k = cross_attn_k # Encoder configurations self.encoder_hash_byte_group_size = encoder_hash_byte_group_size or [3, 4, 5, 6, 7, 8] self.encoder_hash_byte_group_vocab = encoder_hash_byte_group_vocab self.encoder_hash_byte_group_nb_functions = encoder_hash_byte_group_nb_functions # Initialize component configurations if patcher_config is None: self.patcher_config = BltPatcherConfig(initializer_range=initializer_range) logger.info("patcher_config is None, using default Blt patcher config") elif isinstance(patcher_config, dict): patcher_config.setdefault("initializer_range", initializer_range) self.patcher_config = BltPatcherConfig(**patcher_config) elif isinstance(patcher_config, BltPatcherConfig): self.patcher_config = patcher_config if encoder_config is None: self.encoder_config = BltLocalEncoderConfig(initializer_range=initializer_range) logger.info("encoder_config is None, using default Blt encoder config") elif isinstance(encoder_config, dict): encoder_config.setdefault("initializer_range", initializer_range) self.encoder_config = BltLocalEncoderConfig(**encoder_config) elif isinstance(encoder_config, BltLocalEncoderConfig): self.encoder_config = encoder_config if decoder_config is None: self.decoder_config = BltLocalDecoderConfig(initializer_range=initializer_range) logger.info("decoder_config is None, using default Blt decoder config") elif isinstance(decoder_config, dict): decoder_config.setdefault("initializer_range", initializer_range) self.decoder_config = BltLocalDecoderConfig(**decoder_config) elif isinstance(decoder_config, 
BltLocalDecoderConfig): self.decoder_config = decoder_config if global_config is None: self.global_config = BltGlobalTransformerConfig(initializer_range=initializer_range) logger.info("global_config is None, using default Blt global config") elif isinstance(global_config, dict): global_config.setdefault("initializer_range", initializer_range) self.global_config = BltGlobalTransformerConfig(**global_config) elif isinstance(global_config, BltGlobalTransformerConfig): self.global_config = global_config # Determine if token embedding projection is needed based on dimension mismatch (7b) encoder_cross_output_size = self.encoder_config.hidden_size * self.cross_attn_k self.global_config.encoder_cross_output_size = ( encoder_cross_output_size if encoder_cross_output_size != self.global_config.hidden_size else None ) self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.tie_word_embeddings = tie_word_embeddings self.rope_parameters = rope_parameters super().__init__(**kwargs) __all__ = [ "BltConfig", "BltPatcherConfig", "BltLocalEncoderConfig", "BltLocalDecoderConfig", "BltGlobalTransformerConfig", ]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/blt/configuration_blt.py", "license": "Apache License 2.0", "lines": 382, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/blt/convert_blt_weights_to_hf.py
import argparse import json import logging import os from typing import Any import torch from huggingface_hub import hf_hub_download, upload_folder from safetensors.torch import load_file, save_file from tokenizers import Tokenizer, decoders, pre_tokenizers, processors from tokenizers.models import BPE from transformers import PreTrainedTokenizerFast from transformers.convert_slow_tokenizer import bytes_to_unicode from transformers.utils import logging as transformers_logging logger = transformers_logging.get_logger(__name__) transformers_logging.set_verbosity_info() def merge_configurations(config_path: str, entropy_params_path: str) -> dict[str, Any]: logger.info("Merging configurations") with open(config_path, "r") as f: main_config = json.load(f) with open(entropy_params_path, "r") as f: entropy_data = json.load(f) entropy_model_params = entropy_data.get("entropy_model", {}) patcher_args = entropy_data.get("data", {}).get("patcher_args", {}) unified_config = main_config.copy()["args"] for key in ["vocab_size", "dim", "n_layers", "n_heads", "max_seqlen"]: if key in unified_config and not isinstance(unified_config[key], int): unified_config[key] = int(unified_config[key]) patch_size = patcher_args.get("patch_size", 8) if isinstance(patch_size, float): patch_size = int(patch_size) # Create patcher config patcher_hidden_size = int(entropy_model_params.get("dim", 512)) patcher_multiple_of = int(entropy_model_params.get("multiple_of", 256)) patcher_intermediate_size = patcher_multiple_of * ( (int(8 * patcher_hidden_size / 3) + patcher_multiple_of - 1) // patcher_multiple_of ) patcher_config = { "vocab_size": int(entropy_model_params.get("vocab_size", 256)), "hidden_size": patcher_hidden_size, "num_hidden_layers": int(entropy_model_params.get("n_layers", 8)), "num_attention_heads": int(entropy_model_params.get("n_heads", 8)), "num_key_value_heads": int(entropy_model_params.get("n_kv_heads")) if entropy_model_params.get("n_kv_heads") is not None else None, 
"max_position_embeddings": int(entropy_model_params.get("max_seqlen", 1024)), "norm_eps": entropy_model_params.get("norm_eps", 1e-5), "dropout": entropy_model_params.get("dropout", 0.0), "rope_theta": entropy_model_params.get("rope_theta", 10000.0), "attn_impl": entropy_model_params.get("attn_impl", "sdpa"), "attn_bias_type": entropy_model_params.get("attn_bias_type", "causal"), "intermediate_size": patcher_intermediate_size, } # Create encoder config encoder_hidden_size = unified_config.get("dim_local_encoder", 1024) encoder_multiple_of = unified_config.get("multiple_of", 256) encoder_intermediate_size = encoder_multiple_of * ( (int(8 * encoder_hidden_size / 3) + encoder_multiple_of - 1) // encoder_multiple_of ) encoder_config = { "vocab_size": unified_config.get("vocab_size", 256), "cross_attn_all_layers": unified_config.get("cross_attn_all_layers_encoder", False), "cross_attn_k": unified_config.get("cross_attn_k", 2), "hidden_size_global": unified_config.get("dim_global", 2048), "pm_size": unified_config.get("pm_size", 0), "hidden_size": encoder_hidden_size, "num_attention_heads": unified_config.get("n_heads_local_encoder", 16), "num_key_value_heads": unified_config.get("n_kv_heads"), "num_hidden_layers": unified_config.get("n_layers_local_encoder", 1), "norm_eps": unified_config.get("norm_eps", 1e-5), "dropout": unified_config.get("dropout", 0.0), "max_position_embeddings": unified_config.get("max_encoder_seq_length") or unified_config.get("max_seqlen", 1024), "rope_theta": unified_config.get("rope_theta", 10000.0), "rope_parameters": {"rope_type": "default"}, "hidden_act": unified_config.get("hidden_act", "silu"), "_attn_implementation": unified_config.get("_attn_implementation", "sdpa"), "intermediate_size": encoder_intermediate_size, } # Create decoder config decoder_hidden_size = unified_config.get("dim_local_decoder", 1024) decoder_multiple_of = unified_config.get("multiple_of", 256) decoder_intermediate_size = decoder_multiple_of * ( (int(8 * 
decoder_hidden_size / 3) + decoder_multiple_of - 1) // decoder_multiple_of ) decoder_config = { "vocab_size": unified_config.get("vocab_size", 256), "cross_attn_all_layers": unified_config.get("cross_attn_all_layers_decoder", False), "cross_attn_k": unified_config.get("cross_attn_k", 2), "hidden_size_global": unified_config.get("dim_global", 2048), "hidden_size": decoder_hidden_size, "num_attention_heads": unified_config.get("n_heads_local_decoder", 16), "num_key_value_heads": unified_config.get("n_kv_heads"), "num_hidden_layers": unified_config.get("n_layers_local_decoder", 9), "norm_eps": unified_config.get("norm_eps", 1e-5), "dropout": unified_config.get("dropout", 0.0), "max_position_embeddings": unified_config.get("max_encoder_seq_length") or unified_config.get("max_seqlen", 1024), "rope_theta": unified_config.get("rope_theta", 10000.0), "rope_parameters": {"rope_type": "default"}, "hidden_act": unified_config.get("hidden_act", "silu"), "_attn_implementation": unified_config.get("_attn_implementation", "sdpa"), "intermediate_size": decoder_intermediate_size, } # Create global transformer config global_hidden_size = unified_config.get("dim_global", 2048) global_multiple_of = unified_config.get("multiple_of", 256) global_intermediate_size = global_multiple_of * ( (int(8 * global_hidden_size / 3) + global_multiple_of - 1) // global_multiple_of ) global_config = { "hidden_size": global_hidden_size, "num_attention_heads": unified_config.get("n_heads_global", 16), "num_key_value_heads": unified_config.get("n_kv_heads_global"), "num_hidden_layers": unified_config.get("n_layers_global", 25), "norm_eps": unified_config.get("norm_eps", 1e-5), "dropout": unified_config.get("dropout", 0.0), "max_position_embeddings": unified_config.get("max_seqlen", 1024), "rope_theta": unified_config.get("rope_theta", 10000.0), "rope_parameters": {"rope_type": "default"}, "hidden_act": unified_config.get("hidden_act", "silu"), "_attn_implementation": 
unified_config.get("_attn_implementation", "sdpa"), "intermediate_size": global_intermediate_size, } # Create main config with sub-configs main_config_dict = { "model_type": "blt", "vocab_size": unified_config.get("vocab_size", 256), "max_position_embeddings": unified_config.get("max_seqlen", 1024), "patch_in_forward": True, "realtime_patching": True, "patching_mode": "entropy", "patch_size": patch_size, "patching_threshold": patcher_args.get("threshold", 0.5), "patching_threshold_add": patcher_args.get("threshold_add", 0.0), "max_patch_length": patcher_args.get("max_patch_length"), "patching_batch_size": patcher_args.get("patching_batch_size", 1), "patching_device": patcher_args.get("patching_device", "cuda"), "monotonicity": patcher_args.get("monotonicity", False), "cross_attn_k": unified_config.get("cross_attn_k", 2), "encoder_hash_byte_group_size": unified_config.get("encoder_hash_byte_group_size"), "encoder_hash_byte_group_vocab": unified_config.get("encoder_hash_byte_group_vocab", 30000), "encoder_hash_byte_group_nb_functions": unified_config.get("encoder_hash_byte_group_nb_functions", 3), "pm_size": unified_config.get("pm_size", 0), "patcher_config": patcher_config, "encoder_config": encoder_config, "decoder_config": decoder_config, "global_config": global_config, } main_config_dict["tie_word_embeddings"] = False logger.info(f"Merged configuration with {len(main_config_dict)} parameters") return main_config_dict def apply_weight_mapping(state_dict: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]: component_mappings = { ".attention.": ".self_attn.", ".feed_forward.": ".mlp.", ".attention_norm.": ".input_layernorm.", ".ffn_norm.": ".post_attention_layernorm.", ".tok_embeddings.": ".embed_tokens.", ".cross_attn_norm_q.": ".q_norm.", ".cross_attn_norm_kv.": ".k_norm.", ".w1.": ".gate_proj.", ".w2.": ".down_proj.", ".w3.": ".up_proj.", ".wq.": ".q_proj.", ".wk.": ".k_proj.", ".wv.": ".v_proj.", ".wo.": ".o_proj.", ".output.": ".lm_head.", } new_state_dict = 
{} for old_key, tensor in state_dict.items(): new_key = old_key for old_pattern, new_pattern in component_mappings.items(): if old_pattern in new_key: new_key = new_key.replace(old_pattern, new_pattern) new_state_dict[new_key] = tensor return new_state_dict def convert_hash_embeddings_to_fused( unified_weights: dict[str, torch.Tensor], config: dict[str, Any] ) -> dict[str, torch.Tensor]: """Convert ModuleList hash embeddings to nn.embedding format""" original_keys_format = [ key for key in unified_weights.keys() if "encoder_hash_tok_embedding." in key and ".weight" in key and key.split(".")[-2].isdigit() ] num_embeddings = config.get("encoder_hash_byte_group_nb_functions", 1) * len( config.get("encoder_hash_byte_group_size", [3, 4, 5, 6, 7, 8]) ) vocab_size = config.get("encoder_hash_byte_group_vocab", 500002) hidden_size = config.get("encoder_config", {}).get("hidden_size", 1024) fused_weight = torch.zeros(vocab_size * num_embeddings, hidden_size) sorted_keys = sorted(original_keys_format, key=lambda k: int(k.split(".")[-2])) for i, old_key in enumerate(sorted_keys): start_idx = i * vocab_size end_idx = (i + 1) * vocab_size fused_weight[start_idx:end_idx] = unified_weights[old_key] logger.info(f"Copied {old_key} to indices {start_idx}:{end_idx}") del unified_weights[old_key] fused_key = "model.encoder_hash_tok_embedding.weight" unified_weights[fused_key] = fused_weight return unified_weights def merge_weights(weights_path: str, entropy_weights_path: str) -> dict[str, torch.Tensor]: main_weights = load_file(weights_path) entropy_weights = torch.load(entropy_weights_path, map_location="cpu", weights_only=True) if "model" in entropy_weights: entropy_weights = entropy_weights["model"] elif "state_dict" in entropy_weights: entropy_weights = entropy_weights["state_dict"] unified_weights = main_weights.copy() for key, tensor in entropy_weights.items(): patcher_key = f"patcher.{key}" unified_weights[patcher_key] = tensor unified_weights = 
apply_weight_mapping(unified_weights) decoder_lm_head_key = "local_decoder.lm_head.weight" top_lm_head_key = "lm_head.weight" unified_weights[top_lm_head_key] = unified_weights[decoder_lm_head_key] del unified_weights[decoder_lm_head_key] prefixed_weights = {} for key, tensor in unified_weights.items(): if key == top_lm_head_key: prefixed_weights[key] = tensor elif not key.startswith("model."): prefixed_weights[f"model.{key}"] = tensor else: prefixed_weights[key] = tensor unified_weights = prefixed_weights return unified_weights def create_tokenizer_config(output_dir: str, config: dict[str, Any]): tokenizer_config = { "tokenizer_class": "PreTrainedTokenizerFast", "vocab_size": config.get("vocab_size", 256), "model_max_length": config.get("max_seqlen", 1024), "model_input_names": ["input_ids", "attention_mask"], "add_bos_token": True, "add_eos_token": True, "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "unk_token": "<unk>", } tokenizer_path = os.path.join(output_dir, "tokenizer_config.json") with open(tokenizer_path, "w") as f: json.dump(tokenizer_config, f, indent=2) def create_tokenizer_json(output_dir: str, config: dict[str, Any]): byte_encoder = bytes_to_unicode() vocab: dict[str, int] = {} vocab["<boe>"] = 0 vocab["<s>"] = 1 vocab["</s>"] = 2 vocab["<pad>"] = 3 offset = 4 for byte_val, unicode_char in byte_encoder.items(): vocab[unicode_char] = byte_val + offset backend = Tokenizer( BPE(vocab=vocab, merges=[], continuing_subword_prefix="", end_of_word_suffix="", fuse_unk=False) ) backend.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False) backend.decoder = decoders.ByteLevel() bos = config.get("bos_token", "<s>") backend.post_processor = processors.TemplateProcessing( single=f"{bos}:0 $A:0", pair=f"{bos}:0 $A:0 $B:1", special_tokens=[(bos, 1)], ) tokenizer = PreTrainedTokenizerFast( tokenizer_object=backend, bos_token=config.get("bos_token", "<s>"), eos_token=config.get("eos_token", "</s>"), pad_token=config.get("pad_token", 
"<pad>"), unk_token=config.get("unk_token", "<unk>"), ) tokenizer.add_bos_token = bool(config.get("add_bos_token", True)) tokenizer.add_eos_token = bool(config.get("add_eos_token", True)) tokenizer.save_pretrained(output_dir) logger.info(f"Saved tokenizer.json to {os.path.join(output_dir, 'tokenizer.json')}") def push_to_hub( local_dir: str, repo_id: str, commit_message: str = "Upload converted Blt model", private: bool = False, token: str | None = None, ) -> None: try: upload_folder( folder_path=local_dir, repo_id=repo_id, commit_message=commit_message, repo_type="model", token=token, ) logger.info(f"Successfully pushed model to {repo_id}") except Exception as e: logger.error(f"Failed to push model to Hub: {e}") raise def convert_hf_blt_to_unified( model_id: str, output_dir: str, config_name: str = "config.json", weights_name: str = "model.bin", cache_dir: str | None = None, push_to_hub_repo: str | None = None, hub_private: bool = False, hub_token: str | None = None, ) -> None: # Download model files config_path = hf_hub_download(repo_id=model_id, filename="config.json", cache_dir=cache_dir) weights_path = hf_hub_download(repo_id=model_id, filename="model.safetensors", cache_dir=cache_dir) entropy_params_path = hf_hub_download(repo_id=model_id, filename="entropy_model/params.json", cache_dir=cache_dir) entropy_weights_path = hf_hub_download( repo_id=model_id, filename="entropy_model/consolidated.pth", cache_dir=cache_dir ) unified_config = merge_configurations(config_path, entropy_params_path) unified_weights = merge_weights(weights_path, entropy_weights_path) unified_weights = convert_hash_embeddings_to_fused(unified_weights, unified_config) os.makedirs(output_dir, exist_ok=True) config_path = os.path.join(output_dir, config_name) with open(config_path, "w") as f: json.dump(unified_config, f, indent=2) if weights_name.endswith(".bin"): weights_name = weights_name.replace(".bin", ".safetensors") weights_path = os.path.join(output_dir, weights_name) 
save_file(unified_weights, weights_path) create_tokenizer_json(output_dir=output_dir, config=unified_config) create_tokenizer_config(output_dir, unified_config) logger.info(f"Conversion completed, model saved to: {output_dir}") if push_to_hub_repo: push_to_hub( local_dir=output_dir, repo_id=push_to_hub_repo, commit_message="Upload Blt model converted", private=hub_private, token=hub_token, ) def main(): parser = argparse.ArgumentParser( description="Convert Blt models from HuggingFace Hub format to unified format", formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( "--model_id", type=str, default="facebook/blt-7b", ) parser.add_argument( "--output_dir", type=str, default="./blt_converted", ) parser.add_argument( "--config_name", type=str, default="config.json", ) parser.add_argument( "--weights_name", type=str, default="model.bin", ) parser.add_argument( "--cache_dir", type=str, default=None, ) parser.add_argument( "--debug", action="store_true", default=True, ) parser.add_argument( "--push_to_hub", type=str, default=None, ) parser.add_argument( "--hub_private", action="store_true", default=False, ) parser.add_argument( "--hub_token", type=str, default="hf_token", ) args = parser.parse_args() transformers_logging.set_verbosity_debug() logging.basicConfig(level=logging.DEBUG) try: convert_hf_blt_to_unified( model_id=args.model_id, output_dir=args.output_dir, config_name=args.config_name, weights_name=args.weights_name, cache_dir=args.cache_dir, push_to_hub_repo=False, # args.push_to_hub, hub_private=args.hub_private, hub_token=args.hub_token, ) except Exception as e: logger.error(f"Conversion failed: {e}") raise if __name__ == "__main__": main()
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/blt/convert_blt_weights_to_hf.py", "license": "Apache License 2.0", "lines": 403, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
huggingface/transformers:tests/models/blt/test_modeling_blt.py
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Blt model.""" import unittest import pytest from parameterized import parameterized from transformers import AutoTokenizer, is_torch_available from transformers.testing_utils import ( cleanup, require_torch, require_torch_accelerator, require_torch_bf16, slow, torch_device, ) from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester from ...test_modeling_common import ( TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION, _test_eager_matches_sdpa_inference, ) if is_torch_available(): import torch from transformers import BltConfig, BltForCausalLM, BltModel class BltModelTester(CausalLMModelTester): if is_torch_available(): base_model_class = BltModel def __init__( self, parent, ignore_index=-100, seq_length=7, is_training=True, ): super().__init__(parent) self.parent = parent self.ignore_index = ignore_index self.seq_length = seq_length self.is_training = is_training self.batch_size = 3 # Common parameters for all configs self.hidden_size = 16 self.num_hidden_layers = 1 self.num_attention_heads = 2 self.num_key_value_heads = 2 self.intermediate_size = 32 self.hidden_act = "silu" self.max_position_embeddings = 32 self.vocab_size = 32 self.rope_theta = 500000.0 self.rope_parameters = {"rope_type": "default"} self.rms_norm_eps = 1e-5 self.dropout = 0.0 self.encoder_hash_byte_group_size = [2, 3] self.encoder_hash_byte_group_vocab 
= 64 self.encoder_hash_byte_group_nb_functions = 1 # Common parameters for all configs self.patcher_config = { "hidden_size": self.hidden_size, "num_hidden_layers": self.num_hidden_layers, "num_attention_heads": self.num_attention_heads, "num_key_value_heads": self.num_key_value_heads, "intermediate_size": self.intermediate_size, "max_position_embeddings": self.max_position_embeddings, "rope_theta": self.rope_theta, "rope_parameters": self.rope_parameters, "hidden_act": self.hidden_act, "rms_norm_eps": self.rms_norm_eps, "dropout": self.dropout, } self.encoder_config = { "hidden_size": self.hidden_size, "num_hidden_layers": self.num_hidden_layers, "num_attention_heads": self.num_attention_heads, "num_key_value_heads": self.num_key_value_heads, "intermediate_size": self.intermediate_size, "max_position_embeddings": self.max_position_embeddings, "rope_theta": self.rope_theta, "rope_parameters": self.rope_parameters, "hidden_act": self.hidden_act, "rms_norm_eps": self.rms_norm_eps, "dropout": self.dropout, } self.decoder_config = { "vocab_size": self.vocab_size, "hidden_size": self.hidden_size, "hidden_size_global": self.hidden_size * 2, # Must match global transformer output size "num_hidden_layers": self.num_hidden_layers, "num_attention_heads": self.num_attention_heads, "num_key_value_heads": self.num_key_value_heads, "intermediate_size": self.intermediate_size, "max_position_embeddings": self.max_position_embeddings, "rope_theta": self.rope_theta, "rope_parameters": self.rope_parameters, "hidden_act": self.hidden_act, "rms_norm_eps": self.rms_norm_eps, "dropout": self.dropout, } self.global_config = { "hidden_size": self.hidden_size * 2, # Double the hidden size for global transformer "num_hidden_layers": self.num_hidden_layers, "num_attention_heads": self.num_attention_heads, "num_key_value_heads": self.num_key_value_heads, "intermediate_size": self.intermediate_size, "max_position_embeddings": self.max_position_embeddings, "rope_theta": self.rope_theta, 
"rope_parameters": self.rope_parameters, "hidden_act": self.hidden_act, "rms_norm_eps": self.rms_norm_eps, "dropout": self.dropout, } self.num_hidden_layers = self.encoder_config["num_hidden_layers"] def get_config(self): config = BltConfig( vocab_size=self.vocab_size, max_position_embeddings=self.max_position_embeddings, patch_in_forward=False, # Disable patching for tests patch_size=4, patching_mode="entropy", patching_threshold=1.335442066192627, patching_batch_size=1, max_patch_length=None, cross_attn_k=2, encoder_hash_byte_group_size=self.encoder_hash_byte_group_size, encoder_hash_byte_group_vocab=self.encoder_hash_byte_group_vocab, encoder_hash_byte_group_nb_functions=self.encoder_hash_byte_group_nb_functions, patcher_config=self.patcher_config, encoder_config=self.encoder_config, decoder_config=self.decoder_config, global_config=self.global_config, rope_parameters=self.rope_parameters, tie_word_embeddings=False, ) config.num_attention_heads = config.decoder_config.num_attention_heads config.num_hidden_layers = config.encoder_config.num_hidden_layers config.hidden_size = config.decoder_config.hidden_size return config @require_torch class BltModelTest(CausalLMModelTest, unittest.TestCase): model_tester_class = BltModelTester # Need to use `0.8` instead of `0.9` for `test_cpu_offload` # This is because we are hitting edge cases with the causal_mask buffer model_split_percents = [0.5, 0.7, 0.8] # used in `test_torch_compile_for_training` _torch_compile_train_cls = BltForCausalLM if is_torch_available() else None @pytest.mark.generate @parameterized.expand([("greedy", 1), ("beam search", 2)]) @unittest.skip( "Blt requires real token IDs for its hash-based embedding computation, making inputs_embeds generation incompatible with identical outputs" ) def test_generate_from_inputs_embeds(self, _, num_beams): pass @pytest.mark.generate @unittest.skip( "Blt requires real token IDs for its hash-based embedding computation, making inputs_embeds generation incompatible 
with identical outputs" ) def test_inputs_embeds_matches_input_ids(self): pass @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION) def test_eager_matches_sdpa_inference( self, name, torch_dtype, padding_side, use_attention_mask, output_attentions, enable_kernels, ): "We need to relax a bit the `atols` for fp32 here due to the altup projections" atols = { ("cpu", False, torch.float32): 2e-2, # this was relaxed ("cpu", False, torch.float16): 5e-3, ("cpu", False, torch.bfloat16): 1e-2, ("cpu", True, torch.float32): 2e-2, # this was relaxed ("cpu", True, torch.float16): 5e-3, ("cpu", True, torch.bfloat16): 1e-2, ("cuda", False, torch.float32): 2e-2, # this was relaxed ("cuda", False, torch.bfloat16): 1e-2, ("cuda", False, torch.float16): 5e-3, ("cuda", True, torch.float32): 2e-2, # this was relaxed ("cuda", True, torch.bfloat16): 1e-2, ("cuda", True, torch.float16): 5e-3, } _test_eager_matches_sdpa_inference( self, name, torch_dtype, padding_side, use_attention_mask, output_attentions, enable_kernels, atols=atols ) @require_torch_accelerator @slow def test_sdpa_can_dispatch_on_flash(self): self.skipTest("BLT always has an attention_mask input") @require_torch_accelerator class BltIntegrationTest(unittest.TestCase): def setup(self): cleanup(torch_device, gc_collect=True) def tearDown(self): # TODO (joao): automatic compilation, i.e. compilation when `cache_implementation="static"` is used, leaves # some memory allocated in the cache, which means some object is not being released properly. This causes some # unoptimal memory usage, e.g. after certain tests a 7B model in FP16 no longer fits in a 24GB GPU. # Investigate the root cause. cleanup(torch_device, gc_collect=True) @slow def test_model(self): NUM_TOKENS_TO_GENERATE = 200 EXPECTED_TEXT = "my name is alex and i am a student at the university of michigan. i am a senior majoring in computer science and minoring in mathematics. 
i am also a member of the michigan math club and the michigan computer s" prompt = "my name is" model = BltForCausalLM.from_pretrained("itazap/blt-1b-hf", device_map="auto", attn_implementation="sdpa") tokenizer = AutoTokenizer.from_pretrained("itazap/blt-1b-hf") inputs = tokenizer(prompt, return_tensors="pt").to(model.device) generated_ids = model.generate( **inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, use_cache=False ) output_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(output_text, EXPECTED_TEXT) @slow def test_model_logits(self): EXPECTED_OUTPUT = torch.tensor( [ [ -10.4948, -10.7065, -6.1813, -10.5545, -10.3428, -9.1493, -8.4937, -8.6382, -9.2159, -9.5907, -9.3679, -8.4184, -9.0655, -3.4436, 2.9616, -10.3157, -6.3723, -6.0133, -9.7100, -9.2128, -8.8064, -9.8179, -9.7516, -9.4681, -9.7715, -9.4897, -9.0491, -9.8098, -9.4648, -9.3294, ], [ -13.3010, -13.1910, -5.7230, -13.2895, -13.4864, -8.7140, -7.0275, -7.0182, -10.1362, -10.3762, -9.9086, -7.8049, -8.8660, -5.2711, -3.5778, -12.5346, -9.1609, -6.7925, -10.3717, -9.2650, -10.6393, -11.4807, -11.2128, -10.9615, -10.5806, -10.8873, -11.0651, -11.3471, -10.5437, -9.9688, ], ] ).to(torch_device) input_ids = [1, 42, 21, 12, 43, 23, 1, 4] model = BltForCausalLM.from_pretrained("itazap/blt-1b-hf", attn_implementation="sdpa", device_map="auto") with torch.no_grad(): output = model(torch.tensor([input_ids]).to(torch_device))[0] torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-4, atol=1e-4) @slow @require_torch_bf16 def test_model_bf16(self): """Test Blt model with bfloat16 precision.""" NUM_TOKENS_TO_GENERATE = 200 EXPECTED_TEXT = "my name is alex and i am a student at the university of michigan in the college of arts and sciences. i am a senior majoring in computer science and minoring in mathematics. 
i am also a member of the michigan m" prompt = "my name is" model = BltForCausalLM.from_pretrained( "itazap/blt-1b-hf", device_map="auto", attn_implementation="sdpa", torch_dtype=torch.bfloat16 ) tokenizer = AutoTokenizer.from_pretrained("itazap/blt-1b-hf") inputs = tokenizer(prompt, return_tensors="pt").to(model.device) generated_ids = model.generate( **inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, use_cache=False ) output_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(output_text, EXPECTED_TEXT) @slow @require_torch_bf16 def test_model_logits_bf16(self): """Test Blt model logits with bfloat16 precision.""" EXPECTED_OUTPUT = torch.tensor( [ [ -10.5000, -10.6875, -6.1875, -10.5625, -10.3125, -9.1875, -8.5000, -8.6875, -9.1875, -9.5625, -9.3750, -8.5000, -9.0625, -3.4219, 2.9531, -10.3125, -6.4062, -6.0000, -9.6875, -9.1875, -8.8125, -9.8125, -9.7500, -9.4375, -9.8125, -9.5000, -9.0000, -9.8125, -9.4375, -9.3125, ], [ -13.2500, -13.1875, -5.6875, -13.3125, -13.5000, -8.7500, -7.0625, -7.0312, -10.1250, -10.3750, -9.8750, -7.8438, -8.8750, -5.2812, -3.5625, -12.5000, -9.1875, -6.8125, -10.3750, -9.3125, -10.6250, -11.5000, -11.2500, -11.0000, -10.5625, -10.8750, -11.0625, -11.3750, -10.5625, -10.0000, ], ] ).to(torch_device) input_ids = [1, 42, 21, 12, 43, 23, 1, 4] model = BltForCausalLM.from_pretrained( "itazap/blt-1b-hf", device_map="auto", attn_implementation="sdpa", torch_dtype=torch.bfloat16 ) with torch.no_grad(): output = model(torch.tensor([input_ids]).to(torch_device))[0] torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-3, atol=1e-3) @slow def test_model_eager(self): """Test Blt model with bfloat16 precision using eager attention implementation.""" NUM_TOKENS_TO_GENERATE = 200 EXPECTED_TEXT = "my name is alex and i am a student at the university of michigan. i am a senior majoring in computer science and minoring in mathematics. 
i am also a member of the michigan math club and the michigan computer s" prompt = "my name is" model = BltForCausalLM.from_pretrained("itazap/blt-1b-hf", device_map="auto", attn_implementation="eager") tokenizer = AutoTokenizer.from_pretrained("itazap/blt-1b-hf") inputs = tokenizer(prompt, return_tensors="pt").to(model.device) generated_ids = model.generate( **inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, use_cache=False ) output_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(output_text, EXPECTED_TEXT) @slow @require_torch_bf16 def test_model_bf16_static_cache(self): """Test Blt model with bfloat16 precision and static cache.""" NUM_TOKENS_TO_GENERATE = 200 EXPECTED_TEXT = "my name is alex and i am a student at the university of michigan in the college of arts and sciences. i am a senior majoring in computer science and minoring in mathematics. i am also a member of the michigan m" prompt = "my name is" model = BltForCausalLM.from_pretrained( "itazap/blt-1b-hf", device_map="auto", attn_implementation="sdpa", torch_dtype=torch.bfloat16 ) model.generation_config.cache_implementation = "static" tokenizer = AutoTokenizer.from_pretrained("itazap/blt-1b-hf") inputs = tokenizer(prompt, return_tensors="pt").to(model.device) generated_ids = model.generate( **inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, use_cache=False ) output_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(output_text, EXPECTED_TEXT)
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/blt/test_modeling_blt.py", "license": "Apache License 2.0", "lines": 426, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:src/transformers/models/camembert/modular_camembert.py
# Copyright 2019 Inria, Facebook AI Research and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch CamemBERT model.""" import torch from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...modeling_outputs import ( BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring from ...utils.generic import can_return_tuple from ..roberta.modeling_roberta import ( RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) class CamembertPreTrainedModel(RobertaPreTrainedModel): base_model_prefix = "roberta" class CamembertModel(RobertaModel): pass class CamembertForMaskedLM(RobertaForMaskedLM): _tied_weights_keys = { "lm_head.decoder.weight": "roberta.embeddings.word_embeddings.weight", "lm_head.decoder.bias": "lm_head.bias", } def __init__(self, config): super().__init__(config) del self.camembert self.roberta = CamembertModel(config, add_pooling_layer=False) @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = 
None, token_type_ids: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, encoder_hidden_states: torch.FloatTensor | None = None, encoder_attention_mask: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor] | MaskedLMOutput: r""" token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value >= 2. All the value in this tensor should be always < type_vocab_size. [What are token type IDs?](../glossary#token-type-ids) labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, return_dict=True, **kwargs, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: # move labels to correct device labels = labels.to(prediction_scores.device) loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class CamembertForSequenceClassification(RobertaForSequenceClassification): def __init__(self, config): super().__init__(config) del self.camembert self.roberta = CamembertModel(config, add_pooling_layer=False) @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, token_type_ids: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor] | SequenceClassifierOutput: r""" token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value >= 2. 
All the value in this tensor should be always < type_vocab_size. [What are token type IDs?](../glossary#token-type-ids) labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, return_dict=True, **kwargs, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: # move labels to correct device labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class CamembertForMultipleChoice(RobertaForMultipleChoice): def __init__(self, config): super().__init__(config) del self.camembert self.roberta = CamembertModel(config, add_pooling_layer=False) @can_return_tuple @auto_docstring def forward( self, input_ids: 
torch.LongTensor | None = None, token_type_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor] | MultipleChoiceModelOutput: r""" input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value >= 2. All the value in this tensor should be always < type_vocab_size. [What are token type IDs?](../glossary#token-type-ids) labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. """ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.roberta( flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, inputs_embeds=flat_inputs_embeds, return_dict=True, **kwargs, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: # move labels to correct device labels = labels.to(reshaped_logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class CamembertForTokenClassification(RobertaForTokenClassification): def __init__(self, config): super().__init__(config) del self.camembert self.roberta = CamembertModel(config, add_pooling_layer=False) @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, token_type_ids: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, labels: torch.LongTensor 
| None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor] | TokenClassifierOutput: r""" token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value >= 2. All the value in this tensor should be always < type_vocab_size. [What are token type IDs?](../glossary#token-type-ids) labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, return_dict=True, **kwargs, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: # move labels to correct device labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class CamembertForQuestionAnswering(RobertaForQuestionAnswering): def __init__(self, config): super().__init__(config) del self.camembert self.roberta = CamembertModel(config, add_pooling_layer=False) @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, token_type_ids: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, start_positions: torch.LongTensor | None = None, end_positions: 
torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor] | QuestionAnsweringModelOutput: r""" token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value >= 2. All the value in this tensor should be always < type_vocab_size. [What are token type IDs?](../glossary#token-type-ids) """ outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, return_dict=True, **kwargs, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class CamembertForCausalLM(RobertaForCausalLM): 
def __init__(self, config): super().__init__(config) del self.camembert self.roberta = CamembertModel(config, add_pooling_layer=False) @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, token_type_ids: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, encoder_hidden_states: torch.FloatTensor | None = None, encoder_attention_mask: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, past_key_values: tuple[tuple[torch.FloatTensor]] | None = None, use_cache: bool | None = None, cache_position: torch.Tensor | None = None, logits_to_keep: int | torch.Tensor = 0, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor] | CausalLMOutputWithCrossAttentions: r""" token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value >= 2. All the value in this tensor should be always < type_vocab_size. [What are token type IDs?](../glossary#token-type-ids) labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Example: ```python >>> from transformers import AutoTokenizer, CamembertForCausalLM, AutoConfig >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("almanach/camembert-base") >>> config = AutoConfig.from_pretrained("almanach/camembert-base") >>> config.is_decoder = True >>> model = CamembertForCausalLM.from_pretrained("almanach/camembert-base", config=config) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits ```""" if labels is not None: use_cache = False outputs: BaseModelOutputWithPoolingAndCrossAttentions = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, return_dict=True, **kwargs, ) hidden_states = outputs.last_hidden_state # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) __all__ = [ "CamembertForCausalLM", "CamembertForMaskedLM", "CamembertForMultipleChoice", "CamembertForQuestionAnswering", 
"CamembertForSequenceClassification", "CamembertForTokenClassification", "CamembertModel", "CamembertPreTrainedModel", ]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/camembert/modular_camembert.py", "license": "Apache License 2.0", "lines": 455, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/data2vec/modular_data2vec_text.py
# Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Data2VecText model.""" import torch import torch.nn as nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ... import initialization as init from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, logging from ...utils.generic import can_return_tuple from ..roberta.modeling_roberta import ( RobertaClassificationHead, RobertaCrossAttention, RobertaEmbeddings, RobertaLayer, RobertaLMHead, RobertaModel, RobertaSelfAttention, ) from .configuration_data2vec_text import Data2VecTextConfig logger = logging.get_logger(__name__) class Data2VecTextEmbeddings(RobertaEmbeddings): pass class Data2VecTextSelfAttention(RobertaSelfAttention): pass class Data2VecTextCrossAttention(RobertaCrossAttention): pass class Data2VecTextLayer(RobertaLayer): pass @auto_docstring class Data2VecTextPreTrainedModel(PreTrainedModel): config_class = Data2VecTextConfig base_model_prefix = "data2vec_text" supports_gradient_checkpointing = True _no_split_modules = ["Data2VecTextForTextEmbeddings", "Data2VecTextLayer"] 
_supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _supports_attention_backend = True _can_record_outputs = { "hidden_states": Data2VecTextLayer, "attentions": Data2VecTextSelfAttention, "cross_attentions": Data2VecTextCrossAttention, } def _init_weights(self, module): super()._init_weights(module) if isinstance(module, Data2VecTextEmbeddings): init.copy_(module.position_ids, torch.arange(module.position_ids.shape[-1]).expand((1, -1))) init.zeros_(module.token_type_ids) @auto_docstring class Data2VecTextModel(RobertaModel): pass class Data2VecTextLMHead(RobertaLMHead): pass class Data2VecTextClassificationHead(RobertaClassificationHead): pass @auto_docstring( custom_intro=""" Data2VecText Model with a `language modeling` head on top for CLM fine-tuning. """ ) class Data2VecTextForCausalLM(Data2VecTextPreTrainedModel, GenerationMixin): _tied_weights_keys = { "lm_head.decoder.weight": "data2vec_text.embeddings.word_embeddings.weight", "lm_head.decoder.bias": "lm_head.bias", } def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning("If you want to use `Data2VecTextLMHeadModel` as a standalone, add `is_decoder=True.`") self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False) self.lm_head = Data2VecTextLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, token_type_ids: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, encoder_hidden_states: torch.FloatTensor | None = None, encoder_attention_mask: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, past_key_values: 
tuple[tuple[torch.FloatTensor]] | None = None, use_cache: bool | None = None, cache_position: torch.Tensor | None = None, logits_to_keep: int | torch.Tensor = 0, **kwargs: Unpack[TransformersKwargs], ) -> tuple | CausalLMOutputWithCrossAttentions: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Example: ```python >>> from transformers import AutoTokenizer, Data2VecTextForCausalLM, Data2VecTextConfig >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("facebook/data2vec-text-base") >>> config = Data2VecTextConfig.from_pretrained("facebook/data2vec-text-base") >>> config.is_decoder = True >>> model = Data2VecTextForCausalLM.from_pretrained("facebook/data2vec-text-base", config=config) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits ```""" if labels is not None: use_cache = False outputs: BaseModelOutputWithPoolingAndCrossAttentions = self.data2vec_text( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, return_dict=True, **kwargs, ) hidden_states = outputs.last_hidden_state # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = 
self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) @auto_docstring class Data2VecTextForMaskedLM(Data2VecTextPreTrainedModel): _tied_weights_keys = { "lm_head.decoder.weight": "data2vec_text.embeddings.word_embeddings.weight", "lm_head.decoder.bias": "lm_head.bias", } def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning( "If you want to use `Data2VecTextForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." ) self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False) self.lm_head = Data2VecTextLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, token_type_ids: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, encoder_hidden_states: torch.FloatTensor | None = None, encoder_attention_mask: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple | MaskedLMOutput: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ outputs = self.data2vec_text( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, return_dict=True, **kwargs, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() labels = labels.to(prediction_scores.device) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" Data2VecText Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""" ) class Data2VecTextForSequenceClassification(Data2VecTextPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False) self.classifier = Data2VecTextClassificationHead(config) # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, token_type_ids: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple | SequenceClassifierOutput: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" outputs = self.data2vec_text( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, return_dict=True, **kwargs, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class Data2VecTextForMultipleChoice(Data2VecTextPreTrainedModel): def __init__(self, config): super().__init__(config) self.data2vec_text = Data2VecTextModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, token_type_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple | MultipleChoiceModelOutput: 
r""" input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. 
""" num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.data2vec_text( flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, inputs_embeds=flat_inputs_embeds, return_dict=True, **kwargs, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() labels = labels.to(reshaped_logits.device) loss = loss_fct(reshaped_logits, labels) return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class Data2VecTextForTokenClassification(Data2VecTextPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, 
token_type_ids: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple | TokenClassifierOutput: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ outputs = self.data2vec_text( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, return_dict=True, **kwargs, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() labels = labels.to(logits.device) loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class Data2VecTextForQuestionAnswering(Data2VecTextPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, token_type_ids: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, start_positions: torch.LongTensor | None = None, end_positions: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple | QuestionAnsweringModelOutput: outputs = self.data2vec_text( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, 
position_ids=position_ids, inputs_embeds=inputs_embeds, return_dict=True, **kwargs, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "Data2VecTextForCausalLM", "Data2VecTextForMaskedLM", "Data2VecTextForMultipleChoice", "Data2VecTextForQuestionAnswering", "Data2VecTextForSequenceClassification", "Data2VecTextForTokenClassification", "Data2VecTextModel", "Data2VecTextPreTrainedModel", ]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/data2vec/modular_data2vec_text.py", "license": "Apache License 2.0", "lines": 503, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/ernie/modular_ernie.py
# Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch ERNIE model.""" import torch import torch.nn as nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ... import initialization as init from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...masking_utils import create_bidirectional_mask, create_causal_mask from ...modeling_outputs import ( BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, logging from ...utils.generic import can_return_tuple, merge_with_config_defaults from ...utils.output_capturing import capture_outputs from ..bert.modeling_bert import ( BertCrossAttention, BertEmbeddings, BertEncoder, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForPreTrainingOutput, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertLMPredictionHead, BertModel, BertPooler, BertSelfAttention, ) from .configuration_ernie import ErnieConfig logger = logging.get_logger(__name__) class ErnieEmbeddings(BertEmbeddings): """Construct the embeddings from word, position and 
token_type embeddings.""" def __init__(self, config): super().__init__(config) self.use_task_id = config.use_task_id if config.use_task_id: self.task_type_embeddings = nn.Embedding(config.task_type_vocab_size, config.hidden_size) def forward( self, input_ids: torch.LongTensor | None = None, token_type_ids: torch.LongTensor | None = None, task_type_ids: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, past_key_values_length: int = 0, ) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): # NOTE: We assume either pos ids to have bsz == 1 (broadcastable) or bsz == effective bsz (input_shape[0]) buffered_token_type_ids = self.token_type_ids.expand(position_ids.shape[0], -1) buffered_token_type_ids = torch.gather(buffered_token_type_ids, dim=1, index=position_ids) token_type_ids = buffered_token_type_ids.expand(batch_size, seq_length) else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) # .to is better than using _no_split_modules on ErnieEmbeddings as it's the first module and >1/2 the model size inputs_embeds = inputs_embeds.to(token_type_embeddings.device) embeddings = inputs_embeds + token_type_embeddings position_embeddings = self.position_embeddings(position_ids) embeddings = 
embeddings + position_embeddings # add `task_type_id` for ERNIE model if self.use_task_id: if task_type_ids is None: task_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) task_type_embeddings = self.task_type_embeddings(task_type_ids) embeddings += task_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class ErnieSelfAttention(BertSelfAttention): pass class ErnieCrossAttention(BertCrossAttention): pass class ErnieLayer(BertLayer): pass class ErniePooler(BertPooler): pass class ErnieLMPredictionHead(BertLMPredictionHead): pass class ErnieEncoder(BertEncoder): pass @auto_docstring class ErniePreTrainedModel(PreTrainedModel): config_class = ErnieConfig base_model_prefix = "ernie" supports_gradient_checkpointing = True _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _supports_attention_backend = True _can_record_outputs = { "hidden_states": ErnieLayer, "attentions": ErnieSelfAttention, "cross_attentions": ErnieCrossAttention, } @torch.no_grad() def _init_weights(self, module): """Initialize the weights""" super()._init_weights(module) if isinstance(module, ErnieLMPredictionHead): init.zeros_(module.bias) elif isinstance(module, ErnieEmbeddings): init.copy_(module.position_ids, torch.arange(module.position_ids.shape[-1]).expand((1, -1))) init.zeros_(module.token_type_ids) class ErnieModel(BertModel): _no_split_modules = ["ErnieLayer"] def __init__(self, config, add_pooling_layer=True): super().__init__(self, config) self.config = config self.gradient_checkpointing = False self.embeddings = ErnieEmbeddings(config) self.encoder = ErnieEncoder(config) self.pooler = ErniePooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() @merge_with_config_defaults @capture_outputs @auto_docstring def forward( self, input_ids: torch.Tensor | None = None, attention_mask: torch.Tensor | None = None, 
token_type_ids: torch.Tensor | None = None, task_type_ids: torch.Tensor | None = None, position_ids: torch.Tensor | None = None, inputs_embeds: torch.Tensor | None = None, encoder_hidden_states: torch.Tensor | None = None, encoder_attention_mask: torch.Tensor | None = None, past_key_values: Cache | None = None, use_cache: bool | None = None, cache_position: torch.Tensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor] | BaseModelOutputWithPoolingAndCrossAttentions: r""" task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Task type embedding is a special embedding to represent the characteristic of different tasks, such as word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We assign a `task_type_id` to each task and the `task_type_id` is in the range `[0, config.task_type_vocab_size-1] """ if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False if use_cache and past_key_values is None: past_key_values = ( EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config)) if encoder_hidden_states is not None or self.config.is_encoder_decoder else DynamicCache(config=self.config) ) if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=device) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, # specific to ernie task_type_ids=task_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) attention_mask, encoder_attention_mask = self._create_attention_masks( attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, embedding_output=embedding_output, encoder_hidden_states=encoder_hidden_states, cache_position=cache_position, past_key_values=past_key_values, ) encoder_outputs = self.encoder( embedding_output, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_ids=position_ids, **kwargs, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else 
None return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, ) # Copied from transformers.models.bert.modeling_bert.BertModel._create_attention_masks def _create_attention_masks( self, attention_mask, encoder_attention_mask, embedding_output, encoder_hidden_states, cache_position, past_key_values, ): if self.config.is_decoder: attention_mask = create_causal_mask( config=self.config, inputs_embeds=embedding_output, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, ) else: attention_mask = create_bidirectional_mask( config=self.config, inputs_embeds=embedding_output, attention_mask=attention_mask, ) if encoder_attention_mask is not None: encoder_attention_mask = create_bidirectional_mask( config=self.config, inputs_embeds=embedding_output, attention_mask=encoder_attention_mask, encoder_hidden_states=encoder_hidden_states, ) return attention_mask, encoder_attention_mask class ErnieForPreTrainingOutput(BertForPreTrainingOutput): pass class ErnieForPreTraining(BertForPreTraining): _tied_weights_keys = { "cls.predictions.decoder.bias": "cls.predictions.bias", "cls.predictions.decoder.weight": "ernie.embeddings.word_embeddings.weight", } @can_return_tuple @auto_docstring def forward( self, input_ids: torch.Tensor | None = None, attention_mask: torch.Tensor | None = None, token_type_ids: torch.Tensor | None = None, task_type_ids: torch.Tensor | None = None, position_ids: torch.Tensor | None = None, inputs_embeds: torch.Tensor | None = None, labels: torch.Tensor | None = None, next_sentence_label: torch.Tensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor] | ErnieForPreTrainingOutput: r""" task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Task type embedding is a special embedding to represent the characteristic of different tasks, such as word-aware 
pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We assign a `task_type_id` to each task and the `task_type_id` is in the range `[0, config.task_type_vocab_size-1] labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`: - 0 indicates sequence B is a continuation of sequence A, - 1 indicates sequence B is a random sequence. Example: ```python >>> from transformers import AutoTokenizer, ErnieForPreTraining >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh") >>> model = ErnieForPreTraining.from_pretrained("nghuyong/ernie-1.0-base-zh") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.prediction_logits >>> seq_relationship_logits = outputs.seq_relationship_logits ``` """ outputs = self.ernie( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, task_type_ids=task_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, return_dict=True, **kwargs, ) sequence_output, pooled_output = outputs[:2] prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) total_loss = None if labels is not None and next_sentence_label is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), 
next_sentence_label.view(-1)) total_loss = masked_lm_loss + next_sentence_loss return ErnieForPreTrainingOutput( loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class ErnieForCausalLM(BertLMHeadModel): @can_return_tuple @auto_docstring def forward( self, input_ids: torch.Tensor | None = None, attention_mask: torch.Tensor | None = None, token_type_ids: torch.Tensor | None = None, task_type_ids: torch.Tensor | None = None, position_ids: torch.Tensor | None = None, inputs_embeds: torch.Tensor | None = None, encoder_hidden_states: torch.Tensor | None = None, encoder_attention_mask: torch.Tensor | None = None, labels: torch.Tensor | None = None, past_key_values: list[torch.Tensor] | None = None, use_cache: bool | None = None, cache_position: torch.Tensor | None = None, logits_to_keep: int | torch.Tensor = 0, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor] | CausalLMOutputWithCrossAttentions: r""" task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Task type embedding is a special embedding to represent the characteristic of different tasks, such as word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We assign a `task_type_id` to each task and the `task_type_id` is in the range `[0, config.task_type_vocab_size-1] labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]` """ if labels is not None: use_cache = False outputs: BaseModelOutputWithPoolingAndCrossAttentions = self.ernie( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, task_type_ids=task_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, return_dict=True, **kwargs, ) hidden_states = outputs.last_hidden_state # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.cls(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) class ErnieForMaskedLM(BertForMaskedLM): _tied_weights_keys = { "cls.predictions.decoder.bias": "cls.predictions.bias", "cls.predictions.decoder.weight": "ernie.embeddings.word_embeddings.weight", } @can_return_tuple @auto_docstring def forward( self, input_ids: torch.Tensor | None = None, attention_mask: torch.Tensor | None = None, token_type_ids: torch.Tensor | None = None, task_type_ids: torch.Tensor | None = None, position_ids: torch.Tensor | None = None, inputs_embeds: torch.Tensor | None = None, encoder_hidden_states: torch.Tensor | None = None, encoder_attention_mask: torch.Tensor | None = None, labels: torch.Tensor | None = 
None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor] | MaskedLMOutput: r""" task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Task type embedding is a special embedding to represent the characteristic of different tasks, such as word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We assign a `task_type_id` to each task and the `task_type_id` is in the range `[0, config.task_type_vocab_size-1] labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ outputs = self.ernie( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, task_type_ids=task_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, return_dict=True, **kwargs, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class ErnieForNextSentencePrediction(BertForNextSentencePrediction): @can_return_tuple @auto_docstring def forward( self, input_ids: torch.Tensor | None = None, attention_mask: torch.Tensor | None = None, token_type_ids: torch.Tensor | None = None, task_type_ids: torch.Tensor | None = None, position_ids: torch.Tensor | None = None, inputs_embeds: torch.Tensor | None = None, labels: torch.Tensor | None = None, 
**kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor] | NextSentencePredictorOutput:
        r"""
        task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range
            `[0, config.task_type_vocab_size-1]`
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see `input_ids` docstring). Indices should be in `[0, 1]`:

            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, ErnieForNextSentencePrediction
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
        >>> model = ErnieForNextSentencePrediction.from_pretrained("nghuyong/ernie-1.0-base-zh")

        >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
        >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
        >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")

        >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
        >>> logits = outputs.logits
        >>> assert logits[0, 0] < logits[0, 1]  # next sentence was random
        ```
        """
        # Run the base Ernie encoder; return_dict=True so outputs can be addressed by name below.
        outputs = self.ernie(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            task_type_ids=task_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            return_dict=True,
            **kwargs,
        )

        # outputs[1] is the pooled output; the NSP head maps it to two logits
        # (is-next vs. random continuation).
        pooled_output = outputs[1]

        seq_relationship_scores = self.cls(pooled_output)

        next_sentence_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))

        return NextSentencePredictorOutput(
            loss=next_sentence_loss,
            logits=seq_relationship_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


# Sequence-level classification/regression head. Inherits the Bert implementation and only
# overrides `forward` to thread Ernie's extra `task_type_ids` input through the encoder.
class ErnieForSequenceClassification(BertForSequenceClassification):
    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        token_type_ids: torch.Tensor | None = None,
        task_type_ids: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        inputs_embeds: torch.Tensor | None = None,
        labels: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor] | SequenceClassifierOutput:
        r"""
        task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range
            `[0, config.task_type_vocab_size-1]`
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs = self.ernie(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            task_type_ids=task_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            return_dict=True,
            **kwargs,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once (and cache it on the config) from num_labels
            # and the label dtype when the caller did not set it explicitly.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


# Multiple-choice head: choices are flattened into the batch dimension, scored
# independently, then reshaped back to (batch_size, num_choices).
class ErnieForMultipleChoice(BertForMultipleChoice):
    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        token_type_ids: torch.Tensor | None = None,
        task_type_ids: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        inputs_embeds: torch.Tensor | None = None,
        labels: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor] | MultipleChoiceModelOutput:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        task_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range
            `[0, config.task_type_vocab_size-1]`
        position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        # Flatten (batch, num_choices, ...) -> (batch * num_choices, ...) so each choice
        # is encoded as an independent sequence.
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.ernie(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            task_type_ids=task_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            return_dict=True,
            **kwargs,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # One scalar score per flattened choice, reshaped back so CrossEntropyLoss sees
        # num_choices classes per example.
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


# Token-level classification head (e.g. NER): one label per input token.
class ErnieForTokenClassification(BertForTokenClassification):
    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        token_type_ids: torch.Tensor | None = None,
        task_type_ids: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        inputs_embeds: torch.Tensor | None = None,
        labels: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor] | TokenClassifierOutput:
        r"""
        task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range
            `[0, config.task_type_vocab_size-1]`
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        outputs = self.ernie(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            task_type_ids=task_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            return_dict=True,
            **kwargs,
        )

        # outputs[0] is the per-token hidden-state sequence; classify each position.
        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


# Extractive question-answering head: predicts start/end logits over the sequence.
class ErnieForQuestionAnswering(BertForQuestionAnswering):
    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        token_type_ids: torch.Tensor | None = None,
        task_type_ids: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        inputs_embeds: torch.Tensor | None = None,
        start_positions: torch.Tensor | None = None,
        end_positions: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor] | QuestionAnsweringModelOutput:
        r"""
        task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task.
            We assign a `task_type_id` to each task and the `task_type_id` is in the range
            `[0, config.task_type_vocab_size-1]`
        """
        outputs = self.ernie(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            task_type_ids=task_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            return_dict=True,
            **kwargs,
        )

        sequence_output = outputs[0]

        # qa_outputs projects each token to 2 values: (start_logit, end_logit).
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


# Public API of this module, consumed by the modular-model converter.
__all__ = [
    "ErnieForCausalLM",
    "ErnieForMaskedLM",
    "ErnieForMultipleChoice",
    "ErnieForNextSentencePrediction",
    "ErnieForPreTraining",
    "ErnieForQuestionAnswering",
    "ErnieForSequenceClassification",
    "ErnieForTokenClassification",
    "ErnieModel",
    "ErniePreTrainedModel",
]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/ernie/modular_ernie.py", "license": "Apache License 2.0", "lines": 787, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/roberta/modular_roberta.py
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch RoBERTa model."""

import torch
import torch.nn as nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ... import initialization as init
from ...activations import gelu
from ...generation import GenerationMixin
from ...modeling_outputs import (
    BaseModelOutputWithPoolingAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, logging
from ...utils.generic import can_return_tuple
from ..bert.modeling_bert import BertCrossAttention, BertEmbeddings, BertLayer, BertModel, BertSelfAttention
from .configuration_roberta import RobertaConfig


logger = logging.get_logger(__name__)


class RobertaEmbeddings(BertEmbeddings):
    # Same as BertEmbeddings, except position ids are offset by the padding index
    # (RoBERTa's convention: real positions start at padding_idx + 1 and padded
    # positions keep padding_idx).
    def __init__(self, config):
        super().__init__(config)
        del self.pad_token_id
        del self.position_embeddings
        self.padding_idx = config.pad_token_id
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
        )

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        token_type_ids: torch.LongTensor | None = None,
        position_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        past_key_values_length: int = 0,
    ):
        if position_ids is None:
            if input_ids is not None:
                # Create the position ids from the input token ids. Any padded tokens remain padded.
                position_ids = self.create_position_ids_from_input_ids(
                    input_ids, self.padding_idx, past_key_values_length
                )
            else:
                position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, self.padding_idx)

        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        batch_size, seq_length = input_shape

        # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
        # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
        # issue #5664
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                # NOTE: We assume either pos ids to have bsz == 1 (broadcastable) or bsz == effective bsz (input_shape[0])
                buffered_token_type_ids = self.token_type_ids.expand(position_ids.shape[0], -1)
                buffered_token_type_ids = torch.gather(buffered_token_type_ids, dim=1, index=position_ids)
                token_type_ids = buffered_token_type_ids.expand(batch_size, seq_length)
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + token_type_embeddings
        position_embeddings = self.position_embeddings(position_ids)
        embeddings = embeddings + position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings

    @staticmethod
    def create_position_ids_from_inputs_embeds(inputs_embeds, padding_idx):
        """
        We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.

        Args:
            inputs_embeds: torch.Tensor

        Returns: torch.Tensor
        """
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]

        position_ids = torch.arange(
            padding_idx + 1, sequence_length + padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
        )
        return position_ids.unsqueeze(0).expand(input_shape)

    @staticmethod
    def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
        """
        Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
        symbols are ignored. This is modified from fairseq's `utils.make_positions`.

        Args:
            x: torch.Tensor x:

        Returns: torch.Tensor
        """
        # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
        mask = input_ids.ne(padding_idx).int()
        incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
        return incremental_indices.long() + padding_idx


# The attention/layer classes are unchanged from BERT; they exist so the modular
# converter emits RoBERTa-named copies (and so _can_record_outputs below can
# reference RoBERTa-specific types).
class RobertaSelfAttention(BertSelfAttention):
    pass


class RobertaCrossAttention(BertCrossAttention):
    pass


class RobertaLayer(BertLayer):
    pass


@auto_docstring
class RobertaPreTrainedModel(PreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"
    supports_gradient_checkpointing = True
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": RobertaLayer,
        "attentions": RobertaSelfAttention,
        "cross_attentions": RobertaCrossAttention,
    }

    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize the weights"""
        super()._init_weights(module)
        if isinstance(module, RobertaLMHead):
            init.zeros_(module.bias)
        elif isinstance(module, RobertaEmbeddings):
            init.copy_(module.position_ids, torch.arange(module.position_ids.shape[-1]).expand((1, -1)))
            init.zeros_(module.token_type_ids)


class RobertaModel(BertModel):
    def __init__(self, config, add_pooling_layer=True):
        # BUG FIX: the original called `super().__init__(self, config)`, which passes
        # `self` a second time through the bound super() call — BertModel.__init__
        # then receives `config=self` and `add_pooling_layer=config`, and the real
        # `add_pooling_layer` argument (passed as False by several heads in this
        # file) was silently dropped. Forward the actual arguments instead.
        super().__init__(config, add_pooling_layer)


@auto_docstring(
    custom_intro="""
    RoBERTa Model with a `language modeling` head on top for CLM fine-tuning.
    """
)
class RobertaForCausalLM(RobertaPreTrainedModel, GenerationMixin):
    # Tie the LM head decoder to the input word embeddings (weight sharing).
    _tied_weights_keys = {
        "lm_head.decoder.weight": "roberta.embeddings.word_embeddings.weight",
        "lm_head.decoder.bias": "lm_head.bias",
    }

    def __init__(self, config):
        super().__init__(config)

        if not config.is_decoder:
            logger.warning("If you want to use `RobertaLMHeadModel` as a standalone, add `is_decoder=True.`")

        self.roberta = RobertaModel(config, add_pooling_layer=False)
        self.lm_head = RobertaLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head.decoder

    def set_output_embeddings(self, new_embeddings):
        self.lm_head.decoder = new_embeddings

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.FloatTensor | None = None,
        token_type_ids: torch.LongTensor | None = None,
        position_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        encoder_hidden_states: torch.FloatTensor | None = None,
        encoder_attention_mask: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        past_key_values: tuple[tuple[torch.FloatTensor]] | None = None,
        use_cache: bool | None = None,
        cache_position: torch.Tensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor] | CausalLMOutputWithCrossAttentions:
        r"""
        token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.
            This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
            >= 2. All the value in this tensor should be always < type_vocab_size.

            [What are token type IDs?](../glossary#token-type-ids)
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`

        Example:

        ```python
        >>> from transformers import AutoTokenizer, RobertaForCausalLM, AutoConfig
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base")
        >>> config = AutoConfig.from_pretrained("FacebookAI/roberta-base")
        >>> config.is_decoder = True
        >>> model = RobertaForCausalLM.from_pretrained("FacebookAI/roberta-base", config=config)

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> prediction_logits = outputs.logits
        ```"""
        # Caching is incompatible with teacher-forced training on labels.
        if labels is not None:
            use_cache = False

        outputs: BaseModelOutputWithPoolingAndCrossAttentions = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            return_dict=True,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            # loss_function handles the one-position label shift internally.
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )


@auto_docstring
class RobertaForMaskedLM(RobertaPreTrainedModel):
    _tied_weights_keys = {
        "lm_head.decoder.weight": "roberta.embeddings.word_embeddings.weight",
        "lm_head.decoder.bias": "lm_head.bias",
    }

    def __init__(self, config):
        super().__init__(config)

        if config.is_decoder:
            logger.warning(
                "If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )

        self.roberta = RobertaModel(config, add_pooling_layer=False)
        self.lm_head = RobertaLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head.decoder

    def set_output_embeddings(self, new_embeddings):
        self.lm_head.decoder = new_embeddings

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.FloatTensor | None = None,
        token_type_ids: torch.LongTensor | None = None,
        position_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        encoder_hidden_states: torch.FloatTensor | None = None,
        encoder_attention_mask: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor] | MaskedLMOutput:
        r"""
        token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.
            This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
            >= 2. All the value in this tensor should be always < type_vocab_size.

            [What are token type IDs?](../glossary#token-type-ids)
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """
        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            return_dict=True,
            **kwargs,
        )
        sequence_output = outputs[0]
        prediction_scores = self.lm_head(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            # move labels to correct device
            labels = labels.to(prediction_scores.device)
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


class RobertaLMHead(nn.Module):
    """Roberta Head for masked language modeling."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
        # Separate bias parameter: the decoder weight is tied to the word embeddings,
        # and this bias is tied to decoder.bias via _tied_weights_keys.
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

    def forward(self, features, **kwargs):
        x = self.dense(features)
        x = gelu(x)
        x = self.layer_norm(x)

        # project back to size of vocabulary with bias
        x = self.decoder(x)

        return x


@auto_docstring(
    custom_intro="""
    RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """
)
class RobertaForSequenceClassification(RobertaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.roberta = RobertaModel(config, add_pooling_layer=False)
        self.classifier = RobertaClassificationHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.FloatTensor | None = None,
        token_type_ids: torch.LongTensor | None = None,
        position_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor] | SequenceClassifierOutput:
        r"""
        token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.
            This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
            >= 2. All the value in this tensor should be always < type_vocab_size.

            [What are token type IDs?](../glossary#token-type-ids)
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            return_dict=True,
            **kwargs,
        )
        # No pooler in this configuration: the classification head reads the <s> token
        # from the raw sequence output.
        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            # move labels to correct device
            labels = labels.to(logits.device)
            # Infer the problem type once from num_labels/label dtype if not set explicitly.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@auto_docstring
class RobertaForMultipleChoice(RobertaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.roberta = RobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        token_type_ids: torch.LongTensor | None = None,
        attention_mask: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        position_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor] | MultipleChoiceModelOutput:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.
            This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
            >= 2. All the value in this tensor should be always < type_vocab_size.

            [What are token type IDs?](../glossary#token-type-ids)
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        """
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        # Flatten (batch, num_choices, ...) -> (batch * num_choices, ...): each choice
        # is encoded independently and scored with a single linear unit.
        flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        flat_inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.roberta(
            flat_input_ids,
            position_ids=flat_position_ids,
            token_type_ids=flat_token_type_ids,
            attention_mask=flat_attention_mask,
            inputs_embeds=flat_inputs_embeds,
            return_dict=True,
            **kwargs,
        )
        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            # move labels to correct device
            labels = labels.to(reshaped_logits.device)
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@auto_docstring
class RobertaForTokenClassification(RobertaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.roberta = RobertaModel(config, add_pooling_layer=False)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.FloatTensor | None = None,
        token_type_ids: torch.LongTensor | None = None,
        position_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor] | TokenClassifierOutput:
        r"""
        token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.
            This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
            >= 2. All the value in this tensor should be always < type_vocab_size.

            [What are token type IDs?](../glossary#token-type-ids)
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            return_dict=True,
            **kwargs,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            # move labels to correct device
            labels = labels.to(logits.device)
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


class RobertaClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, features, **kwargs):
        x = features[:, 0, :]  # take <s> token (equiv. to [CLS])
        x = self.dropout(x)
        x = self.dense(x)
        x = torch.tanh(x)
        x = self.dropout(x)
        x = self.out_proj(x)
        return x


@auto_docstring
class RobertaForQuestionAnswering(RobertaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.roberta = RobertaModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.FloatTensor | None = None,
        token_type_ids: torch.LongTensor | None = None,
        position_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        start_positions: torch.LongTensor | None = None,
        end_positions: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor] | QuestionAnsweringModelOutput:
        r"""
        token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.
            This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
            >= 2. All the value in this tensor should be always < type_vocab_size.

            [What are token type IDs?](../glossary#token-type-ids)
        """
        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            return_dict=True,
            **kwargs,
        )

        sequence_output = outputs[0]

        # qa_outputs projects each token to 2 values: (start_logit, end_logit).
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


# Public API of this module, consumed by the modular-model converter.
__all__ = [
    "RobertaForCausalLM",
    "RobertaForMaskedLM",
    "RobertaForMultipleChoice",
    "RobertaForQuestionAnswering",
    "RobertaForSequenceClassification",
    "RobertaForTokenClassification",
    "RobertaModel",
    "RobertaPreTrainedModel",
]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/roberta/modular_roberta.py", "license": "Apache License 2.0", "lines": 646, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/xlm_roberta/modular_xlm_roberta.py
# Copyright 2019 Facebook AI Research and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch XLM-RoBERTa model.""" import torch from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...modeling_outputs import ( BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring from ...utils.generic import can_return_tuple from ..roberta.modeling_roberta import ( RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) @auto_docstring class XLMRobertaPreTrainedModel(RobertaPreTrainedModel): base_model_prefix = "roberta" @auto_docstring class XLMRobertaModel(RobertaModel): pass @auto_docstring( custom_intro=""" XLM-RoBERTa Model with a `language modeling` head on top for CLM fine-tuning. 
""" ) class XLMRobertaForCausalLM(RobertaForCausalLM): _tied_weights_keys = { "lm_head.decoder.weight": "roberta.embeddings.word_embeddings.weight", "lm_head.decoder.bias": "lm_head.bias", } def __init__(self, config): super().__init__(config) del self.xlm_roberta self.roberta = XLMRobertaModel(config, add_pooling_layer=False) @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, token_type_ids: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, encoder_hidden_states: torch.FloatTensor | None = None, encoder_attention_mask: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, past_key_values: tuple[tuple[torch.FloatTensor]] | None = None, use_cache: bool | None = None, logits_to_keep: int | torch.Tensor = 0, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor] | CausalLMOutputWithCrossAttentions: r""" token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value >= 2. All the value in this tensor should be always < type_vocab_size. [What are token type IDs?](../glossary#token-type-ids) labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Example: ```python >>> from transformers import AutoTokenizer, XLMRobertaForCausalLM, AutoConfig >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base") >>> config = AutoConfig.from_pretrained("FacebookAI/roberta-base") >>> config.is_decoder = True >>> model = XLMRobertaForCausalLM.from_pretrained("FacebookAI/roberta-base", config=config) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits ```""" if labels is not None: use_cache = False outputs: BaseModelOutputWithPoolingAndCrossAttentions = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, return_dict=True, **kwargs, ) hidden_states = outputs.last_hidden_state # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) @auto_docstring class XLMRobertaForMaskedLM(RobertaForMaskedLM): _tied_weights_keys = { "lm_head.decoder.weight": "roberta.embeddings.word_embeddings.weight", 
"lm_head.decoder.bias": "lm_head.bias", } def __init__(self, config): super().__init__(config) del self.xlm_roberta self.roberta = XLMRobertaModel(config, add_pooling_layer=False) @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, token_type_ids: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, encoder_hidden_states: torch.FloatTensor | None = None, encoder_attention_mask: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor] | MaskedLMOutput: r""" token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value >= 2. All the value in this tensor should be always < type_vocab_size. [What are token type IDs?](../glossary#token-type-ids) labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, return_dict=True, **kwargs, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: # move labels to correct device labels = labels.to(prediction_scores.device) loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" XLM-RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """ ) class XLMRobertaForSequenceClassification(RobertaForSequenceClassification): def __init__(self, config): super().__init__(config) del self.xlm_roberta self.roberta = XLMRobertaModel(config, add_pooling_layer=False) @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, token_type_ids: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor] | SequenceClassifierOutput: r""" token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in `[0,1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value >= 2. All the value in this tensor should be always < type_vocab_size. [What are token type IDs?](../glossary#token-type-ids) labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, return_dict=True, **kwargs, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: # move labels to correct device labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class 
XLMRobertaForMultipleChoice(RobertaForMultipleChoice): def __init__(self, config): super().__init__(config) del self.xlm_roberta self.roberta = XLMRobertaModel(config, add_pooling_layer=False) @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, token_type_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor] | MultipleChoiceModelOutput: r""" input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value >= 2. All the value in this tensor should be always < type_vocab_size. [What are token type IDs?](../glossary#token-type-ids) labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. 
[What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. """ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.roberta( flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, inputs_embeds=flat_inputs_embeds, return_dict=True, **kwargs, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: # move labels to correct device labels = labels.to(reshaped_logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class XLMRobertaForTokenClassification(RobertaForTokenClassification): def __init__(self, config): super().__init__(config) del self.xlm_roberta self.roberta = XLMRobertaModel(config, add_pooling_layer=False) @can_return_tuple 
@auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, token_type_ids: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor] | TokenClassifierOutput: r""" token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value >= 2. All the value in this tensor should be always < type_vocab_size. [What are token type IDs?](../glossary#token-type-ids) labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
""" outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, return_dict=True, **kwargs, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: # move labels to correct device labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class XLMRobertaForQuestionAnswering(RobertaForQuestionAnswering): def __init__(self, config): super().__init__(config) del self.xlm_roberta self.roberta = XLMRobertaModel(config, add_pooling_layer=False) @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, token_type_ids: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, start_positions: torch.LongTensor | None = None, end_positions: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor] | QuestionAnsweringModelOutput: r""" token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value >= 2. All the value in this tensor should be always < type_vocab_size. 
[What are token type IDs?](../glossary#token-type-ids) """ outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, return_dict=True, **kwargs, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "XLMRobertaForCausalLM", "XLMRobertaForMaskedLM", "XLMRobertaForMultipleChoice", "XLMRobertaForQuestionAnswering", "XLMRobertaForSequenceClassification", "XLMRobertaForTokenClassification", "XLMRobertaModel", "XLMRobertaPreTrainedModel", ]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/xlm_roberta/modular_xlm_roberta.py", "license": "Apache License 2.0", "lines": 474, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/xlm_roberta_xl/modular_xlm_roberta_xl.py
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch XLM RoBERTa xl,xxl model.""" import torch import torch.nn as nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import gelu from ...cache_utils import Cache from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, logging from ...utils.generic import can_return_tuple from ..bert.modeling_bert import ( BertAttention, BertCrossAttention, BertLayer, BertModel, BertSelfAttention, ) from ..roberta.modeling_roberta import ( RobertaClassificationHead, RobertaEmbeddings, RobertaPreTrainedModel, ) logger = logging.get_logger(__name__) class XLMRobertaXLEmbeddings(RobertaEmbeddings): def __init__(self, config): super().__init__(config) del self.LayerNorm def forward( self, input_ids: torch.LongTensor | 
None = None, token_type_ids: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, past_key_values_length: int = 0, ) -> torch.Tensor: if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = self.create_position_ids_from_input_ids( input_ids, self.padding_idx, past_key_values_length ) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, self.padding_idx) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): # NOTE: We assume either pos ids to have bsz == 1 (broadcastable) or bsz == effective bsz (input_shape[0]) buffered_token_type_ids = self.token_type_ids.expand(position_ids.shape[0], -1) buffered_token_type_ids = torch.gather(buffered_token_type_ids, dim=1, index=position_ids) token_type_ids = buffered_token_type_ids.expand(batch_size, seq_length) else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings position_embeddings = self.position_embeddings(position_ids) embeddings = embeddings + position_embeddings embeddings = self.dropout(embeddings) return embeddings class XLMRobertaXLSelfAttention(BertSelfAttention): pass class XLMRobertaXLCrossAttention(BertCrossAttention): pass class XLMRobertaXLSelfOutput(nn.Module): def __init__(self, config): 
super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states class XLMRobertaXLAttention(BertAttention): def __init__(self, config, is_causal=False, layer_idx=None, is_cross_attention=False): super().__init__(config, is_causal, layer_idx, is_cross_attention) del self.LayerNorm self.self_attn_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.FloatTensor | None = None, encoder_hidden_states: torch.FloatTensor | None = None, encoder_attention_mask: torch.FloatTensor | None = None, past_key_values: tuple[tuple[torch.FloatTensor]] | None = None, cache_position: torch.Tensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor]: intermediate = self.self_attn_layer_norm(hidden_states) attention_mask = attention_mask if not self.is_cross_attention else encoder_attention_mask attention_output, attn_weights = self.self( intermediate, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, past_key_values=past_key_values, cache_position=cache_position, **kwargs, ) attention_output = self.output(attention_output, hidden_states) return attention_output, attn_weights class XLMRobertaXLOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states class XLMRobertaXLLayer(BertLayer): def __init__(self, config, layer_idx=None): super().__init__(config, layer_idx) self.LayerNorm = 
nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def feed_forward_chunk(self, attention_output): intermediate_output = self.LayerNorm(attention_output) intermediate_output = self.intermediate(intermediate_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class XLMRobertaXLEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([XLMRobertaXLLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.FloatTensor | None = None, encoder_hidden_states: torch.FloatTensor | None = None, encoder_attention_mask: torch.FloatTensor | None = None, past_key_values: Cache | None = None, use_cache: bool | None = None, cache_position: torch.Tensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor] | BaseModelOutputWithPastAndCrossAttentions: for i, layer_module in enumerate(self.layer): hidden_states = layer_module( hidden_states, attention_mask, encoder_hidden_states, # as a positional argument for gradient checkpointing encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, cache_position=cache_position, **kwargs, ) # Extra layernorm at the end (causes high fluctuations between different attentions) hidden_states = self.LayerNorm(hidden_states) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, ) @auto_docstring class XLMRobertaXLPreTrainedModel(RobertaPreTrainedModel): base_model_prefix = "roberta" class XLMRobertaXLModel(BertModel): pass class XLMRobertaXLLMHead(nn.Module): """XLM-RoBERTa-XL Head for masked language modeling.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = 
nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) def forward(self, features, **kwargs): x = self.dense(features) x = gelu(x) x = self.layer_norm(x) # project back to size of vocabulary with bias x = self.decoder(x) return x class XLMRobertaXLClassificationHead(RobertaClassificationHead): pass @auto_docstring( custom_intro=""" XLM-RoBERTa-XL Model with a `language modeling` head on top for CLM fine-tuning. """ ) class XLMRobertaXLForCausalLM(XLMRobertaXLPreTrainedModel, GenerationMixin): _tied_weights_keys = { "lm_head.decoder.weight": "roberta.embeddings.word_embeddings.weight", "lm_head.decoder.bias": "lm_head.bias", } def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning("If you want to use `RobertaLMHeadModel` as a standalone, add `is_decoder=True.`") self.roberta = XLMRobertaXLModel(config, add_pooling_layer=False) self.lm_head = XLMRobertaXLLMHead(config) self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings self.lm_head.bias = new_embeddings.bias @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, token_type_ids: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, encoder_hidden_states: torch.FloatTensor | None = None, encoder_attention_mask: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, past_key_values: tuple[tuple[torch.FloatTensor]] | None = None, use_cache: bool | None = None, cache_position: torch.Tensor | None = None, logits_to_keep: int | torch.Tensor = 0, **kwargs: Unpack[TransformersKwargs], ) -> tuple | CausalLMOutputWithCrossAttentions: r""" labels (`torch.LongTensor` of shape 
`(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Example: ```python >>> from transformers import AutoTokenizer, RobertaForCausalLM, RobertaConfig >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base") >>> config = RobertaConfig.from_pretrained("FacebookAI/roberta-base") >>> config.is_decoder = True >>> model = RobertaForCausalLM.from_pretrained("FacebookAI/roberta-base", config=config) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits ``` """ if labels is not None: use_cache = False outputs: BaseModelOutputWithPoolingAndCrossAttentions = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = outputs.last_hidden_state # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) @auto_docstring class 
XLMRobertaXLForMaskedLM(XLMRobertaXLPreTrainedModel): _tied_weights_keys = { "lm_head.decoder.weight": "roberta.embeddings.word_embeddings.weight", "lm_head.decoder.bias": "lm_head.bias", } def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning( "If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." ) self.roberta = XLMRobertaXLModel(config, add_pooling_layer=False) self.lm_head = XLMRobertaXLLMHead(config) self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings self.lm_head.bias = new_embeddings.bias @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, token_type_ids: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, encoder_hidden_states: torch.Tensor | None = None, encoder_attention_mask: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple | MaskedLMOutput: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, return_dict=True, **kwargs, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" XLM-RoBERTa-XL Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """ ) class XLMRobertaXLForSequenceClassification(XLMRobertaXLPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.roberta = XLMRobertaXLModel(config, add_pooling_layer=False) self.classifier = XLMRobertaXLClassificationHead(config) self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, token_type_ids: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple | SequenceClassifierOutput: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. 
Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, return_dict=True, **kwargs, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class XLMRobertaXLForMultipleChoice(XLMRobertaXLPreTrainedModel): def __init__(self, config): super().__init__(config) self.roberta = XLMRobertaXLModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, token_type_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, 
inputs_embeds: torch.FloatTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple | MultipleChoiceModelOutput: r""" input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. 
""" num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.roberta( flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, inputs_embeds=flat_inputs_embeds, return_dict=True, **kwargs, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class XLMRobertaXLForTokenClassification(XLMRobertaXLPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.roberta = XLMRobertaXLModel(config, add_pooling_layer=False) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, token_type_ids: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, 
inputs_embeds: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple | TokenClassifierOutput: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, return_dict=True, **kwargs, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class XLMRobertaXLForQuestionAnswering(XLMRobertaXLPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.roberta = XLMRobertaXLModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.FloatTensor | None = None, token_type_ids: torch.LongTensor | None = None, position_ids: torch.LongTensor | None = None, inputs_embeds: torch.FloatTensor | None = None, start_positions: torch.LongTensor | None = None, end_positions: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> 
tuple | QuestionAnsweringModelOutput: outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, return_dict=True, **kwargs, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "XLMRobertaXLForCausalLM", "XLMRobertaXLForMaskedLM", "XLMRobertaXLForMultipleChoice", "XLMRobertaXLForQuestionAnswering", "XLMRobertaXLForSequenceClassification", "XLMRobertaXLForTokenClassification", "XLMRobertaXLModel", "XLMRobertaXLPreTrainedModel", ]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/xlm_roberta_xl/modular_xlm_roberta_xl.py", "license": "Apache License 2.0", "lines": 633, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/lfm2_vl/configuration_lfm2_vl.py
# Copyright 2025 the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch LFM2-VL model.""" from ...configuration_utils import PreTrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING, AutoConfig logger = logging.get_logger(__name__) class Lfm2VlConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`Lfm2VlForConditionalGeneration`]. It is used to instantiate an Lfm2Vl model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Lfm2-VL-1.6B. e.g. [LiquidAI/LFM2-VL-1.6B](https://huggingface.co/LiquidAI/LFM2-VL-1.6B) Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vision_config (`AutoConfig | dict`, *optional*, defaults to `Siglip2ImageConfig`): The config object or dictionary of the vision backbone. text_config (`AutoConfig | dict`, *optional*, defaults to `Lfm2Config`): The config object or dictionary of the text backbone. image_token_id (`int`, *optional*, defaults to 396): The image token index to encode the image prompt. projector_hidden_act (`str`, *optional*, defaults to `"gelu"`): The activation function used by the multimodal projector. 
projector_hidden_size (`int`, *optional*, defaults to 2560): The hidden size of the multimodal projector. projector_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in the multimodal projector. projector_use_layernorm (`bool`, *optional*, defaults to `True`): Whether to use layernorm in the multimodal projector. downsample_factor (`int`, *optional*, defaults to 2): The downsample_factor factor of the vision backbone. tie_word_embeddings (`bool`, *optional*, defaults to `True`): Whether to tie the word embeddings of the text backbone. """ model_type = "lfm2_vl" sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig} def __init__( self, vision_config=None, text_config=None, image_token_id=396, projector_hidden_act="gelu", projector_hidden_size=2560, projector_bias=True, projector_use_layernorm=True, downsample_factor=2, tie_word_embeddings=True, **kwargs, ): self.image_token_id = image_token_id self.projector_hidden_act = projector_hidden_act self.projector_hidden_size = projector_hidden_size self.projector_bias = projector_bias self.projector_use_layernorm = projector_use_layernorm self.downsample_factor = downsample_factor if isinstance(vision_config, dict): vision_config["model_type"] = vision_config.get("model_type", "siglip2_vision_model") vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config) elif vision_config is None: vision_config = CONFIG_MAPPING["siglip2_vision_model"]() if isinstance(text_config, dict): text_config["model_type"] = text_config.get("model_type", "lfm2") text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) elif text_config is None: text_config = CONFIG_MAPPING["lfm2"]() self.vision_config = vision_config self.text_config = text_config self.tie_word_embeddings = getattr(text_config, "tie_embedding", tie_word_embeddings) super().__init__(**kwargs) __all__ = ["Lfm2VlConfig"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/lfm2_vl/configuration_lfm2_vl.py", "license": "Apache License 2.0", "lines": 82, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/lfm2_vl/image_processing_lfm2_vl_fast.py
# Copyright 2025 the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from functools import lru_cache import torch import torchvision.transforms.v2.functional as tvF from ...image_processing_utils import BatchFeature from ...image_processing_utils_fast import ( BaseImageProcessorFast, group_images_by_shape, reorder_images, ) from ...image_transforms import split_to_tiles from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ImageInput, PILImageResampling, SizeDict, ) from ...processing_utils import ImagesKwargs, Unpack from ...utils import ( TensorType, auto_docstring, logging, ) logger = logging.get_logger(__name__) def round_by_factor(number: float, factor: int) -> int: """Returns the closest integer to 'number' that is divisible by 'factor'.""" return round(number / factor) * factor def find_closest_aspect_ratio( aspect_ratio: float, target_ratios: list[tuple[int, int]], width: int, height: int, image_size: int, ) -> tuple[int, int]: """Find the closest aspect ratio from target_ratios to match the input aspect ratio. Args: aspect_ratio: The aspect ratio to match (width/height). target_ratios: List of possible aspect ratios as tuples of (width, height) integers. width: Original image width in pixels. height: Original image height in pixels. image_size: Base size for calculating target area. Returns: tuple[int, int]: The best matching ratio as (width, height) integers. 
""" best_ratio_diff = float("inf") best_ratio = (1, 1) area = width * height for ratio in target_ratios: target_aspect_ratio = ratio[0] / ratio[1] ratio_diff = abs(aspect_ratio - target_aspect_ratio) # update best ratio if we found a closer match if ratio_diff < best_ratio_diff: best_ratio_diff = ratio_diff best_ratio = ratio # if equally close, prefer the ratio that better matches the original image area elif ratio_diff == best_ratio_diff: target_area = image_size * image_size * ratio[0] * ratio[1] if area > 0.5 * target_area: best_ratio = ratio return best_ratio # copied from Siglip2ImageProcessor @lru_cache(maxsize=256) def get_image_size_for_max_num_patches( image_height: int, image_width: int, patch_size: int, max_num_patches: int, eps: float = 1e-5 ) -> tuple[int, int]: """ Determine image size based on max number of patches, ensure dimensions are divisible by patch size and image is at least 1 patch. Args: image_height (`int`): Original image height. image_width (`int`): Original image width. patch_size (`int`): Patch size for processing. max_num_patches (`int`): Maximum number of patches. eps (`float`): Small threshold for binary search. 
Returns: Tuple: (target_height, target_width) """ def get_scaled_image_size(scale: float, size: int, patch_size: int) -> int: scaled_size = size * scale scaled_size = math.ceil(scaled_size / patch_size) * patch_size # make divisible by patch_size scaled_size = max(patch_size, scaled_size) # ensure at least 1 patch return int(scaled_size) # Binary search for optimal scale scale_min, scale_max = eps / 10, 100.0 while (scale_max - scale_min) >= eps: scale = (scale_min + scale_max) / 2 target_height = get_scaled_image_size(scale, image_height, patch_size) target_width = get_scaled_image_size(scale, image_width, patch_size) num_patches = (target_height / patch_size) * (target_width / patch_size) if num_patches <= max_num_patches: scale_min = scale else: scale_max = scale scale = scale_min target_height = get_scaled_image_size(scale, image_height, patch_size) target_width = get_scaled_image_size(scale, image_width, patch_size) return target_height, target_width def convert_image_to_patches(images: "torch.Tensor", patch_size: int) -> "torch.Tensor": """ Convert 3D array image of shape (image_height, image_width, num_channels) into 2D array of patches of shape (num_patches_height * num_patches_width, patch_size * patch_size * num_channels). """ batch_size, num_channels, image_height, image_width = images.shape num_patches_height = image_height // patch_size num_patches_width = image_width // patch_size patched_image = images.reshape( batch_size, num_channels, num_patches_height, patch_size, num_patches_width, patch_size ) patched_image = patched_image.permute(0, 2, 4, 3, 5, 1) patched_image = patched_image.reshape(batch_size, num_patches_height * num_patches_width, -1) return patched_image def pad_along_first_dim( images: "torch.Tensor", target_length: int, pad_value: int = 0 ) -> tuple["torch.Tensor", "torch.Tensor"]: """ Pad the array along the first dimension. 
""" current_length = images.shape[1] padding_length = target_length - current_length pixel_mask = torch.ones((target_length,), dtype=torch.int32) if padding_length > 0: paddings = (0, 0, 0, padding_length, 0, 0) images = torch.nn.functional.pad(images, paddings, mode="constant", value=pad_value) pixel_mask[-padding_length:] = 0 return images, pixel_mask class Lfm2VlImageProcessorKwargs(ImagesKwargs, total=False): """ downsample_factor (`int`, *optional*, defaults to `2`): The downsampling factor for images used when resizing the image. do_image_splitting (`bool`, *optional*, defaults to `True`): Whether to split large images into a grid of smaller tiles. When enabled, images exceeding the maximum token limit are divided into multiple tiles based on `min_tiles` and `max_tiles` constraints. min_tiles (`int`, *optional*, defaults to `2`): Minimum number of tiles (width × height) to use when splitting an image into a grid. The grid configuration is chosen to maintain the original aspect ratio while staying within the `min_tiles` and `max_tiles` range. max_tiles (`int`, *optional*, defaults to `10`): Maximum number of tiles (width × height) to use when splitting an image into a grid. The grid configuration is chosen to maintain the original aspect ratio while staying within the `min_tiles` and `max_tiles` range. use_thumbnail (`bool`, *optional*, defaults to `True`): Whether to include a thumbnail version of the image when splitting into tiles. The thumbnail provides a low-resolution overview of the entire image and is added as an additional patch when the grid has more than one tile. min_image_tokens (`int`, *optional*, defaults to `64`): Minimum number of image tokens (patches) to generate for an image. Images smaller than this threshold will be upscaled to meet the minimum token requirement. max_image_tokens (`int`, *optional*, defaults to `256`): Maximum number of image tokens (patches) allowed for a single image. 
Images exceeding this limit will be split into multiple tiles or downscaled accordingly. encoder_patch_size (`int`, *optional*, defaults to `16`): The patch size used by the vision encoder. Images are divided into patches of this size, and both height and width must be divisible by this value (after accounting for the downsampling factor). tile_size (`int`, *optional*, defaults to `512`): The size of each tile when splitting large images into a grid. Each tile will be resized to this dimension before being processed into patches. max_pixels_tolerance (`float`, *optional*, defaults to `2.0`): Tolerance factor for determining if an image is too large. An image is considered too large if its pixel count exceeds `max_image_tokens * encoder_patch_size^2 * downsample_factor^2 * max_pixels_tolerance`. return_row_col_info (`bool`, *optional*, defaults to `False`): Whether to return row and column information for each image in the batch. When enabled, the output includes `image_rows`, `image_cols`, and `image_sizes` fields indicating the grid layout and dimensions of processed images. 
""" downsample_factor: int do_image_splitting: bool min_tiles: int max_tiles: int use_thumbnail: bool min_image_tokens: int max_image_tokens: int encoder_patch_size: int tile_size: int max_pixels_tolerance: float do_pad: bool return_row_col_info: bool @auto_docstring class Lfm2VlImageProcessorFast(BaseImageProcessorFast): downsample_factor = 2 do_image_splitting = True min_tiles = 2 max_tiles = 10 use_thumbnail = True min_image_tokens = 64 max_image_tokens = 256 encoder_patch_size = 16 tile_size = 512 max_pixels_tolerance = 2.0 do_resize = True size = {"height": 512, "width": 512} resample = PILImageResampling.BILINEAR do_rescale = True rescale_factor = 1 / 255 do_normalize = True do_pad = True return_row_col_info = False image_mean = IMAGENET_STANDARD_MEAN image_std = IMAGENET_STANDARD_STD valid_kwargs = Lfm2VlImageProcessorKwargs model_input_names = ["pixel_values", "pixel_attention_mask", "spatial_shapes"] def __init__(self, **kwargs: Unpack[Lfm2VlImageProcessorKwargs]): super().__init__(**kwargs) max_thumbnail_image_patches = self.max_image_tokens * self.downsample_factor**2 tile_size_patches = (self.tile_size // self.encoder_patch_size) ** 2 if self.do_image_splitting else 0 self.max_num_patches = max( max_thumbnail_image_patches, tile_size_patches, ) @lru_cache(maxsize=256) def _target_ratios(self, min_tiles: int, max_tiles: int) -> list[tuple[int, int]]: ratios = [ (w, h) for n in range(min_tiles, max_tiles + 1) for w in range(1, n + 1) for h in range(1, n + 1) if min_tiles <= w * h <= max_tiles ] return sorted(set(ratios), key=lambda x: x[0] * x[1]) def _get_grid_layout( self, height: int, width: int, min_tiles: int, max_tiles: int, tile_size: int, ) -> tuple[int, int]: aspect_ratio = width / height target_ratios = self._target_ratios(min_tiles, max_tiles) # find best matching grid configuration grid_width, grid_height = find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, tile_size) target_width = tile_size * grid_width target_height = 
tile_size * grid_height total_patches = grid_width * grid_height return grid_width, grid_height, target_width, target_height, total_patches def crop_image_to_patches( self, image: "torch.Tensor", min_tiles: int, max_tiles: int, tile_size: int, use_thumbnail: bool, thumbnail_size: tuple[int], interpolation: "tvF.InterpolationMode" = None, antialias: bool = True, **kwargs, ) -> "torch.Tensor": """ Processes a high resolution image into patches. This method splits a high resolution image into a grid of smaller patches while trying to maintain the original aspect ratio. It finds the optimal grid configuration within the specified tile constraints. """ batch_size, num_channels, height, width = image.shape grid_width, grid_height, target_width, target_height, total_patches = self._get_grid_layout( height, width, min_tiles=min_tiles, max_tiles=max_tiles, tile_size=tile_size ) resized_image = tvF.resize( image, (target_height, target_width), interpolation=interpolation, antialias=antialias ) # split the image into patches processed_images = split_to_tiles(resized_image, num_tiles_height=grid_height, num_tiles_width=grid_width) # Re-order processed images to a nested image structure, so it can be reordered back correctly # Note that the images can't be stacked because the thumbnail image is of bigger size than patches # Each image in sublist will be of shape (1, C, H, W) processed_images = list(processed_images) if use_thumbnail and grid_width * grid_height != 1: total_patches += 1 thumbnail_image = tvF.resize(image, thumbnail_size, interpolation=interpolation, antialias=antialias) for i in range(batch_size): processed_images[i] = list(processed_images[i]) + list(thumbnail_image[i][None, ...]) return processed_images, grid_width, grid_height # Adapted from Qwen-VL with minor differences def smart_resize( self, height: int, width: int, downsample_factor: int, min_image_tokens: int, max_image_tokens: int, encoder_patch_size: int, ) -> tuple[int, int]: """ Rescales the image 
so that the following conditions are met: 1. Both dimensions (height and width) are divisible by 'encoder_patch_size' * 'downsample_factor'. This ensures no padding is needed in the downsampling step. 2. The total number of pixels is within the range ['smart_resize_min_pixels', 'smart_resize_max_pixels']. 3. The aspect ratio of the image is maintained as closely as possible. """ total_factor = encoder_patch_size * downsample_factor smart_resize_min_pixels = min_image_tokens * encoder_patch_size**2 * downsample_factor**2 smart_resize_max_pixels = max_image_tokens * encoder_patch_size**2 * downsample_factor**2 h_bar = max(total_factor, round_by_factor(height, total_factor)) w_bar = max(total_factor, round_by_factor(width, total_factor)) if h_bar * w_bar > smart_resize_max_pixels: beta = math.sqrt((height * width) / smart_resize_max_pixels) math.floor(height / beta / total_factor) * total_factor h_bar = max(total_factor, math.floor(height / beta / total_factor) * total_factor) w_bar = max(total_factor, math.floor(width / beta / total_factor) * total_factor) elif h_bar * w_bar < smart_resize_min_pixels: beta = math.sqrt(smart_resize_min_pixels / (height * width)) h_bar = math.ceil(height * beta / total_factor) * total_factor w_bar = math.ceil(width * beta / total_factor) * total_factor return w_bar, h_bar def _is_image_too_large( self, height: int, width: int, max_image_tokens: int, encoder_patch_size: int, downsample_factor: int, max_pixels_tolerance: float, ) -> bool: """Check if the image is too large to be processed as one tile.""" total_factor = encoder_patch_size * downsample_factor h_bar = max(encoder_patch_size, round_by_factor(height, total_factor)) w_bar = max(encoder_patch_size, round_by_factor(width, total_factor)) return h_bar * w_bar > max_image_tokens * encoder_patch_size**2 * downsample_factor**2 * max_pixels_tolerance def resize_and_split( self, images: "torch.Tensor", downsample_factor: int, min_tiles: int, max_tiles: int, use_thumbnail: bool, 
min_image_tokens: int, max_image_tokens: int, encoder_patch_size: int, tile_size: int, max_pixels_tolerance: float, interpolation: "tvF.InterpolationMode", ) -> "torch.Tensor": batch_size, _, height, width = images.shape do_image_splitting = not min_tiles == max_tiles == 1 is_image_large = self._is_image_too_large( height=height, width=width, max_image_tokens=max_image_tokens, encoder_patch_size=encoder_patch_size, downsample_factor=downsample_factor, max_pixels_tolerance=max_pixels_tolerance, ) new_width, new_height = self.smart_resize( height=height, width=width, downsample_factor=downsample_factor, min_image_tokens=min_image_tokens, max_image_tokens=max_image_tokens, encoder_patch_size=encoder_patch_size, ) # Big image will be cropped into patches and small images are just resized if is_image_large and do_image_splitting: images, num_cols, num_rows = self.crop_image_to_patches( images, min_tiles=min_tiles, max_tiles=max_tiles, tile_size=tile_size, thumbnail_size=(new_height, new_width), use_thumbnail=use_thumbnail, interpolation=interpolation, ) else: num_rows = num_cols = 1 images = tvF.resize(images, (new_height, new_width), interpolation=interpolation) # Make a list and treat it as single crop per image so it can be re-grouped back correctly images = [[image] for image in images] num_rows = [num_rows] * batch_size num_cols = [num_cols] * batch_size image_sizes = [[new_height, new_width]] * batch_size return images, num_rows, num_cols, image_sizes def _preprocess( self, images: ImageInput, size: SizeDict, interpolation: "tvF.InterpolationMode", do_resize: bool, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: float | list[float], image_std: float | list[float], downsample_factor: int, do_image_splitting: bool, min_tiles: int, max_tiles: int, use_thumbnail: bool, min_image_tokens: int, max_image_tokens: int, encoder_patch_size: int, tile_size: int, max_pixels_tolerance: float, return_tensors: str | TensorType, disable_grouping: bool, 
do_pad: bool,
        return_row_col_info: bool,
        **kwargs,
    ) -> BatchFeature:
        # When splitting is disabled, force a single tile regardless of min/max_tiles.
        if not do_image_splitting:
            min_tiles = 1
            max_tiles = 1
            logger.debug(
                "Image splitting is disabled, setting min_tiles and max_tiles to 1. Set do_image_splitting=True to enable splitting."
            )
        if do_image_splitting and min_tiles > max_tiles:
            raise ValueError("min_tiles must be less than or equal to max_tiles")

        # Upper bound on patches per image, used as the padding target below:
        # either the (downsampled) thumbnail budget or a full tile, whichever is larger.
        max_thumbnail_image_patches = max_image_tokens * downsample_factor**2
        tile_size_patches = (tile_size // encoder_patch_size) ** 2 if do_image_splitting else 0
        max_num_patches = max(
            max_thumbnail_image_patches,
            tile_size_patches,
        )

        # Group same-shaped images so they can be resized/split as one stacked batch.
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        resized_images_grouped = {}
        resized_image_sizes = {}
        rows_grouped, cols_grouped = {}, {}
        for shape, stacked_images in grouped_images.items():
            # Defaults for the no-resize path: single 1x1 "grid" at the original size.
            num_rows = [1] * stacked_images.shape[0]
            num_cols = [1] * stacked_images.shape[0]
            height, width = stacked_images.shape[-2:]
            image_sizes = [[height, width]] * stacked_images.shape[0]
            # NOTE(review): this unconditionally overrides the caller's `do_resize`
            # parameter, so resizing always happens — presumably because patching
            # requires dimensions divisible by the patch size. Confirm this is
            # intentional; otherwise the `do_resize` parameter is dead.
            do_resize = True
            if do_resize:
                stacked_images, num_rows, num_cols, image_sizes = self.resize_and_split(
                    stacked_images,
                    downsample_factor=downsample_factor,
                    min_tiles=min_tiles,
                    max_tiles=max_tiles,
                    use_thumbnail=use_thumbnail,
                    min_image_tokens=min_image_tokens,
                    max_image_tokens=max_image_tokens,
                    encoder_patch_size=encoder_patch_size,
                    tile_size=tile_size,
                    max_pixels_tolerance=max_pixels_tolerance,
                    interpolation=interpolation,
                )
            rows_grouped[shape] = num_rows
            cols_grouped[shape] = num_cols
            resized_image_sizes[shape] = image_sizes
            resized_images_grouped[shape] = stacked_images

        # Restore original batch order after the shape-grouped processing.
        resized_images = reorder_images(resized_images_grouped, grouped_images_index)
        batch_rows = reorder_images(rows_grouped, grouped_images_index)
        batch_cols = reorder_images(cols_grouped, grouped_images_index)
        resized_image_sizes = reorder_images(resized_image_sizes, grouped_images_index)

        # Re-group for the rescale/normalize/patchify pass; nested because each image
        # may now be a list of tiles (plus an optional larger thumbnail).
        grouped_images, grouped_images_index = group_images_by_shape(
            resized_images,
disable_grouping=disable_grouping, is_nested=True ) processed_images_grouped = {} processed_masks, processed_spatial_shapes = {}, {} for shape, stacked_images in grouped_images.items(): # Fused rescale and normalize stacked_images = self.rescale_and_normalize( stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) batch_size, *_, height, width = stacked_images.shape num_patches_height = height // encoder_patch_size num_patches_width = width // encoder_patch_size stacked_images = convert_image_to_patches(stacked_images, encoder_patch_size) processed_spatial_shapes[shape] = [[num_patches_height, num_patches_width]] * batch_size if do_pad: stacked_images, pixel_mask = pad_along_first_dim(stacked_images, max_num_patches) processed_masks[shape] = [pixel_mask] * batch_size processed_images_grouped[shape] = stacked_images processed_images = reorder_images(processed_images_grouped, grouped_images_index, is_nested=True) data = {"pixel_values": torch.cat([torch.stack(images) for images in processed_images])} if do_pad: processed_masks = reorder_images(processed_masks, grouped_images_index, is_nested=True) processed_spatial_shapes = reorder_images(processed_spatial_shapes, grouped_images_index, is_nested=True) processed_masks = torch.cat([torch.stack(masks) for masks in processed_masks]) processed_spatial_shapes = torch.cat( [torch.tensor(spatial_shape) for spatial_shape in processed_spatial_shapes] ) data.update({"pixel_attention_mask": processed_masks, "spatial_shapes": processed_spatial_shapes}) if return_row_col_info: data["image_rows"] = batch_rows data["image_cols"] = batch_cols data["image_sizes"] = resized_image_sizes encoding = BatchFeature(data=data, tensor_type=return_tensors) return encoding __all__ = ["Lfm2VlImageProcessorFast"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/lfm2_vl/image_processing_lfm2_vl_fast.py", "license": "Apache License 2.0", "lines": 492, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/lfm2_vl/modular_lfm2_vl.py
# Copyright 2025 the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Lfm2-VL model.""" import torch from torch import nn from ...activations import ACT2FN from ...cache_utils import Cache from ...modeling_outputs import BaseModelOutputWithPooling from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_compilable_check from ..llava.modeling_llava import ( LlavaCausalLMOutputWithPast, LlavaForConditionalGeneration, LlavaModel, LlavaModelOutputWithPast, LlavaPreTrainedModel, ) from .configuration_lfm2_vl import Lfm2VlConfig logger = logging.get_logger(__name__) class Lfm2VlMultiModalProjector(nn.Module): def __init__(self, config: Lfm2VlConfig): super().__init__() in_channels = config.vision_config.hidden_size * (config.downsample_factor**2) self.factor = config.downsample_factor self.use_layer_norm = config.projector_use_layernorm self.layer_norm = nn.LayerNorm(in_channels) if config.projector_use_layernorm else None self.linear_1 = nn.Linear( in_channels, config.projector_hidden_size, bias=config.projector_bias, ) self.act = ACT2FN[config.projector_hidden_act] self.linear_2 = nn.Linear( config.projector_hidden_size, config.text_config.hidden_size, bias=config.projector_bias, ) def forward(self, image_features: torch.Tensor): image_features = self.pixel_unshuffle(image_features) if self.use_layer_norm: image_features = self.layer_norm(image_features) 
hidden_states = self.linear_1(image_features) hidden_states = self.act(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states def pixel_unshuffle(self, hidden_states: torch.Tensor): batch_size, width, height, channels = hidden_states.size() hidden_states = hidden_states.reshape(batch_size, width, height // self.factor, channels * self.factor) hidden_states = hidden_states.permute(0, 2, 1, 3) hidden_states = hidden_states.reshape( batch_size, height // self.factor, width // self.factor, channels * self.factor**2 ) hidden_states = hidden_states.permute(0, 2, 1, 3) return hidden_states class Lfm2VlPreTrainedModel(LlavaPreTrainedModel): _can_compile_fullgraph = False base_model_prefix = "model" class Lfm2VlCausalLMOutputWithPast(LlavaCausalLMOutputWithPast): pass class Lfm2VlModelOutputWithPast(LlavaModelOutputWithPast): pass class Lfm2VlModel(LlavaModel): _checkpoint_conversion_mapping = {} def __init__(self, config: Lfm2VlConfig): super().__init__(config) @can_return_tuple @auto_docstring( custom_intro="Obtains image last hidden states from the vision tower and apply multimodal projection." ) def get_image_features( self, pixel_values: torch.FloatTensor, spatial_shapes: torch.Tensor, pixel_attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs], ) -> tuple | BaseModelOutputWithPooling: r""" pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`): The tensors corresponding to the input images. spatial_shapes (`torch.Tensor` of shape `(batch_size, 2)`): The spatial shapes of the input images. pixel_attention_mask (`torch.Tensor` of shape `(batch_size, height, width)`): The pixel attention mask of the input images. 
""" image_outputs = self.vision_tower( pixel_values=pixel_values, spatial_shapes=spatial_shapes, pixel_attention_mask=pixel_attention_mask, return_dict=True, **kwargs, ) last_hidden_state = image_outputs.last_hidden_state img_feature_lengths = pixel_attention_mask.sum(dim=1) image_features = [] for img_idx in range(last_hidden_state.size(0)): feature = last_hidden_state[img_idx] # unpad the image representation feature = feature[: img_feature_lengths[img_idx], :].unsqueeze(0) # reshape to original height and width feature_org_h, feature_org_w = spatial_shapes[img_idx] feature = feature.reshape(1, feature_org_h, feature_org_w, -1) # project the image representation img_embedding = self.multi_modal_projector(feature) # flatten here to handle variable length in naflex img_embedding = img_embedding.reshape(-1, img_embedding.size(-1)) image_features.append(img_embedding) image_outputs.pooler_output = image_features return image_outputs def get_placeholder_mask( self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor ): """ Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is equal to the length of multimodal features. If the lengths are different, an error is raised. 
""" if input_ids is None: special_image_mask = inputs_embeds == self.get_input_embeddings()( torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device) ) special_image_mask = special_image_mask.all(-1) else: special_image_mask = input_ids == self.config.image_token_id n_image_tokens = special_image_mask.sum() special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) n_image_features = image_features.shape[0] torch_compilable_check( inputs_embeds[special_image_mask].numel() == image_features.numel(), f"Image features and image tokens do not match, tokens: {n_image_tokens}, features: {n_image_features}", ) return special_image_mask @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, pixel_values: torch.FloatTensor | None = None, spatial_shapes: torch.Tensor | None = None, pixel_attention_mask: torch.Tensor | None = None, past_key_values: Cache | None = None, inputs_embeds: torch.FloatTensor | None = None, use_cache: bool | None = None, cache_position: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple | Lfm2VlModelOutputWithPast: r""" spatial_shapes (`torch.Tensor` of shape `(batch_size, 2)`, *optional*): The spatial shapes of the input images. pixel_attention_mask (`torch.Tensor` of shape `(batch_size, height, width)`, *optional*): The pixel attention mask of the input images. 
""" if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.get_input_embeddings()(input_ids) if pixel_values is not None: image_features = self.get_image_features( pixel_values=pixel_values, spatial_shapes=spatial_shapes, pixel_attention_mask=pixel_attention_mask, return_dict=True, ).pooler_output image_features = torch.cat(image_features, dim=0).to(inputs_embeds.device, inputs_embeds.dtype) special_image_mask = self.get_placeholder_mask( input_ids=input_ids, inputs_embeds=inputs_embeds, image_features=image_features, ) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features) outputs = self.language_model( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs, ) return Lfm2VlModelOutputWithPast( last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=image_features if pixel_values is not None else None, ) class Lfm2VlForConditionalGeneration(LlavaForConditionalGeneration): _checkpoint_conversion_mapping = {} @auto_docstring def get_image_features( self, pixel_values: torch.FloatTensor, spatial_shapes: torch.Tensor, pixel_attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs], ) -> tuple | BaseModelOutputWithPooling: r""" pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`): The tensors corresponding to the input images. spatial_shapes (`torch.Tensor` of shape `(batch_size, 2)`): The spatial shapes of the input images. pixel_attention_mask (`torch.Tensor` of shape `(batch_size, height, width)`): The pixel attention mask of the input images. 
""" return self.model.get_image_features( pixel_values=pixel_values, spatial_shapes=spatial_shapes, pixel_attention_mask=pixel_attention_mask, **kwargs, ) @can_return_tuple def forward( self, input_ids: torch.LongTensor | None = None, pixel_values: torch.FloatTensor | None = None, spatial_shapes: torch.Tensor | None = None, pixel_attention_mask: torch.Tensor | None = None, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: Cache | None = None, inputs_embeds: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, use_cache: bool | None = None, cache_position: torch.LongTensor | None = None, logits_to_keep: int | torch.Tensor = 0, **kwargs: Unpack[TransformersKwargs], ) -> tuple | Lfm2VlCausalLMOutputWithPast: r""" pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`, *optional*): The input image tensors. spatial_shapes (`torch.Tensor` of shape `(batch_size, 2)`, *optional*): The spatial shapes of the input images. pixel_attention_mask (`torch.Tensor` of shape `(batch_size, height, width)`, *optional*): The pixel attention mask of the input images. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from PIL import Image >>> import httpx >>> from io import BytesIO >>> from transformers import AutoProcessor, AutoModelForImageTextToText >>> from transformers.image_utils import load_image >>> model = AutoModelForImageTextToText.from_pretrained( ... "LiquidAI/LFM2-VL-1.6B", ... ) >>> processor = AutoProcessor.from_pretrained( ... "LiquidAI/LFM2-VL-1.6B", ... 
) >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg" >>> image = load_image(url) >>> conversation = [ ... { ... "role": "user", ... "content": [ ... {"type": "image", "image": image}, ... {"type": "text", "text": "What is in this image?"}, ... ], ... }, ... ] >>> inputs = processor.apply_chat_template( ... conversation, ... add_generation_prompt=True, ... tokenize=True, ... return_dict=True, ... return_tensors="pt" ... ) >>> # Generate >>> outputs = model.generate(**inputs, max_new_tokens=45) >>> processor.batch_decode(outputs, skip_special_tokens=True)[0] 'This image depicts a vibrant street scene in what appears to be a Chinatown or similar cultural area. The focal point is a large red stop sign with white lettering, mounted on a pole.' ```""" outputs = self.model( input_ids=input_ids, pixel_values=pixel_values, spatial_shapes=spatial_shapes, pixel_attention_mask=pixel_attention_mask, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function( logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs, ) return Lfm2VlCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states, ) __all__ = ["Lfm2VlForConditionalGeneration", "Lfm2VlPreTrainedModel", "Lfm2VlModel"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/lfm2_vl/modular_lfm2_vl.py", "license": "Apache License 2.0", "lines": 319, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/lfm2_vl/processing_lfm2_vl.py
# Copyright 2025 the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from ...feature_extraction_utils import BatchFeature from ...image_utils import ImageInput, make_nested_list_of_images from ...processing_utils import ( ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack, ) from ...tokenization_utils_base import BatchEncoding, TextInput from ...utils import auto_docstring, logging logger = logging.get_logger(__name__) class Lfm2VlTextKwargs(TextKwargs, total=False): """ use_image_special_tokens (`bool`, *optional*, defaults to `True`): Whether to use special image tokens (`<|image_start|>` and `<|image_end|>`) to delimit image sequences in the text. When enabled, images are wrapped with these tokens to clearly mark image boundaries. When disabled, only the image token itself is used without delimiters. 
""" use_image_special_tokens: bool | None class Lfm2VlProcessorKwargs(ProcessingKwargs, total=False): text_kwargs: Lfm2VlTextKwargs _defaults = { "images_kwargs": { "return_row_col_info": True, }, "text_kwargs": { "use_image_special_tokens": True, "add_special_tokens": False, "padding": False, "is_split_into_words": False, }, } @auto_docstring class Lfm2VlProcessor(ProcessorMixin): def __init__( self, image_processor, tokenizer, chat_template: str | None = None, **kwargs, ): self.image_token = getattr(tokenizer, "image_token", "<image>") self.image_token_id = ( tokenizer.image_token_id if hasattr(tokenizer, "image_token_id") else tokenizer.convert_tokens_to_ids(self.image_token) ) self.image_start_token = getattr(tokenizer, "image_start_token", "<|image_start|>") self.image_end_token = getattr(tokenizer, "image_end_token", "<|image_end|>") self.image_thumbnail_token = getattr(tokenizer, "image_thumbnail_token", "<|img_thumbnail|>") super().__init__(image_processor, tokenizer, chat_template=chat_template, **kwargs) @auto_docstring def __call__( self, images: ImageInput | list[ImageInput] | list[list[ImageInput]] | None = None, text: TextInput | list[TextInput] | None = None, **kwargs: Unpack[Lfm2VlProcessorKwargs], ) -> BatchEncoding: if text is None and images is None: raise ValueError("You must provide one of `text` or `images`.") if images is not None and text is None: raise ValueError( "You must provide `text` when `images` is provided. Minimal text consists of a single image token." ) output_kwargs = self._merge_kwargs( Lfm2VlProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) if isinstance(text, str): text = [text] elif not isinstance(text, list) and not isinstance(text[0], str): raise TypeError("Invalid input text. 
Please provide a string, or a list of strings") n_images_in_text = [sample.count(self.image_token) for sample in text] if sum(n_images_in_text) > 0 and images is None: raise ValueError(f"We detected {sum(n_images_in_text)} tokens in the text but no images were passed") inputs = {} use_image_special_tokens = output_kwargs["text_kwargs"].pop("use_image_special_tokens") if images is not None: images = self.image_processor.fetch_images(images) batched_images = make_nested_list_of_images(images) vision_inputs = self.image_processor(batched_images, **output_kwargs["images_kwargs"]) n_images_in_images = [len(sublist) for sublist in batched_images] if n_images_in_images != n_images_in_text: raise ValueError( f"The number of images in the text {n_images_in_text} and images {n_images_in_images} should be the same." ) text = self.expand_text_with_placeholders( text, batched_images, image_rows=vision_inputs.pop("image_rows"), image_cols=vision_inputs.pop("image_cols"), image_sizes=vision_inputs.pop("image_sizes"), use_image_special_tokens=use_image_special_tokens, **output_kwargs["images_kwargs"], ) inputs.update(vision_inputs) return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None) text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"]) inputs.update(text_inputs) return BatchFeature(inputs, tensor_type=return_tensors) def expand_text_with_placeholders( self, text: list[str], images: list[list[ImageInput]], image_rows: list[list[int]], image_cols: list[list[int]], image_sizes: list[list[int]], use_image_special_tokens: bool, **images_kwargs, ) -> list[str]: use_thumbnail = images_kwargs.get("use_thumbnail", self.image_processor.use_thumbnail) image_data = iter(zip(image_rows, image_cols, image_sizes)) prompt_strings = [] for sample_text, sample_images in zip(text, images): text_parts = sample_text.split(self.image_token) result_parts = [] for i, _ in enumerate(sample_images): result_parts.append(text_parts[i]) rows, cols, image_size = 
next(image_data) tokens_per_tile, tokens_for_image = self._get_image_num_tokens(image_size, **images_kwargs) image_tokens = self._build_image_tokens( rows, cols, tokens_per_tile, tokens_for_image, use_thumbnail, use_image_special_tokens, ) result_parts.append(image_tokens) # Add remaining text after the last image if len(sample_images) < len(text_parts): result_parts.append(text_parts[-1]) prompt_strings.append("".join(result_parts)) return prompt_strings def _build_image_tokens( self, rows: int, cols: int, tokens_per_tile: int, tokens_for_image: int, use_thumbnail: bool, use_image_special_tokens: bool, ) -> str: """Build the expanded token string for a single image.""" parts = [] if use_image_special_tokens: parts.append(self.image_start_token) is_multi_tile = rows > 1 or cols > 1 if is_multi_tile: for row in range(rows): for col in range(cols): if use_image_special_tokens: parts.append(f"<|img_row_{row + 1}_col_{col + 1}|>") parts.append(self.image_token * tokens_per_tile) if use_thumbnail: if use_image_special_tokens: parts.append(self.image_thumbnail_token) parts.append(self.image_token * tokens_for_image) else: parts.append(self.image_token * tokens_for_image) if use_image_special_tokens: parts.append(self.image_end_token) return "".join(parts) def _compute_tokens_per_tile(self, tile_size: int, encoder_patch_size: int, downsample_factor: int) -> int: """Compute the number of tokens for a single tile.""" num_patches = tile_size // encoder_patch_size downsampled_patches = math.ceil(num_patches / downsample_factor) return downsampled_patches * downsampled_patches def _compute_tokens_for_image(self, image_size: list[int], encoder_patch_size: int, downsample_factor: int) -> int: """Compute the number of tokens for a resized image (used for single-tile or thumbnail).""" image_height, image_width = image_size patches_h = math.ceil((image_height // encoder_patch_size) / downsample_factor) patches_w = math.ceil((image_width // encoder_patch_size) / downsample_factor) 
return patches_h * patches_w def _get_image_num_tokens(self, image_size: list[int], **images_kwargs) -> tuple[int, int]: """ Compute token counts for image processing. Returns: tuple[int, int]: (tokens_per_tile, tokens_for_image) - tokens_per_tile: tokens for each tile in multi-tile mode - tokens_for_image: tokens for the resized image (single-tile) or thumbnail (multi-tile) """ tile_size = images_kwargs.get("tile_size", self.image_processor.tile_size) downsample_factor = images_kwargs.get("downsample_factor", self.image_processor.downsample_factor) encoder_patch_size = images_kwargs.get("encoder_patch_size", self.image_processor.encoder_patch_size) tokens_per_tile = self._compute_tokens_per_tile(tile_size, encoder_patch_size, downsample_factor) tokens_for_image = self._compute_tokens_for_image(image_size, encoder_patch_size, downsample_factor) return tokens_per_tile, tokens_for_image def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to LFM2Tokeniser's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ batched_decode_output = self.tokenizer.batch_decode(*args, **kwargs) return batched_decode_output def decode(self, *args, **kwargs): """ This method forwards all its arguments to LFM2Tokeniser's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ decode_output = self.tokenizer.decode(*args, **kwargs) return decode_output @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names # LFM2-VL has no dedicated tokenizer class and uses the Base class with default model input names tokenizer_input_names = [name for name in tokenizer_input_names if name != "token_type_ids"] return list(tokenizer_input_names + image_processor_input_names) __all__ = ["Lfm2VlProcessor"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/lfm2_vl/processing_lfm2_vl.py", "license": "Apache License 2.0", "lines": 225, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:tests/models/lfm2_vl/test_image_processing_lfm2_vl.py
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the LFM2-VL fast image processor."""

import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available

from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs


if is_vision_available():
    from PIL import Image

if is_torch_available():
    import torch

if is_torchvision_available():
    from transformers import Lfm2VlImageProcessorFast
    from transformers.models.lfm2_vl.image_processing_lfm2_vl_fast import (
        find_closest_aspect_ratio,
        round_by_factor,
    )


class Lfm2VlImageProcessingTester:
    """Holds the image-processor configuration used by the test class and builds test inputs."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_images=1,
        min_resolution=256,
        max_resolution=1024,
        downsample_factor=2,
        do_image_splitting=False,
        min_tiles=2,
        max_tiles=10,
        use_thumbnail=True,
        min_image_tokens=64,
        max_image_tokens=256,
        encoder_patch_size=16,
        tile_size=512,
        max_pixels_tolerance=2.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_images = num_images
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.downsample_factor = downsample_factor
        self.do_image_splitting = do_image_splitting
        self.min_tiles = min_tiles
        self.max_tiles = max_tiles
        self.use_thumbnail = use_thumbnail
        self.min_image_tokens = min_image_tokens
        self.max_image_tokens = max_image_tokens
        self.encoder_patch_size = encoder_patch_size
        self.tile_size = tile_size
        self.max_pixels_tolerance = max_pixels_tolerance

    def prepare_image_processor_dict(self):
        # Keyword arguments forwarded to the image-processor constructor in the tests.
        return {
            "downsample_factor": self.downsample_factor,
            "do_image_splitting": self.do_image_splitting,
            "min_tiles": self.min_tiles,
            "max_tiles": self.max_tiles,
            "use_thumbnail": self.use_thumbnail,
            "min_image_tokens": self.min_image_tokens,
            "max_image_tokens": self.max_image_tokens,
            "encoder_patch_size": self.encoder_patch_size,
            "tile_size": self.tile_size,
            "max_pixels_tolerance": self.max_pixels_tolerance,
        }

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        # Wrap each image in its own list: the processor expects one list of images per sample.
        images = prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )
        return [[image] for image in images]


@require_torch
@require_vision
class Lfm2VlImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    test_slow_image_processor = False
    fast_image_processing_class = Lfm2VlImageProcessorFast if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = Lfm2VlImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            self.assertTrue(hasattr(image_processing, "downsample_factor"))
            self.assertTrue(hasattr(image_processing, "min_tiles"))
            self.assertTrue(hasattr(image_processing, "max_tiles"))
            self.assertTrue(hasattr(image_processing, "use_thumbnail"))
            self.assertTrue(hasattr(image_processing, "min_image_tokens"))
            self.assertTrue(hasattr(image_processing, "max_image_tokens"))
            self.assertTrue(hasattr(image_processing, "encoder_patch_size"))
            self.assertTrue(hasattr(image_processing, "tile_size"))
            self.assertTrue(hasattr(image_processing, "max_pixels_tolerance"))

    @require_vision
    def test_smart_resize(self):
        # verify that smart resize output dims are divisible by encoder_patch_size * downsample_factor
        image_processing = self.fast_image_processing_class(**self.image_processor_dict)
        width, height = image_processing.smart_resize(
            height=500,
            width=300,
            downsample_factor=image_processing.downsample_factor,
            min_image_tokens=image_processing.min_image_tokens,
            max_image_tokens=image_processing.max_image_tokens,
            encoder_patch_size=image_processing.encoder_patch_size,
        )
        mod = image_processing.encoder_patch_size * image_processing.downsample_factor
        self.assertEqual(width % mod, 0)
        self.assertEqual(height % mod, 0)

    @require_vision
    def test_get_grid_layout(self):
        # splitting a 1024x1024 image into tiles of size image_processing.tile_size (512 -> 2x2 grid)
        image_processing = self.fast_image_processing_class(**self.image_processor_dict)
        rows, cols, _, _, num_patches = image_processing._get_grid_layout(
            height=1024,
            width=1024,
            min_tiles=image_processing.min_tiles,
            max_tiles=image_processing.max_tiles,
            tile_size=image_processing.tile_size,
        )
        self.assertEqual(num_patches, 4)
        self.assertEqual(num_patches, rows * cols)

        rows, cols, _, _, num_patches = image_processing._get_grid_layout(
            height=1024,
            width=1024,
            min_tiles=8,
            max_tiles=8,
            tile_size=image_processing.tile_size,
        )
        self.assertEqual(num_patches, 8)
        self.assertEqual(num_patches, rows * cols)

    def test_find_closest_aspect_ratio(self):
        # should pick (1,1) over (2,1) for a square image
        result = find_closest_aspect_ratio(1.0, [(1, 1), (2, 1)], width=100, height=100, image_size=100)
        self.assertEqual(result, (1, 1))

        result = find_closest_aspect_ratio(0.5, [(1, 1), (1, 2)], width=100, height=200, image_size=200)
        self.assertEqual(result, (1, 2))

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.fast_image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
        for sample_images in image_inputs:
            for image in sample_images:
                self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            tuple(encoded_images.shape),
            (1, image_processing.max_num_patches, 3 * image_processing.encoder_patch_size**2),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            tuple(encoded_images.shape),
            (
                self.image_processor_tester.batch_size,
                image_processing.max_num_patches,
                3 * image_processing.encoder_patch_size**2,
            ),
        )

    def test_call_numpy_4_channels(self):
        # Lfm2Vl always processes images as RGB, so it always returns images with 3 channels
        # Initialize image_processing
        image_processor_dict = self.image_processor_dict
        image_processing = self.fast_image_processing_class(**image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
        for sample_images in image_inputs:
            for image in sample_images:
                self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            tuple(encoded_images.shape),
            (1, image_processing.max_num_patches, 3 * image_processing.encoder_patch_size**2),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            tuple(encoded_images.shape),
            (
                self.image_processor_tester.batch_size,
                image_processing.max_num_patches,
                3 * image_processing.encoder_patch_size**2,
            ),
        )

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.fast_image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
        for images in image_inputs:
            for image in images:
                self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            tuple(encoded_images.shape),
            (1, image_processing.max_num_patches, 3 * image_processing.encoder_patch_size**2),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            tuple(encoded_images.shape),
            (
                self.image_processor_tester.batch_size,
                image_processing.max_num_patches,
                3 * image_processing.encoder_patch_size**2,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.fast_image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
        for images in image_inputs:
            for image in images:
                self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            tuple(encoded_images.shape),
            (1, image_processing.max_num_patches, 3 * image_processing.encoder_patch_size**2),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            tuple(encoded_images.shape),
            (
                self.image_processor_tester.batch_size,
                image_processing.max_num_patches,
                3 * image_processing.encoder_patch_size**2,
            ),
        )

    def test_small_image_no_tiling_no_thumbnail(self):
        """Small image with tiling disabled should use smart resize, no thumbnail."""
        image_processing = self.fast_image_processing_class(
            do_image_splitting=False,
            use_thumbnail=True,  # even if enabled, should not be used for small/non-tiled images
        )

        # Create a small image (256x256)
        small_image = Image.new("RGB", (256, 256), color="red")
        result = image_processing([[small_image]], return_tensors="pt", return_row_col_info=True)

        # With tiling disabled, should be 1 tile (no thumbnail)
        self.assertEqual(result.image_rows[0].item(), 1)
        self.assertEqual(result.image_cols[0].item(), 1)
        # Should have exactly 1 image in batch (no thumbnail)
        self.assertEqual(result.pixel_values.shape[0], 1)

    def test_small_image_tiling_enabled_no_thumbnail(self):
        """Small image with tiling enabled should not be tiled (too small), no thumbnail."""
        image_processing = self.fast_image_processing_class(
            do_image_splitting=True,
            use_thumbnail=True,
            min_tiles=2,
            max_tiles=10,
        )

        # Create a small image that won't exceed the max_image_tokens threshold
        small_image = Image.new("RGB", (256, 256), color="blue")
        result = image_processing([[small_image]], return_tensors="pt", return_row_col_info=True)

        # Small image should not be tiled (1x1 grid), no thumbnail added
        self.assertEqual(result.image_rows[0].item(), 1)
        self.assertEqual(result.image_cols[0].item(), 1)
        # Should have exactly 1 image in batch (no thumbnail)
        self.assertEqual(result.pixel_values.shape[0], 1)

    def test_large_image_no_tiling_smart_resize(self):
        """Large image with tiling disabled should use smart resize, no thumbnail."""
        image_processing = self.fast_image_processing_class(
            do_image_splitting=False,
            use_thumbnail=True,  # even if enabled, should not be used
        )

        # Create a large image (2048x2048)
        large_image = Image.new("RGB", (2048, 2048), color="green")
        result = image_processing([[large_image]], return_tensors="pt", return_row_col_info=True)

        # With tiling disabled, should be 1 tile even for large images
        self.assertEqual(result.image_rows[0].item(), 1)
        self.assertEqual(result.image_cols[0].item(), 1)
        # Should have exactly 1 image in batch (no thumbnail, smart resize only)
        self.assertEqual(result.pixel_values.shape[0], 1)

    def test_large_image_tiling_enabled_thumbnail_disabled(self):
        """Large image with tiling enabled but thumbnail disabled should tile without thumbnail."""
        image_processing = self.fast_image_processing_class(
            do_image_splitting=True,
            use_thumbnail=False,
            min_tiles=2,
            max_tiles=10,
            tile_size=512,
        )

        # Create a large image that will require tiling
        large_image = Image.new("RGB", (2048, 2048), color="yellow")
        result = image_processing([[large_image]], return_tensors="pt", return_row_col_info=True)

        # Large image should be tiled into multiple tiles
        num_rows = result.image_rows[0].item()
        num_cols = result.image_cols[0].item()
        num_tiles = num_rows * num_cols
        self.assertGreater(num_tiles, 1, "Large image should be tiled into multiple tiles")

        # Count actual patches - with thumbnail disabled, should equal number of tiles
        num_images_in_batch = result.pixel_values.shape[0]
        self.assertEqual(
            num_images_in_batch, num_tiles, "Number of patches should equal number of tiles (no thumbnail)"
        )

    def test_large_image_tiling_enabled_thumbnail_enabled(self):
        """Large image with tiling and thumbnail enabled should tile AND add thumbnail."""
        image_processing = self.fast_image_processing_class(
            do_image_splitting=True,
            use_thumbnail=True,
            min_tiles=2,
            max_tiles=10,
            tile_size=512,
        )

        # Create a large image that will require tiling
        large_image = Image.new("RGB", (2048, 2048), color="purple")
        result = image_processing([[large_image]], return_tensors="pt", return_row_col_info=True)

        # Large image should be tiled into multiple tiles
        num_rows = result.image_rows[0].item()
        num_cols = result.image_cols[0].item()
        num_tiles = num_rows * num_cols
        self.assertGreater(num_tiles, 1, "Large image should be tiled into multiple tiles")

        # With thumbnail enabled, we should have tiles + 1 thumbnail
        num_images_in_batch = result.pixel_values.shape[0]
        self.assertEqual(num_images_in_batch, num_tiles + 1, "Number of patches should equal tiles + 1 (thumbnail)")

    # ==================== Non-Square Aspect Ratio Tests ====================

    def test_landscape_image_aspect_ratio(self):
        """Test that landscape images (wider than tall) are processed correctly."""
        image_processing = self.fast_image_processing_class(
            do_image_splitting=True,
            use_thumbnail=True,
            min_tiles=2,
            max_tiles=10,
            tile_size=512,
        )

        # Create a landscape image (1920x1080, ~16:9 aspect ratio)
        landscape_image = Image.new("RGB", (1920, 1080), color="blue")
        result = image_processing([[landscape_image]], return_tensors="pt", return_row_col_info=True)

        num_rows = result.image_rows[0].item()
        num_cols = result.image_cols[0].item()

        # Landscape image should have more columns than rows
        self.assertGreaterEqual(num_cols, num_rows, "Landscape image should have cols >= rows")

    def test_extreme_aspect_ratio_wide(self):
        """Test extremely wide image (panorama-like)."""
        image_processing = self.fast_image_processing_class(
            do_image_splitting=True,
            use_thumbnail=True,
            min_tiles=2,
            max_tiles=10,
            tile_size=512,
        )

        # Create an extremely wide image (3000x500)
        wide_image = Image.new("RGB", (3000, 500), color="red")
        result = image_processing([[wide_image]], return_tensors="pt", return_row_col_info=True)

        num_rows = result.image_rows[0].item()
        num_cols = result.image_cols[0].item()

        # Very wide image should have significantly more cols than rows
        self.assertGreater(num_cols, num_rows, "Very wide image should have cols > rows")

    def test_extreme_aspect_ratio_tall(self):
        """Test extremely tall image."""
        image_processing = self.fast_image_processing_class(
            do_image_splitting=True,
            use_thumbnail=True,
            min_tiles=2,
            max_tiles=10,
            tile_size=512,
        )

        # Create an extremely tall image (500x3000)
        tall_image = Image.new("RGB", (500, 3000), color="yellow")
        result = image_processing([[tall_image]], return_tensors="pt", return_row_col_info=True)

        num_rows = result.image_rows[0].item()
        num_cols = result.image_cols[0].item()

        # Very tall image should have significantly more rows than cols
        self.assertGreater(num_rows, num_cols, "Very tall image should have rows > cols")

    # ==================== Output Validation Tests ====================

    def test_image_sizes_returned_with_row_col_info(self):
        """Test that image_sizes is returned when return_row_col_info=True."""
        image_processing = self.fast_image_processing_class(do_image_splitting=False)

        image = Image.new("RGB", (512, 256), color="green")
        result = image_processing([[image]], return_tensors="pt", return_row_col_info=True)

        # Check all row/col info is returned
        self.assertIn("image_rows", result)
        self.assertIn("image_cols", result)
        self.assertIn("image_sizes", result)

        # image_sizes should contain [height, width] for the resized image
        image_sizes = result.image_sizes
        self.assertIsInstance(image_sizes, torch.Tensor)
        self.assertEqual(image_sizes.shape[0], 1)  # one sample
        self.assertEqual(image_sizes.shape[1], 2)  # [height, width]

    def test_output_consistency_across_formats(self):
        """Test that outputs are consistent regardless of input format (PIL, numpy, torch)."""
        image_processing = self.fast_image_processing_class(do_image_splitting=False)

        # Create same image in different formats
        pil_image = Image.new("RGB", (256, 256), color="white")
        np_image = np.array(pil_image)
        torch_image = torch.from_numpy(np_image).permute(2, 0, 1)

        result_pil = image_processing([[pil_image]], return_tensors="pt")
        result_np = image_processing([[np_image]], return_tensors="pt")
        result_torch = image_processing([[torch_image]], return_tensors="pt")

        # All should produce same shapes
        self.assertEqual(result_pil.pixel_values.shape, result_np.pixel_values.shape)
        self.assertEqual(result_pil.pixel_values.shape, result_torch.pixel_values.shape)
        self.assertEqual(result_pil.spatial_shapes.tolist(), result_np.spatial_shapes.tolist())
        self.assertEqual(result_pil.spatial_shapes.tolist(), result_torch.spatial_shapes.tolist())

    # ==================== Multiple Images Per Sample Tests ====================

    def test_multiple_images_per_sample(self):
        """Test processing multiple images in a single sample: [[img1, img2, img3]]."""
        image_processing = self.fast_image_processing_class(do_image_splitting=False)

        img1 = Image.new("RGB", (256, 256), color="red")
        img2 = Image.new("RGB", (256, 256), color="green")
        img3 = Image.new("RGB", (256, 256), color="blue")

        result = image_processing([[img1, img2, img3]], return_tensors="pt")

        # Should have 3 images processed
        self.assertEqual(result.pixel_values.shape[0], 3)
        self.assertEqual(result.spatial_shapes.shape[0], 3)
        self.assertEqual(result.pixel_attention_mask.shape[0], 3)

    def test_mixed_image_counts_across_batch(self):
        """Test batch with different number of images per sample: [[img1], [img2, img3]]."""
        image_processing = self.fast_image_processing_class(do_image_splitting=False)

        img1 = Image.new("RGB", (256, 256), color="red")
        img2 = Image.new("RGB", (256, 256), color="green")
        img3 = Image.new("RGB", (256, 256), color="blue")

        # First sample has 1 image, second sample has 2 images
        result = image_processing([[img1], [img2, img3]], return_tensors="pt")

        # Total should be 3 images (1 + 2)
        self.assertEqual(result.pixel_values.shape[0], 3)
        self.assertEqual(result.spatial_shapes.shape[0], 3)

    def test_multiple_images_different_sizes(self):
        """Test multiple images per sample with different sizes."""
        image_processing = self.fast_image_processing_class(do_image_splitting=False)

        img_small = Image.new("RGB", (256, 256), color="red")
        img_medium = Image.new("RGB", (512, 512), color="green")
        img_large = Image.new("RGB", (768, 768), color="blue")

        result = image_processing([[img_small, img_medium, img_large]], return_tensors="pt")

        # Should have 3 images processed
        self.assertEqual(result.pixel_values.shape[0], 3)
        # All should have same max_num_patches due to padding
        self.assertEqual(result.pixel_values.shape[1], image_processing.max_num_patches)

    # ==================== Parameter Variations Tests ====================

    def test_forced_grid_config_min_equals_max(self):
        """Test forcing a specific grid configuration with min_tiles == max_tiles."""
        image_processing = self.fast_image_processing_class(
            do_image_splitting=True,
            min_tiles=4,
            max_tiles=4,  # Force exactly 4 tiles
            tile_size=512,
            use_thumbnail=False,
        )

        # Large image that would normally get more tiles
        wide_image = Image.new("RGB", (3000, 500), color="red")
        result = image_processing([[wide_image]], return_tensors="pt", return_row_col_info=True)

        num_rows = result.image_rows[0].item()
        num_cols = result.image_cols[0].item()
        num_tiles = num_rows * num_cols

        # Should be exactly 4 tiles
        self.assertEqual(num_tiles, 4, "Should have exactly 4 tiles when min_tiles == max_tiles == 4")

    # ==================== Input Validation Tests ====================

    def test_min_tiles_greater_than_max_tiles_raises_error(self):
        """Test that min_tiles > max_tiles raises ValueError."""
        image_processing = self.fast_image_processing_class(
            do_image_splitting=True,
            min_tiles=10,
            max_tiles=2,  # Invalid: min > max
        )

        image = Image.new("RGB", (1024, 1024), color="red")

        with self.assertRaises(ValueError) as context:
            image_processing([[image]], return_tensors="pt")

        self.assertIn("min_tiles", str(context.exception).lower())

    # ==================== Edge Case Images Tests ====================

    def test_very_small_image(self):
        """Test image smaller than encoder_patch_size."""
        image_processing = self.fast_image_processing_class(
            do_image_splitting=False,
            encoder_patch_size=16,
        )

        # Image smaller than patch size
        tiny_image = Image.new("RGB", (8, 8), color="red")
        result = image_processing([[tiny_image]], return_tensors="pt")

        # Should still process without error
        self.assertIn("pixel_values", result)
        self.assertEqual(result.pixel_values.dim(), 3)

    def test_grayscale_image(self):
        """Test that grayscale (1-channel) images are converted to RGB."""
        image_processing = self.fast_image_processing_class(do_image_splitting=False)

        # Create grayscale image
        grayscale_image = Image.new("L", (256, 256), color=128)
        result = image_processing([[grayscale_image]], return_tensors="pt")

        # Should process and output 3 channels (converted to RGB)
        self.assertIn("pixel_values", result)
        # pixel_values shape is (batch, num_patches, patch_size^2 * 3)
        expected_patch_dim = 3 * image_processing.encoder_patch_size**2
        self.assertEqual(result.pixel_values.shape[2], expected_patch_dim)

    def test_rgba_4_channel_image(self):
        """Test that RGBA (4-channel) images are converted to RGB."""
        image_processing = self.fast_image_processing_class(do_image_splitting=False)

        # Create RGBA image with alpha channel
        rgba_image = Image.new("RGBA", (256, 256), color=(255, 0, 0, 128))
        result = image_processing([[rgba_image]], return_tensors="pt", do_convert_rgb=True)

        # Should process and output 3 channels (alpha dropped)
        self.assertIn("pixel_values", result)
        expected_patch_dim = 3 * image_processing.encoder_patch_size**2
        self.assertEqual(result.pixel_values.shape[2], expected_patch_dim)

    def test_numpy_4_channel_rgba(self):
        """Test actual 4-channel numpy array input - convert to PIL for RGB conversion."""
        image_processing = self.fast_image_processing_class(do_image_splitting=False)

        # Create 4-channel numpy array (RGBA) and convert to PIL Image for RGB conversion
        rgba_np = np.random.randint(0, 255, (256, 256, 4), dtype=np.uint8)
        rgba_pil = Image.fromarray(rgba_np, mode="RGBA")
        result = image_processing([[rgba_pil]], return_tensors="pt", do_convert_rgb=True)

        # Should convert to 3 channels
        self.assertIn("pixel_values", result)
        expected_patch_dim = 3 * image_processing.encoder_patch_size**2
        self.assertEqual(result.pixel_values.shape[2], expected_patch_dim)

    def test_single_pixel_image(self):
        """Test 1x1 pixel image (extreme edge case)."""
        image_processing = self.fast_image_processing_class(do_image_splitting=False)

        single_pixel = Image.new("RGB", (1, 1), color="blue")
        result = image_processing([[single_pixel]], return_tensors="pt")

        # Should process without error
        self.assertIn("pixel_values", result)

    # ==================== Helper Function Unit Tests ====================

    def test_round_by_factor(self):
        """Test round_by_factor function."""
        # Exact multiples should return themselves
        self.assertEqual(round_by_factor(32, 16), 32)
        self.assertEqual(round_by_factor(64, 16), 64)

        # Values should round to nearest multiple
        self.assertEqual(round_by_factor(30, 16), 32)  # 30 -> 32 (closer to 32 than 16)
        self.assertEqual(round_by_factor(20, 16), 16)  # 20 -> 16 (closer to 16 than 32)
        self.assertEqual(round_by_factor(24, 16), 32)  # 24 -> 32 (equidistant, rounds up)

        # Test with different factors
        self.assertEqual(round_by_factor(100, 32), 96)  # 100 -> 96
        self.assertEqual(round_by_factor(50, 32), 64)  # 50 -> 64

        # Test with factor of 1
        self.assertEqual(round_by_factor(17, 1), 17)

    def test_is_image_too_large_small_image(self):
        """Test _is_image_too_large with small image."""
        image_processing = self.fast_image_processing_class(
            max_image_tokens=256,
            encoder_patch_size=16,
            downsample_factor=2,
            max_pixels_tolerance=2.0,
        )

        is_large = image_processing._is_image_too_large(
            height=512,
            width=512,
            max_image_tokens=256,
            encoder_patch_size=16,
            downsample_factor=2,
            max_pixels_tolerance=2.0,
        )
        self.assertFalse(is_large)

    def test_is_image_too_large_large_image(self):
        """Test _is_image_too_large with large image."""
        image_processing = self.fast_image_processing_class(
            max_image_tokens=256,
            encoder_patch_size=16,
            downsample_factor=2,
            max_pixels_tolerance=1.0,
        )

        is_large = image_processing._is_image_too_large(
            height=565,
            width=565,
            max_image_tokens=256,
            encoder_patch_size=16,
            downsample_factor=2,
            max_pixels_tolerance=1.0,
        )
        self.assertTrue(is_large)

    # ==================== Batch Processing Tests ====================

    def test_batch_mixed_image_sizes(self):
        """Test batch processing with different image sizes requiring different processing paths."""
        image_processing = self.fast_image_processing_class(do_image_splitting=False)

        # Create images with significantly different sizes
        small_image = Image.new("RGB", (256, 256), color="red")
        medium_image = Image.new("RGB", (512, 512), color="green")
        large_image = Image.new("RGB", (1024, 1024), color="blue")

        # Process as batch
        result = image_processing([[small_image], [medium_image], [large_image]], return_tensors="pt")

        # All should be processed and padded to same size
        self.assertEqual(result.pixel_values.shape[0], 3)
        # All should have same max_num_patches
        self.assertEqual(result.pixel_values.shape[1], image_processing.max_num_patches)
        # Patch dimension should be patch_size^2 * 3 channels
        expected_patch_dim = 3 * image_processing.encoder_patch_size**2
        self.assertEqual(result.pixel_values.shape[2], expected_patch_dim)

        # Spatial shapes should all be square (equal height and width)
        shapes = result.spatial_shapes.tolist()
        for shape in shapes:
            self.assertEqual(shape[0], shape[1], "Square images should have equal height and width")

        # pixel_attention_mask should have correct shape
        self.assertEqual(result.pixel_attention_mask.shape[0], 3)
        self.assertEqual(result.pixel_attention_mask.shape[1], image_processing.max_num_patches)

    def test_batch_mixed_aspect_ratios(self):
        """Test batch with mixed aspect ratios."""
        image_processing = self.fast_image_processing_class(do_image_splitting=False)

        square = Image.new("RGB", (512, 512), color="red")
        landscape = Image.new("RGB", (1024, 512), color="green")
        portrait = Image.new("RGB", (512, 1024), color="blue")

        result = image_processing([[square], [landscape], [portrait]], return_tensors="pt")

        # All should be processed
        self.assertEqual(result.pixel_values.shape[0], 3)
        self.assertEqual(result.spatial_shapes.shape[0], 3)

        # Spatial shapes should reflect aspect ratios: [height, width]
        shapes = result.spatial_shapes.tolist()
        square_shape, landscape_shape, portrait_shape = shapes

        # Square: height == width
        self.assertEqual(square_shape[0], square_shape[1], "Square image should have equal spatial dimensions")
        # Landscape: width > height
        self.assertGreater(landscape_shape[1], landscape_shape[0], "Landscape image should have width > height")
        # Portrait: height > width
        self.assertGreater(portrait_shape[0], portrait_shape[1], "Portrait image should have height > width")

        # pixel_attention_mask should match batch size and max_num_patches
        self.assertEqual(result.pixel_attention_mask.shape[0], 3)
        self.assertEqual(result.pixel_attention_mask.shape[1], image_processing.max_num_patches)

    def test_disable_grouping_single_image(self):
        """Test disable_grouping parameter with single image."""
        image_processing = self.fast_image_processing_class(do_image_splitting=False)

        image = Image.new("RGB", (512, 512), color="purple")

        # Process with and without disable_grouping
        result_grouped = image_processing([[image]], return_tensors="pt", disable_grouping=False)
        result_ungrouped = image_processing([[image]], return_tensors="pt", disable_grouping=True)

        # Both should produce all expected output keys
        for result in [result_grouped, result_ungrouped]:
            self.assertIn("pixel_values", result)
            self.assertIn("spatial_shapes", result)
            self.assertIn("pixel_attention_mask", result)

        # Both should have same output shapes for single image
        self.assertEqual(result_grouped.pixel_values.shape, result_ungrouped.pixel_values.shape)
        self.assertEqual(result_grouped.spatial_shapes.shape, result_ungrouped.spatial_shapes.shape)
        self.assertEqual(result_grouped.pixel_attention_mask.shape, result_ungrouped.pixel_attention_mask.shape)

        # Verify specific shapes
        self.assertEqual(result_ungrouped.pixel_values.shape[0], 1)
        self.assertEqual(result_ungrouped.pixel_values.shape[1], image_processing.max_num_patches)
        expected_patch_dim = 3 * image_processing.encoder_patch_size**2
        self.assertEqual(result_ungrouped.pixel_values.shape[2], expected_patch_dim)

    def test_disable_grouping_batch(self):
        """Test disable_grouping parameter with batch of images."""
        image_processing = self.fast_image_processing_class(do_image_splitting=False)

        # Images of same size - normally would be grouped
        img1 = Image.new("RGB", (256, 256), color="red")
        img2 = Image.new("RGB", (256, 256), color="green")
        img3 = Image.new("RGB", (256, 256), color="blue")

        # Process with disable_grouping=True
        result = image_processing([[img1], [img2], [img3]], return_tensors="pt", disable_grouping=True)

        # Should produce valid output for all images
        self.assertEqual(result.pixel_values.shape[0], 3)

    def test_batch_with_tiling(self):
        """Test batch processing when some images need tiling."""
        image_processing = self.fast_image_processing_class(
            do_image_splitting=True,
            use_thumbnail=True,
            min_tiles=2,
            max_tiles=4,
            tile_size=512,
        )

        # Small image (no tiling needed) and large image (will be tiled)
        small = Image.new("RGB", (256, 256), color="red")
        large = Image.new("RGB", (1024, 1024), color="blue")  # 2x2 tiles at 512

        result = image_processing([[small], [large]], return_tensors="pt", return_row_col_info=True)

        # Calculate tiles for each image
        small_tiles = result.image_rows[0].item() * result.image_cols[0].item()
        large_tiles = result.image_rows[1].item() * result.image_cols[1].item()

        # Small image: single tile (no splitting needed)
        self.assertEqual(small_tiles, 1, "Small 256x256 image should have 1 tile (no splitting)")
        # Large image: 2x2 = 4 tiles for 1024x1024 with tile_size=512
        self.assertEqual(large_tiles, 4, "Large 1024x1024 image should have 4 tiles (2x2)")

        # Total images: small (1) + large tiles (4) + thumbnail for large (1) = 6
        # Thumbnail is only added when there's more than 1 tile
        expected_total = 1 + 4 + 1  # small + large_tiles + large_thumbnail
        self.assertEqual(result.pixel_values.shape[0], expected_total)
        self.assertEqual(result.spatial_shapes.shape[0], expected_total)
        self.assertEqual(result.pixel_attention_mask.shape[0], expected_total)

    def test_batch_tiling(self):
        """Test that patches from different images don't get mixed when batch processing with tiling.

        This test verifies that when processing a batch of images with tiling enabled,
        patches from image 0 don't end up in image 1's output and vice versa.
        This was a bug caused by incorrect permute/reshape operations in crop_image_to_patches.
        """
        for use_thumbnail in [False, True]:
            with self.subTest(use_thumbnail=use_thumbnail):
                image_processing = self.fast_image_processing_class(
                    do_image_splitting=True,
                    use_thumbnail=use_thumbnail,
                    min_tiles=2,
                    max_tiles=4,
                    tile_size=512,
                )

                # Create two large images with completely different solid colors
                # Red image: RGB = (255, 0, 0)
                # Blue image: RGB = (0, 0, 255)
                red_image = Image.new("RGB", (1024, 1024), color=(255, 0, 0))
                blue_image = Image.new("RGB", (1024, 1024), color=(0, 0, 255))

                result = image_processing(
                    [[red_image], [blue_image]],
                    return_tensors="pt",
                    return_row_col_info=True,
                    do_rescale=False,  # Keep original pixel values for easier verification
                    do_normalize=False,
                )

                # Each 1024x1024 image should be split into 2x2 = 4 tiles
                red_tiles = result.image_rows[0].item() * result.image_cols[0].item()
                blue_tiles = result.image_rows[1].item() * result.image_cols[1].item()
                self.assertEqual(red_tiles, 4)
                self.assertEqual(blue_tiles, 4)

                # Calculate expected total patches
                # Without thumbnail: 4 + 4 = 8
                # With thumbnail: (4 + 1) + (4 + 1) = 10
                thumb_count = 1 if use_thumbnail else 0
                expected_total = (red_tiles + thumb_count) + (blue_tiles + thumb_count)
                self.assertEqual(result.pixel_values.shape[0], expected_total)

                pixel_values = result.pixel_values
                patch_size = image_processing.encoder_patch_size
                patches_per_image = red_tiles + thumb_count

                # Check red image patches (and thumbnail if enabled)
                # All should have high red, zero blue
                for i in range(patches_per_image):
                    first_patch = pixel_values[i][0].view(3, patch_size, patch_size)
                    red_mean = first_patch[0].float().mean().item()
                    blue_mean = first_patch[2].float().mean().item()
                    patch_type = "thumbnail" if use_thumbnail and i == red_tiles else f"patch {i}"
                    self.assertGreater(
                        red_mean,
                        blue_mean,
                        f"Red image {patch_type} has more blue than red - patches may be interleaved",
                    )

                # Check blue image patches (and thumbnail if enabled)
                # All should have high blue, zero red
                for i in range(patches_per_image, 2 * patches_per_image):
                    first_patch = pixel_values[i][0].view(3, patch_size, patch_size)
                    red_mean = first_patch[0].float().mean().item()
                    blue_mean = first_patch[2].float().mean().item()
                    local_idx = i - patches_per_image
                    patch_type = "thumbnail" if use_thumbnail and local_idx == blue_tiles else f"patch {local_idx}"
                    self.assertGreater(
                        blue_mean,
                        red_mean,
                        f"Blue image {patch_type} has more red than blue - patches may be interleaved",
                    )
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/lfm2_vl/test_image_processing_lfm2_vl.py", "license": "Apache License 2.0", "lines": 752, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:tests/models/lfm2_vl/test_modeling_lfm2_vl.py
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the LFM2-VL model.""" import math import unittest from io import BytesIO import pytest import requests from transformers import AutoProcessor, is_torch_available from transformers.models.lfm2_vl.modeling_lfm2_vl import Lfm2VlForConditionalGeneration from transformers.testing_utils import ( cleanup, require_torch, require_torch_accelerator, slow, torch_device, ) from transformers.utils.import_utils import is_vision_available from ...causal_lm_tester import CausalLMModelTester from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_vision_available(): from PIL import Image if is_torch_available(): import torch from transformers import Lfm2VlConfig, Lfm2VlForConditionalGeneration, Lfm2VlModel from transformers.models.lfm2.modeling_lfm2 import Lfm2HybridConvCache class Lfm2VlModelTester(CausalLMModelTester): if is_torch_available(): config_class = Lfm2VlConfig base_model_class = Lfm2VlModel causal_lm_class = Lfm2VlForConditionalGeneration def __init__( self, parent, is_training=True, batch_size=2, scale_factor=2, num_images=2, vision_config={ "hidden_size": 32, "intermediate_size": 37, "num_hidden_layers": 2, "num_attention_heads": 2, "num_channels": 3, "num_patches": 16, "patch_size": 4, "hidden_act": 
"gelu_pytorch_tanh", "layer_norm_eps": 1e-6, "attention_dropout": 0.0, }, text_config={ "vocab_size": 100, "hidden_size": 32, "intermediate_size": 37, "num_hidden_layers": 2, "num_attention_heads": 4, "num_key_value_heads": 2, "max_position_embeddings": 100, "pad_token_id": 0, "bos_token_id": 1, "eos_token_id": 2, "tie_word_embeddings": True, "rope_theta": 1000000.0, "conv_bias": False, "conv_L_cache": 3, "block_multiple_of": 2, "full_attn_idxs": [0], }, image_token_id=4, downsample_factor=4, projector_hidden_size=32, ): super().__init__(parent) self.vision_config = vision_config self.text_config = text_config self.image_token_id = image_token_id self.is_training = is_training self.batch_size = batch_size self.scale_factor = scale_factor self.num_images = num_images self.downsample_factor = downsample_factor self.projector_hidden_size = projector_hidden_size self.image_seq_length = 4 def get_config(self): return Lfm2VlConfig( vision_config=self.vision_config, text_config=self.text_config, image_token_id=self.image_token_id, downsample_factor=self.downsample_factor, projector_hidden_size=self.projector_hidden_size, ) def prepare_config_and_inputs(self): # Create dummy pixel values: [num_images, num_patches, channels * patch_size^2] patch_size = self.vision_config["patch_size"] pixel_values = floats_tensor([self.num_images, 64, 3 * patch_size * patch_size]) # Spatial shapes: one (height_patches, width_patches) per image patches = int(math.sqrt(64)) spatial_shapes = torch.tensor([[patches, patches]] * self.num_images, dtype=torch.long, device=torch_device) # Pixel attention mask: mark all patches as valid (no padding) pixel_attention_mask = torch.ones((self.num_images, 64), dtype=torch.long, device=torch_device) config = self.get_config() return config, pixel_values, spatial_shapes, pixel_attention_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, spatial_shapes, pixel_attention_mask = 
config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 1 # For simplicity just set the last n tokens to the image token input_ids[input_ids == self.image_token_id] = self.text_config["pad_token_id"] input_ids[:, -self.image_seq_length :] = self.image_token_id attention_mask = input_ids.ne(1).to(torch_device) inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, "spatial_shapes": spatial_shapes, "pixel_attention_mask": pixel_attention_mask, } return config, inputs_dict @require_torch class Lfm2VlModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (Lfm2VlModel, Lfm2VlForConditionalGeneration) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": Lfm2VlModel, "text-generation": Lfm2VlForConditionalGeneration, } if is_torch_available() else {} ) model_tester_class = Lfm2VlModelTester _is_composite = True test_torch_exportable = False def setUp(self): self.model_tester = Lfm2VlModelTester(self) common_properties = ["image_token_id", "projector_hidden_size"] self.config_tester = ConfigTester( self, config_class=Lfm2VlConfig, has_text_modality=False, common_properties=common_properties ) def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config): self.assertIsInstance(past_key_values, Lfm2HybridConvCache) # (batch, kv heads, seq_length, head_dim) num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads) head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) attention_shape = (batch_size, num_heads, seq_length, head_dim) conv_shape = (batch_size, config.hidden_size, config.conv_L_cache) for i in range(config.num_hidden_layers): if config.layer_types[i] == "full_attention": self.assertEqual(past_key_values.key_cache[i].shape, attention_shape) self.assertEqual(past_key_values.value_cache[i].shape, 
attention_shape) else: self.assertEqual(past_key_values.conv_cache[i].shape, conv_shape) def _check_caches_are_equal(self, cache1: Lfm2HybridConvCache, cache2: Lfm2HybridConvCache): """Text model uses lfm2, which has non-standard cache""" if not isinstance(cache1, Lfm2HybridConvCache) or not isinstance(cache2, Lfm2HybridConvCache): raise ValueError("The wrong cache is being used!") if not len(cache1) == len(cache2): raise ValueError("Both caches do not have the same number of layers.") num_layers = len(cache1) for idx in range(num_layers): torch.testing.assert_close(cache1.key_cache[idx], cache2.key_cache[idx]) torch.testing.assert_close(cache1.value_cache[idx], cache2.value_cache[idx]) torch.testing.assert_close(cache1.conv_cache[idx], cache2.conv_cache[idx]) def test_config(self): self.config_tester.run_common_tests() @unittest.skip( "Lfm2 backbone alternates between attention and conv layers, so attention are only returned for attention layers" ) def test_attention_outputs(self): pass @unittest.skip( "Lfm2 backbone has a special cache format which is not compatible with compile as it has static address for conv cache" ) @pytest.mark.torch_compile_test def test_sdpa_can_compile_dynamic(self): pass @pytest.mark.xfail(reason="This architecture seems to not compute gradients for some layer.") def test_training_gradient_checkpointing(self): super().test_training_gradient_checkpointing() @pytest.mark.xfail(reason="This architecture seems to not compute gradients for some layer.") def test_training_gradient_checkpointing_use_reentrant_false(self): super().test_training_gradient_checkpointing_use_reentrant_false() @pytest.mark.xfail(reason="This architecture seems to not compute gradients for some layer.") def test_training_gradient_checkpointing_use_reentrant_true(self): super().test_training_gradient_checkpointing_use_reentrant_true() @require_torch_accelerator @slow class Lfm2VlForConditionalGenerationIntegrationTest(unittest.TestCase): def setUp(self): 
self.processor = AutoProcessor.from_pretrained("LiquidAI/LFM2-VL-1.6B") self.processor.tokenizer.padding_side = "left" self.image = Image.open( requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw ) self.image2 = Image.open( BytesIO( requests.get( "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" ).content ) ) def tearDown(self): cleanup(torch_device, gc_collect=True) def test_integration_test(self): model = Lfm2VlForConditionalGeneration.from_pretrained( "LiquidAI/LFM2-VL-1.6B", dtype=torch.bfloat16, device_map="auto", ) # Create inputs text = "<image>In this image, we see" images = self.image inputs = self.processor(text=text, images=images, return_tensors="pt") inputs.to(device=torch_device, dtype=torch.bfloat16) generated_ids = model.generate(**inputs, max_new_tokens=20, do_sample=False) generated_texts = self.processor.batch_decode(generated_ids, skip_special_tokens=True) expected_generated_text = "In this image, we see a cat and a dog lying on a pink blanket. They are both sleeping peacefully. They are" self.assertEqual(generated_texts[0], expected_generated_text) def test_integration_test_high_resolution(self): model = Lfm2VlForConditionalGeneration.from_pretrained( "LiquidAI/LFM2-VL-1.6B", dtype=torch.bfloat16, device_map="auto", ) # Create inputs text = "<image>In this image, we see" images = self.image2 inputs = self.processor(text=text, images=images, return_tensors="pt") inputs.to(device=torch_device, dtype=torch.bfloat16) generated_ids = model.generate(**inputs, max_new_tokens=20, do_sample=False) generated_texts = self.processor.batch_decode(generated_ids, skip_special_tokens=True) expected_generated_text = ( "In this image, we see the Statue of Liberty, standing tall on its pedestal. 
The statue is made of metal," ) self.assertEqual(generated_texts[0], expected_generated_text) def test_integration_test_batched(self): model = Lfm2VlForConditionalGeneration.from_pretrained( "LiquidAI/LFM2-VL-450M", dtype=torch.bfloat16, device_map="auto", ) # Create inputs text = ["<image>In this image, we see", "<image>In this image, we see a cat"] images = [[self.image2], [self.image]] inputs = self.processor(text=text, images=images, return_tensors="pt", padding=True) inputs.to(device=torch_device, dtype=torch.bfloat16) generated_ids = model.generate(**inputs, max_new_tokens=20, do_sample=False) generated_texts = self.processor.batch_decode(generated_ids, skip_special_tokens=True) expected_generated_text = [ "In this image, we see a panoramic view of the New York City skyline. The iconic Statics and the New York", "In this image, we see a cat that is lying on its side on a cat bed.", ] self.assertListEqual(generated_texts, expected_generated_text) @require_torch_accelerator @slow class Lfm2_5VlForConditionalGenerationIntegrationTest(unittest.TestCase): def setUp(self): self.processor = AutoProcessor.from_pretrained("LiquidAI/LFM2.5-VL-1.6B") self.processor.tokenizer.padding_side = "left" self.image = Image.open( requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw ) self.image2 = Image.open( BytesIO( requests.get( "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" ).content ) ) def tearDown(self): cleanup(torch_device, gc_collect=True) def test_integration_test(self): model = Lfm2VlForConditionalGeneration.from_pretrained( "LiquidAI/LFM2.5-VL-1.6B", dtype=torch.bfloat16, device_map="auto", ) # Create inputs text = "<image>In this image, we see" images = self.image inputs = self.processor(text=text, images=images, return_tensors="pt") inputs.to(device=torch_device, dtype=torch.bfloat16) generated_ids = model.generate(**inputs, max_new_tokens=20, do_sample=False) generated_texts = 
self.processor.batch_decode(generated_ids, skip_special_tokens=True) expected_generated_text = ( "In this image, we see two cats lying on a pink blanket. One cat is a tabby, and the other is a" ) self.assertEqual(generated_texts[0], expected_generated_text) def test_integration_test_high_resolution(self): model = Lfm2VlForConditionalGeneration.from_pretrained( "LiquidAI/LFM2.5-VL-1.6B", dtype=torch.bfloat16, device_map="auto", ) # Create inputs text = "<image>In this image, we see" images = self.image2 inputs = self.processor(text=text, images=images, return_tensors="pt") inputs.to(device=torch_device, dtype=torch.bfloat16) generated_ids = model.generate(**inputs, max_new_tokens=20, do_sample=False) generated_texts = self.processor.batch_decode(generated_ids, skip_special_tokens=True) expected_generated_text = "In this image, we see the Statue of Liberty, an iconic symbol of freedom and democracy. It stands on Liberty Island in" self.assertEqual(generated_texts[0], expected_generated_text) def test_integration_test_batched(self): model = Lfm2VlForConditionalGeneration.from_pretrained( "LiquidAI/LFM2.5-VL-1.6B", dtype=torch.bfloat16, device_map="auto", ) # Create inputs text = ["<image>In this image, we see", "<image>In this image, we see"] images = [[self.image2], [self.image]] inputs = self.processor(text=text, images=images, return_tensors="pt", padding=True) inputs.to(device=torch_device, dtype=torch.bfloat16) generated_ids = model.generate(**inputs, max_new_tokens=20, do_sample=False) generated_texts = self.processor.batch_decode(generated_ids, skip_special_tokens=True) expected_generated_text = [ "In this image, we see the Statue of Liberty, an iconic symbol of freedom and democracy. It stands on Liberty Island in", "In this image, we see two cats lying on a pink blanket. One cat is a tabby, and the other is a", ] self.assertListEqual(generated_texts, expected_generated_text)
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/lfm2_vl/test_modeling_lfm2_vl.py", "license": "Apache License 2.0", "lines": 333, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:tests/models/lfm2_vl/test_processing_lfm2_vl.py
# Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import unittest import numpy as np from transformers import Lfm2VlProcessor from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torchvision_available, is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from PIL import Image if is_torchvision_available(): pass @require_torch @require_vision class Lfm2VlProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = Lfm2VlProcessor @classmethod def _setup_image_processor(cls): image_processor_class = cls._get_component_class_from_processor("image_processor") return image_processor_class( tile_size=14, min_image_tokens=2, max_image_tokens=10, encoder_patch_size=2, do_image_splitting=False, ) @classmethod def _setup_tokenizer(cls): tokenizer_class = cls._get_component_class_from_processor("tokenizer") processor_kwargs = cls.prepare_processor_dict() return tokenizer_class.from_pretrained("LiquidAI/LFM2-VL-1.6B", **processor_kwargs) @classmethod def _setup_test_attributes(cls, processor): # Create images with different sizes cls.small_image = Image.new("RGB", (256, 256)) cls.large_image = Image.new("RGB", (512, 1024)) cls.high_res_image = Image.new("RGB", (1024, 1024)) cls.bos_token = processor.tokenizer.bos_token cls.image_token = processor.image_token cls.bos_token_id = processor.tokenizer.convert_tokens_to_ids(cls.bos_token) 
cls.image_token_id = processor.image_token_id cls.image_start_token_id = processor.tokenizer.convert_tokens_to_ids(processor.image_start_token) cls.image_end_token_id = processor.tokenizer.convert_tokens_to_ids(processor.image_end_token) cls.padding_token_id = processor.tokenizer.pad_token_id cls.image_thumbnail_token_id = processor.tokenizer.convert_tokens_to_ids(processor.image_thumbnail_token) @staticmethod def prepare_processor_dict(): chat_template = ( "{{bos_token}}{% for message in messages %}" "{{'<|im_start|>' + message['role'] + '\n'}}" "{% if message['content'] is string %}" "{{ message['content'] }}" "{% else %}" "{% for content in message['content'] %}" "{% if content['type'] == 'image' %}" "{{ '<image>' }}" "{% elif content['type'] == 'text' %}" "{{ content['text'] }}" "{% endif %}" "{% endfor %}" "{% endif %}" "{{'<|im_end|>\n'}}" "{% endfor %}" "{% if add_generation_prompt %}" "{{'<|im_start|>assistant\n' }}" "{% endif %}" ) return {"chat_template": chat_template} @unittest.skip("Lfm2VlProcessor adds special tokens to the text") def test_tokenizer_defaults(self): pass # Override as Lfm2VL needs images/video to be an explicitly nested batch def prepare_image_inputs(self, batch_size=None): """This function prepares a list of PIL images for testing""" images = super().prepare_image_inputs(batch_size) if isinstance(images, (list, tuple)): images = [[image] for image in images] return images def get_split_image_expected_tokens(self, processor, image_rows, image_cols, add_thumbnail, image_seq_len): text_split_images = [self.image_start_token_id] num_patches_tile = processor.image_processor.tile_size // processor.image_processor.encoder_patch_size tile_seq_len = math.ceil(num_patches_tile / processor.image_processor.downsample_factor) ** 2 for n_h in range(image_rows): for n_w in range(image_cols): text_split_images += ( processor.tokenizer(f"<|img_row_{n_h + 1}_col_{n_w + 1}|>", add_special_tokens=False)["input_ids"] + [self.image_token_id] * tile_seq_len 
) if add_thumbnail: text_split_images += [self.image_thumbnail_token_id] + [self.image_token_id] * image_seq_len text_split_images += [self.image_end_token_id] return text_split_images def test_process_interleaved_images_prompts_no_image_splitting_single_image(self): processor_components = self.prepare_components() processor_components["tokenizer"] = self.get_component("tokenizer", padding_side="left") processor_components["image_processor"] = self.get_component("image_processor", do_image_splitting=False) processor_kwargs = self.prepare_processor_dict() processor = self.processor_class(**processor_components, **processor_kwargs) image_str = "<image>" # Test that a single image is processed correctly inputs = processor(images=self.small_image, text=image_str) encoder_feature_dims = ( 3 * processor.image_processor.encoder_patch_size * processor.image_processor.encoder_patch_size ) self.assertEqual( np.array(inputs["pixel_values"]).shape, (1, processor.image_processor.max_num_patches, encoder_feature_dims), ) self.assertEqual( np.array(inputs["pixel_attention_mask"]).shape, (1, processor.image_processor.max_num_patches) ) self.assertListEqual(inputs["spatial_shapes"].tolist(), [[6, 6]]) # fmt: on def test_process_interleaved_images_prompts_no_image_splitting_single_image_with_text(self): processor_components = self.prepare_components() processor_components["tokenizer"] = self.get_component("tokenizer", padding_side="left") processor_components["image_processor"] = self.get_component("image_processor", do_image_splitting=False) processor_kwargs = self.prepare_processor_dict() processor = self.processor_class(**processor_components, **processor_kwargs) image_str = "<image>" text_str = "In this image, we see" text = image_str + text_str inputs = processor(text=text, images=self.small_image) # fmt: off tokenized_sentence = processor.tokenizer(text_str, add_special_tokens=False) expected_input_ids = [[self.image_start_token_id] + [self.image_token_id] * 9 + 
[self.image_end_token_id] + tokenized_sentence["input_ids"]] self.assertEqual(inputs["input_ids"], expected_input_ids) self.assertEqual(inputs["attention_mask"], [[1] * len(expected_input_ids[0])]) encoder_feature_dims = 3 * processor.image_processor.encoder_patch_size * processor.image_processor.encoder_patch_size self.assertEqual(np.array(inputs["pixel_values"]).shape, (1, processor.image_processor.max_num_patches, encoder_feature_dims)) self.assertEqual(np.array(inputs["pixel_attention_mask"]).shape, (1, processor.image_processor.max_num_patches)) self.assertListEqual(inputs["spatial_shapes"].tolist(), [[6, 6]]) # fmt: on def test_process_interleaved_images_prompts_no_image_splitting_multiple_images(self): processor_components = self.prepare_components() processor_components["tokenizer"] = self.get_component("tokenizer", padding_side="left") processor_components["image_processor"] = self.get_component("image_processor", do_image_splitting=False) processor_kwargs = self.prepare_processor_dict() processor = self.processor_class(**processor_components, **processor_kwargs) image_str = "<image>" text_str_1 = "In this image, we see" text_str_2 = "In this image, we see" text = [ image_str + text_str_1, image_str + image_str + text_str_2, ] images = [[self.small_image], [self.small_image, self.small_image]] inputs = processor(text=text, images=images, padding=True) tokenized_sentence_1 = processor.tokenizer(text_str_1, add_special_tokens=False) tokenized_sentence_2 = processor.tokenizer(text_str_2, add_special_tokens=False) image_tokens = [self.image_start_token_id] + [self.image_token_id] * 9 + [self.image_end_token_id] expected_input_ids_1 = image_tokens + tokenized_sentence_1["input_ids"] expected_input_ids_2 = 2 * image_tokens + tokenized_sentence_2["input_ids"] # Pad the first input to match the second input pad_len = len(expected_input_ids_2) - len(expected_input_ids_1) padded_expected_input_ids_1 = [self.padding_token_id] * pad_len + expected_input_ids_1 
self.assertEqual(inputs["input_ids"], [padded_expected_input_ids_1, expected_input_ids_2]) self.assertEqual( inputs["attention_mask"], [[0] * pad_len + [1] * len(expected_input_ids_1), [1] * len(expected_input_ids_2)], ) encoder_feature_dims = ( 3 * processor.image_processor.encoder_patch_size * processor.image_processor.encoder_patch_size ) self.assertEqual( np.array(inputs["pixel_values"]).shape, (3, processor.image_processor.max_num_patches, encoder_feature_dims), ) self.assertEqual( np.array(inputs["pixel_attention_mask"]).shape, (3, processor.image_processor.max_num_patches) ) self.assertListEqual(inputs["spatial_shapes"].tolist(), [[6, 6], [6, 6], [6, 6]]) def test_process_interleaved_images_prompts_image_splitting(self): processor = self.get_processor() image_str = "<image>" text_str_1 = "In this image, we see" text_str_2 = "bla, bla" text = [image_str + text_str_1, text_str_2 + image_str + image_str] images = [[self.small_image], [self.high_res_image, self.high_res_image]] inputs = processor( text=text, images=images, padding=True, padding_side="left", max_pixels_tolerance=2.0, use_thumbnail=True, do_image_splitting=True, ) tokenized_sentence_1 = processor.tokenizer(text_str_1, add_special_tokens=False) tokenized_sentence_2 = processor.tokenizer(text_str_2, add_special_tokens=False) small_image_tokens = self.get_split_image_expected_tokens(processor, 3, 3, True, 9) large_image_tokens = self.get_split_image_expected_tokens(processor, 3, 3, True, 9) high_res_image_tokens = self.get_split_image_expected_tokens(processor, 3, 3, True, 9) expected_input_ids_1 = small_image_tokens + tokenized_sentence_1["input_ids"] expected_input_ids_2 = tokenized_sentence_2["input_ids"] + large_image_tokens + high_res_image_tokens # Pad the first input to match the second input pad_len = len(expected_input_ids_2) - len(expected_input_ids_1) padded_expected_input_ids_1 = [self.padding_token_id] * pad_len + expected_input_ids_1 self.assertEqual(inputs["input_ids"][0], 
padded_expected_input_ids_1) self.assertEqual(inputs["input_ids"][1], expected_input_ids_2) self.assertEqual( inputs["attention_mask"], [[0] * pad_len + [1] * len(expected_input_ids_1), [1] * len(expected_input_ids_2)], ) self.assertEqual(np.array(inputs["pixel_values"]).shape, (30, 49, 12)) self.assertEqual(np.array(inputs["pixel_attention_mask"]).shape, (30, 49)) self.assertListEqual(inputs["spatial_shapes"].tolist(), ([[7, 7]] * 9 + [[6, 6]]) * 3) def test_add_special_tokens_processor_image_splitting(self): processor = self.get_processor() image_str = "<image>" text_str = "In this image, we see" text = text_str + image_str # fmt: off inputs = processor(text=text, images=self.high_res_image, add_special_tokens=False, do_image_splitting=True) tokenized_sentence = processor.tokenizer(text_str, add_special_tokens=False) split_high_res_image_tokens = self.get_split_image_expected_tokens(processor, 3, 3, True, 9) expected_input_ids = [tokenized_sentence["input_ids"] + split_high_res_image_tokens] self.assertEqual(inputs["input_ids"], expected_input_ids) # fmt: on def test_add_special_tokens_processor_image_splitting_large_image(self): processor = self.get_processor() image_str = "<image>" text_str = "In this image, we see" text = text_str + image_str # fmt: off inputs = processor(text=text, images=self.large_image, add_special_tokens=False, max_pixels_tolerance=2.0, do_image_splitting=True) tokenized_sentence = processor.tokenizer(text_str, add_special_tokens=False) large_image_tokens = self.get_split_image_expected_tokens(processor, 4, 2, True, 8) expected_input_ids = [tokenized_sentence["input_ids"] + large_image_tokens] self.assertEqual(inputs["input_ids"], expected_input_ids) # fmt: on def test_add_special_tokens_processor_image_no_splitting(self): processor = self.get_processor() image_str = "<image>" text_str = "In this image, we see" text = image_str + text_str # fmt: off inputs = processor(text=text, images=self.high_res_image, add_special_tokens=False, 
use_image_special_tokens=True, do_image_splitting=False) tokenized_sentence = processor.tokenizer(text_str, add_special_tokens=False) split_high_res_image_tokens = [self.image_start_token_id] + [self.image_token_id] * 9 + [self.image_end_token_id] expected_input_ids = [split_high_res_image_tokens + tokenized_sentence["input_ids"]] self.assertEqual(inputs["input_ids"], expected_input_ids) # fmt: on def test_process_interleaved_images_prompts_image_error(self): processor = self.get_processor() text = [ "This is a test sentence.", "In this other sentence we try some good things", ] images = [[self.small_image], [self.large_image]] with self.assertRaises(ValueError): processor(text=text, images=images, padding=True) images = [[self.small_image], []] with self.assertRaises(ValueError): processor(text=text, images=images, padding=True) text = [ "This is a test sentence.<image>", "In this other sentence we try some good things<image>", ] images = [[self.small_image], [self.large_image, self.high_res_image]] with self.assertRaises(ValueError): processor(text=text, images=images, padding=True) images = [[], [self.large_image]] with self.assertRaises(ValueError): processor(text=text, images=images, padding=True) images = [self.small_image, self.large_image, self.high_res_image] with self.assertRaises(ValueError): processor(text=text, images=images, padding=True) images = [self.small_image] with self.assertRaises(ValueError): processor(text=text, images=images, padding=True) text = [ "This is a test sentence.", "In this other sentence we try some good things<image>", ] images = [[self.small_image], []] with self.assertRaises(ValueError): processor(text=text, images=images, padding=True) images = [[], [self.large_image]] processor(text=text, images=images, padding=True) images = [self.small_image, self.large_image] with self.assertRaises(ValueError): processor(text=text, images=images, padding=True) images = [self.small_image] with self.assertRaises(ValueError): 
processor(text=text, images=images, padding=True) def test_apply_chat_template(self): # Message contains content which a mix of lists with images and image urls and string messages = [ { "role": "user", "content": [ {"type": "text", "text": "What do these images show?"}, {"type": "image"}, {"type": "image"}, ], }, { "role": "assistant", "content": [ { "type": "text", "text": "The first image shows the statue of Liberty in New York. The second image picture depicts Idefix, the dog of Obelix in Asterix and Obelix.", } ], }, {"role": "user", "content": [{"type": "text", "text": "And who is that?"}]}, ] processor = self.get_processor() # Make short sequence length to test that the fake tokens are added correctly rendered = processor.apply_chat_template(messages, add_generation_prompt=True) expected_rendered = ( "<|startoftext|><|im_start|>user\nWhat do these images show?<image><image><|im_end|>\n" "<|im_start|>assistant\nThe first image shows the statue of Liberty in New York. The second image picture depicts Idefix, the dog of Obelix in Asterix and Obelix.<|im_end|>\n" "<|im_start|>user\nAnd who is that?<|im_end|>\n" "<|im_start|>assistant\n" ) self.assertEqual(rendered, expected_rendered) def test_text_only_inference(self): """Test that the processor works correctly with text-only input.""" processor_components = self.prepare_components() processor_components["tokenizer"] = self.get_component("tokenizer", padding_side="left") processor_kwargs = self.prepare_processor_dict() processor = self.processor_class(**processor_components, **processor_kwargs) text = "This is a simple text without images." 
inputs = processor(text=text) tokenized_sentence = processor.tokenizer(text, add_special_tokens=False) expected_input_ids = [tokenized_sentence["input_ids"]] self.assertEqual(inputs["input_ids"], expected_input_ids) self.assertEqual(inputs["attention_mask"], [[1] * len(expected_input_ids[0])]) self.assertTrue("pixel_values" not in inputs) self.assertTrue("pixel_attention_mask" not in inputs) # Test batch of texts without image tokens texts = ["First text.", "Second piece of text."] batch_inputs = processor(text=texts, padding=True) tokenized_1 = processor.tokenizer(texts[0], add_special_tokens=False) tokenized_2 = processor.tokenizer(texts[1], add_special_tokens=False) expected_1 = tokenized_1["input_ids"] expected_2 = tokenized_2["input_ids"] # Pad the shorter sequence pad_len = len(expected_2) - len(expected_1) if pad_len > 0: padded_expected_1 = [self.padding_token_id] * pad_len + expected_1 expected_attention_1 = [0] * pad_len + [1] * len(expected_1) self.assertEqual(batch_inputs["input_ids"], [padded_expected_1, expected_2]) self.assertEqual(batch_inputs["attention_mask"], [expected_attention_1, [1] * len(expected_2)]) else: pad_len = -pad_len padded_expected_2 = [self.padding_token_id] * pad_len + expected_2 expected_attention_2 = [0] * pad_len + [1] * len(expected_2) self.assertEqual(batch_inputs["input_ids"], [expected_1, padded_expected_2]) self.assertEqual(batch_inputs["attention_mask"], [[1] * len(expected_1), expected_attention_2]) def test_missing_images_error(self): """Test that appropriate error is raised when images are referenced but not provided.""" processor = self.get_processor() # Test single text with image token but no image text = "Let me show you this image: <image> What do you think?" 
with self.assertRaises(ValueError) as context: processor(text=text) self.assertTrue("We detected 1 tokens in the text but no images were passed" in str(context.exception)) # Test batch with image tokens but no images texts = [ "First text with <image> token.", "Second text <image> with token.", ] with self.assertRaises(ValueError) as context: processor(text=texts) self.assertTrue("We detected 2 tokens in the text but no images were passed" in str(context.exception)) # Test with None as Images with self.assertRaises(ValueError) as context: processor(text=text, images=None) self.assertTrue("We detected 1 tokens in the text but no images were passed" in str(context.exception)) with self.assertRaises(ValueError) as context: processor(text=texts, images=None) self.assertTrue("We detected 2 tokens in the text but no images were passed" in str(context.exception)) def test_single_tile_image_with_thumbnail_disabled(self): """Test that single-tile images work correctly when use_thumbnail=False.""" processor_components = self.prepare_components() processor_components["tokenizer"] = self.get_component("tokenizer", padding_side="left") processor_components["image_processor"] = self.get_component("image_processor", do_image_splitting=False) processor_kwargs = self.prepare_processor_dict() processor = self.processor_class(**processor_components, **processor_kwargs) image_str = "<image>" text_str = "Describe this image." 
text = image_str + text_str # Test with use_thumbnail=False - this should still generate correct tokens inputs = processor(text=text, images=self.small_image, use_thumbnail=False) # Count image tokens in input_ids num_image_tokens = sum(1 for token_id in inputs["input_ids"][0] if token_id == self.image_token_id) # Verify we have image tokens (the bug caused 0 tokens) self.assertGreater(num_image_tokens, 0, "Single-tile image with use_thumbnail=False should have image tokens") # Verify the number of image tokens matches expected based on spatial_shapes spatial_shape = inputs["spatial_shapes"][0].tolist() expected_tokens = math.ceil(spatial_shape[0] / processor.image_processor.downsample_factor) * math.ceil( spatial_shape[1] / processor.image_processor.downsample_factor ) self.assertEqual( num_image_tokens, expected_tokens, f"Image tokens ({num_image_tokens}) should match expected ({expected_tokens}) based on spatial shapes", ) # Verify pixel_values shape is correct encoder_feature_dims = ( 3 * processor.image_processor.encoder_patch_size * processor.image_processor.encoder_patch_size ) self.assertEqual( np.array(inputs["pixel_values"]).shape, (1, processor.image_processor.max_num_patches, encoder_feature_dims), ) def test_multi_image(self): """Test that text is correctly processed when multiple images are present.""" processor_components = self.prepare_components() processor_components["tokenizer"] = self.get_component("tokenizer", padding_side="left") processor_components["image_processor"] = self.get_component("image_processor", do_image_splitting=False) processor_kwargs = self.prepare_processor_dict() processor = self.processor_class(**processor_components, **processor_kwargs) # Text with multiple images and text segments between them text_1 = "First: " text_2 = " Middle: " text_3 = " End." 
text = text_1 + "<image>" + text_2 + "<image>" + text_3 images = [[self.small_image, self.small_image]] inputs = processor(text=text, images=images) # Construct expected input_ids tokenized_1 = processor.tokenizer(text_1, add_special_tokens=False)["input_ids"] tokenized_2 = processor.tokenizer(text_2, add_special_tokens=False)["input_ids"] tokenized_3 = processor.tokenizer(text_3, add_special_tokens=False)["input_ids"] image_tokens = [self.image_start_token_id] + [self.image_token_id] * 9 + [self.image_end_token_id] expected_input_ids = tokenized_1 + image_tokens + tokenized_2 + image_tokens + tokenized_3 self.assertEqual(inputs["input_ids"], [expected_input_ids]) self.assertEqual(inputs["attention_mask"], [[1] * len(expected_input_ids)]) def test_multi_turn_multi_image(self): """Test that text is correctly processed when multiple images are present in a multi-turn conversation.""" processor_components = self.prepare_components() processor_components["tokenizer"] = self.get_component("tokenizer", padding_side="left") processor_components["image_processor"] = self.get_component("image_processor", do_image_splitting=False) processor_kwargs = self.prepare_processor_dict() processor = self.processor_class(**processor_components, **processor_kwargs) # Simulate a multi-turn conversation with images messages = [ { "role": "user", "content": [ {"type": "text", "text": "What is in image A?"}, {"type": "image"}, {"type": "text", "text": "And image B?"}, {"type": "image"}, ], }, { "role": "assistant", "content": [{"type": "text", "text": "Image A shows X. 
Image B shows Y."}], }, { "role": "user", "content": [{"type": "text", "text": "Tell me more about image A."}], }, ] text = processor.apply_chat_template(messages, add_generation_prompt=True) images = [[self.small_image, self.small_image]] inputs = processor(text=text, images=images, do_image_splitting=False) # Construct expected input_ids based on the chat template structure image_tokens = [self.image_start_token_id] + [self.image_token_id] * 9 + [self.image_end_token_id] # Build expected sequence from chat template parts bos = processor.tokenizer(self.bos_token, add_special_tokens=False)["input_ids"] user_start = processor.tokenizer("<|im_start|>user\n", add_special_tokens=False)["input_ids"] assistant_start = processor.tokenizer("<|im_start|>assistant\n", add_special_tokens=False)["input_ids"] im_end = processor.tokenizer("<|im_end|>\n", add_special_tokens=False)["input_ids"] text_a = processor.tokenizer("What is in image A?", add_special_tokens=False)["input_ids"] text_b = processor.tokenizer("And image B?", add_special_tokens=False)["input_ids"] assistant_response = processor.tokenizer("Image A shows X. Image B shows Y.", add_special_tokens=False)[ "input_ids" ] followup = processor.tokenizer("Tell me more about image A.", add_special_tokens=False)["input_ids"] expected_input_ids = ( bos + user_start + text_a + image_tokens + text_b + image_tokens + im_end + assistant_start + assistant_response + im_end + user_start + followup + im_end + assistant_start ) self.assertEqual(inputs["input_ids"], [expected_input_ids]) self.assertEqual(inputs["attention_mask"], [[1] * len(expected_input_ids)])
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/lfm2_vl/test_processing_lfm2_vl.py", "license": "Apache License 2.0", "lines": 504, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:src/transformers/models/flex_olmo/modular_flex_olmo.py
# Copyright 2025 the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ...cache_utils import Cache, DynamicCache from ...configuration_utils import PreTrainedConfig from ...masking_utils import create_causal_mask from ...modeling_outputs import MoeModelOutputWithPast from ...modeling_rope_utils import RopeParameters from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring from ...utils.generic import merge_with_config_defaults from ...utils.output_capturing import OutputRecorder, capture_outputs from ..mixtral.modeling_mixtral import MixtralModel, MixtralPreTrainedModel from ..olmo2.modeling_olmo2 import Olmo2Attention, Olmo2RMSNorm, Olmo2RotaryEmbedding from ..olmoe.modeling_olmoe import ( OlmoeDecoderLayer, OlmoeForCausalLM, OlmoeMLP, OlmoeSparseMoeBlock, OlmoeTopKRouter, ) class FlexOlmoConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`FlexOlmoModel`]. It is used to instantiate an FlexOlmo model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [allenai/FlexOlmo-7x7B-1T](https://huggingface.co/allenai/FlexOlmo-7x7B-1T). Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. 
Args: vocab_size (`int`, *optional*, defaults to 100352): Vocabulary size of the FlexOlmo model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`FlexOlmoModel`] hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 11008): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 4096): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*, defaults to 100277): Padding token id. bos_token_id (`int`, *optional*): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 100257): End of stream token id. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings rope_parameters (`RopeParameters`, *optional*): Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with longer `max_position_embeddings`. attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. num_experts_per_tok (`int`, *optional*, defaults to 5): Number of selected experts. num_experts (`int`, *optional*, defaults to 7): Number of routed experts. output_router_logits (`bool`, *optional*, defaults to `False`): Whether or not the router logits should be returned by the model. Enabling this will also allow the model to output the auxiliary loss, including load balancing loss and router z-loss. router_aux_loss_coef (`float`, *optional*, defaults to 0.01): The aux loss factor for the total loss. norm_topk_prob (`bool`, *optional*, defaults to `False`): Whether to normalize the topk probabilities. 
```python >>> from transformers import FlexOlmoModel, FlexOlmoConfig >>> # Initializing a FlexOlmo style configuration >>> configuration = FlexOlmoConfig() >>> # Initializing a model from the FlexOlmo style configuration >>> model = FlexOlmoModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "flex_olmo" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_local_experts": "num_experts"} default_theta = 500000.0 base_model_tp_plan = { "layers.*.self_attn.q_proj": "colwise_gather_output", # we need to replicate here due to the added norm on q and k "layers.*.self_attn.k_proj": "colwise_gather_output", # we need to replicate here due to the added norm on q and k "layers.*.self_attn.v_proj": "colwise_gather_output", # we need to replicate here due to the added norm on q and k "layers.*.self_attn.o_proj": "rowwise_split_input", # input is replicated due to the added norm on q and k "layers.*.mlp.experts.gate_up_proj": "rowwise", "layers.*.mlp.experts.down_proj": "rowwise", } base_model_pp_plan = { "embed_tokens": (["input_ids"], ["inputs_embeds"]), "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), "norm": (["hidden_states"], ["hidden_states"]), } def __init__( self, vocab_size: int | None = 100352, hidden_size: int | None = 4096, intermediate_size: int | None = 11008, num_hidden_layers: int | None = 32, num_attention_heads: int | None = 32, num_key_value_heads: int | None = None, hidden_act: str | None = "silu", max_position_embeddings: int | None = 4096, initializer_range: float | None = 0.02, rms_norm_eps: float | None = 1e-06, use_cache: bool | None = True, pad_token_id: int | None = 100277, bos_token_id: int | None = None, eos_token_id: int | None = 100257, tie_word_embeddings: bool | None = False, rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None, attention_bias: bool | None = False, attention_dropout: float | None = 0.0, num_experts_per_tok: 
int | None = 5, num_experts: int | None = 7, output_router_logits: bool | None = False, router_aux_loss_coef: float | None = 0.01, norm_topk_prob: bool | None = False, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.num_experts_per_tok = num_experts_per_tok self.num_experts = num_experts self.output_router_logits = output_router_logits self.router_aux_loss_coef = router_aux_loss_coef self.norm_topk_prob = norm_topk_prob self.rope_parameters = rope_parameters self.tie_word_embeddings = tie_word_embeddings self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id super().__init__(**kwargs) # FlexOlmo RMS norm reuses Olmo2 RMS norm, which handles low precision slightly differently than the original Olmoe. class FlexOlmoRMSNorm(Olmo2RMSNorm): pass # FlexOlmo RMS norm reuses Olmo2 RMS norm, so that the output cos and sin are returned # as float32 rather than the input type. class FlexOlmoRotaryEmbedding(Olmo2RotaryEmbedding): pass class FlexOlmoMLP(OlmoeMLP): pass # FlexOlmo uses Olmo2 attention instead of OlmoE Attention since its `apply_rotary_pos_emb` # implementation handles lower precision more faithfully to the Olmo codebase. 
class FlexOlmoAttention(Olmo2Attention): pass class FlexOlmoTopKRouter(OlmoeTopKRouter): pass class FlexOlmoSparseMoeBlock(OlmoeSparseMoeBlock): pass # FlexOlmo decoder layer is identical to OlmoE decoder layer except: # - Norm is applied after attention/feedforward rather than before. class FlexOlmoDecoderLayer(OlmoeDecoderLayer): def __init__(self, config: FlexOlmoConfig, layer_idx: int): super().__init__(config, layer_idx=layer_idx) self.post_attention_layernorm = FlexOlmoRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_feedforward_layernorm = FlexOlmoRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.self_attn = FlexOlmoAttention(config=config, layer_idx=layer_idx) del self.input_layernorm def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: Cache | None = None, cache_position: torch.LongTensor | None = None, position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None, **kwargs, ) -> torch.FloatTensor: residual = hidden_states # Self Attention hidden_states, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.mlp(hidden_states) hidden_states = self.post_feedforward_layernorm(hidden_states) hidden_states = residual + hidden_states return hidden_states # FlexOlmo uses Mixtral model as its base instead of OlmoE model since Mixtral is more up-to-date with the rest # of the transformers library. For example, it uses the newer mechanisms of recording submodule outputs. 
class FlexOlmoPreTrainedModel(MixtralPreTrainedModel): _can_record_outputs = { "router_logits": OutputRecorder(FlexOlmoTopKRouter, index=0), "hidden_states": FlexOlmoDecoderLayer, "attentions": FlexOlmoAttention, } # FlexOlmo uses Mixtral model as its base instead of OlmoE model since Mixtral is more up-to-date with the rest # of the transformers library. For example, it uses the newer mechanisms of recording submodule outputs. # FlexOlmo model is identical to Mixtral model except: # - FlexOlmo does not use sliding window attention. class FlexOlmoModel(MixtralModel): @merge_with_config_defaults @capture_outputs @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: Cache | None = None, inputs_embeds: torch.FloatTensor | None = None, use_cache: bool | None = None, cache_position: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> MoeModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( config=self.config, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) for 
decoder_layer in self.layers[: self.config.num_hidden_layers]: hidden_states = decoder_layer( hidden_states, position_embeddings=position_embeddings, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = self.norm(hidden_states) return MoeModelOutputWithPast( # only diff with Mistral is the output type, we need MoE last_hidden_state=hidden_states, past_key_values=past_key_values, ) class FlexOlmoForCausalLM(OlmoeForCausalLM): pass __all__ = [ "FlexOlmoConfig", "FlexOlmoForCausalLM", "FlexOlmoModel", "FlexOlmoPreTrainedModel", ]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/flex_olmo/modular_flex_olmo.py", "license": "Apache License 2.0", "lines": 307, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:tests/models/flex_olmo/test_modeling_flex_olmo.py
# Copyright 2025 the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch FlexOlmo model.""" import unittest from transformers import is_torch_available from transformers.models.auto.tokenization_auto import AutoTokenizer from transformers.testing_utils import ( Expectations, cleanup, require_torch, slow, torch_device, ) from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester if is_torch_available(): import torch from transformers import ( FlexOlmoForCausalLM, FlexOlmoModel, ) class FlexOlmoModelTester(CausalLMModelTester): if is_torch_available(): base_model_class = FlexOlmoModel @require_torch class FlexOlmoModelTest(CausalLMModelTest, unittest.TestCase): test_all_params_have_gradient = False model_tester_class = FlexOlmoModelTester # Need to use `0.8` instead of `0.9` for `test_cpu_offload` # This is because we are hitting edge cases with the causal_mask buffer model_split_percents = [0.5, 0.7, 0.8] # used in `test_torch_compile_for_training` _torch_compile_train_cls = FlexOlmoForCausalLM if is_torch_available() else None @require_torch class FlexOlmoIntegrationTest(unittest.TestCase): def setUp(self): cleanup(torch_device, gc_collect=True) def tearDown(self): cleanup(torch_device, gc_collect=True) @slow def test_model_7b_logits(self): input_ids = [[1, 306, 4658, 278, 6593, 310, 2834, 338]] model = FlexOlmoForCausalLM.from_pretrained("shanearora/Flex-reddit-2x7B-1T").to( torch_device, 
dtype=torch.bfloat16 ) out = model(torch.tensor(input_ids, device=torch_device)).logits.float() # Expected mean on dim = -1 expectations = Expectations( { ("cuda", 8): [[-5.4202, -5.3883, -2.3924, -2.1226, -6.0122, -5.4173, -5.4571, -5.8256]], } ) EXPECTED_MEAN = torch.tensor(expectations.get_expectation(), device=torch_device) torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2) # slicing logits[0, 0, 0:30] expectations = Expectations( { ("cuda", 8): [ 0.5547, -3.6250, -7.2812, -5.0312, -5.9062, -5.3438, -4.2500, -4.6875, -3.4219, -4.6250, -6.5938, -3.1250, -6.0625, -2.0781, -6.4688, -0.4941, 1.2656, 0.7578, -0.1934, -0.4160, -0.6992, -0.9531, -0.9648, -1.3125, -1.2578, -4.5625, -2.4219, -5.6250, 0.7695, -4.5938], } ) # fmt: skip EXPECTED_SLICE = torch.tensor(expectations.get_expectation(), device=torch_device) torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-2, atol=1e-2) @slow def test_model_7b_greedy_generation(self): EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same in all inertial frames of reference, and 2) the speed of light is constant in all inertial frames of reference. The first statement is called the principle of relativity, and the second is called the constancy of the speed of light. The first statement is""" prompt = "Simply put, the theory of relativity states that " tokenizer = AutoTokenizer.from_pretrained("allenai/dolma2-tokenizer", device_map="auto") model = FlexOlmoForCausalLM.from_pretrained("shanearora/Flex-reddit-2x7B-1T", device_map="auto") input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.device) # greedy generation outputs generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/flex_olmo/test_modeling_flex_olmo.py", "license": "Apache License 2.0", "lines": 83, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:src/transformers/models/longcat_flash/configuration_longcat_flash.py
# Copyright 2025 Meituan and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """LongCat Flash model configuration""" from ...configuration_utils import PreTrainedConfig from ...modeling_rope_utils import RopeParameters class LongcatFlashConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`LongcatFlashModel`]. It is used to instantiate a LongCat Flash model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LongCat Flash architecture. e.g. [meituan-longcat/LongCat-Flash-Chat](https://huggingface.co/meituan-longcat/LongCat-Flash-Chat) Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 131072): Vocabulary size of the LongCat Flash model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`LongcatFlashModel`] hidden_size (`int`, *optional*, defaults to 6144): Dimension of the hidden representations. num_hidden_layers (`int`, *optional*, defaults to 56): Number of hidden layers in the Transformer decoder. num_layers (`int`, *optional*, defaults to 28): number of layers, each with 2 sublayers. 
num_attention_heads (`int`, *optional*, defaults to 64): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting from a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details checkout [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `num_attention_heads`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 131072): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon value used by the RMS normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*): Padding token id. bos_token_id (`int`, *optional*, defaults to 1): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 2): End of stream token id. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie input and output embeddings. 
rope_parameters (`RopeParameters`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update `max_position_embeddings` to the expected new maximum. attention_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. ffn_hidden_size (`int`, *optional*, defaults to 12288): Dimension of the MLP representations. q_lora_rank (`int`, *optional*, defaults to 1536): The rank of the query LoRA projection in MLA (Multi-head Latent Attention). kv_lora_rank (`int`, *optional*, defaults to 512): The rank of the key-value LoRA projection in MLA. qk_nope_head_dim (`int`, *optional*, defaults to 128): The dimension of the non-position encoding part of query/key heads. qk_rope_head_dim (`int`, *optional*, defaults to 64): The dimension of the RoPE part of query/key heads. head_dim (`int`, *optional*, defaults to 64): Standard dimension of qk heads, unused except for CI. v_head_dim (`int`, *optional*, defaults to 128): The dimension of value heads. qk_head_dim (`int`, *optional*): The total dimension of query/key heads. If not specified, set to `qk_nope_head_dim + qk_rope_head_dim`. moe_topk (`int`, *optional*, defaults to 12): Number of experts to route to for each token in the MoE layer. n_routed_experts (`int`, *optional*, defaults to 512): Number of routed experts in the MoE layer. zero_expert_num (`int`, *optional*, defaults to 256): Number of zero experts (identity function) to add to the expert pool. expert_ffn_hidden_size (`int`, *optional*, defaults to 2048): Hidden size of individual expert FFN layers. 
routed_scaling_factor (`float`, *optional*, defaults to 6.0): Scaling factor applied to the routing weights. ```python >>> from transformers import LongcatFlashModel, LongcatFlashConfig >>> # Initializing a LongCat Flash style configuration >>> configuration = LongcatFlashConfig() >>> # Initializing a model from the configuration >>> model = LongcatFlashModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "longcat_flash" keys_to_ignore_at_inference = ["past_key_values"] default_theta = 10000000.0 base_model_tp_plan = { "layers.*.self_attn.*.q_b_proj": "colwise", "layers.*.self_attn.*.kv_b_proj": "colwise", "layers.*.self_attn.*.o_proj": "rowwise", "layers.*.mlps.*.gate_proj": "colwise", "layers.*.mlps.*.up_proj": "colwise", "layers.*.mlps.*.down_proj": "rowwise", "layers.*.mlp.experts.gate_up_proj": "rowwise", "layers.*.mlp.experts.down_proj": "rowwise", } base_model_pp_plan = { "embed_tokens": (["input_ids"], ["inputs_embeds"]), "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), "norm": (["hidden_states"], ["hidden_states"]), } def __init__( self, vocab_size: int | None = 131072, hidden_size: int | None = 6144, num_hidden_layers: int | None = 56, num_layers: int | None = 28, num_attention_heads: int | None = 64, num_key_value_heads: int | None = None, hidden_act: str | None = "silu", max_position_embeddings: int | None = 131072, initializer_range: float | None = 0.02, rms_norm_eps: float | None = 1e-5, use_cache: bool | None = True, pad_token_id: int | None = None, bos_token_id: int | None = 1, eos_token_id: int | None = 2, tie_word_embeddings: bool | None = False, rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None, attention_bias: bool | None = False, attention_dropout: float | None = 0.0, ffn_hidden_size: int | None = 12288, q_lora_rank: int | None = 1536, kv_lora_rank: int | None = 512, qk_nope_head_dim: int | None = 128, qk_rope_head_dim: int | None = 64, 
head_dim: int | None = 64, v_head_dim: int | None = 128, qk_head_dim: int | None = None, moe_topk: int | None = 12, n_routed_experts: int | None = 512, zero_expert_num: int | None = 256, expert_ffn_hidden_size: int | None = 2048, routed_scaling_factor: float | None = 6.0, **kwargs, ): if num_key_value_heads is None: num_key_value_heads = num_attention_heads if qk_head_dim is None: qk_head_dim = qk_nope_head_dim + qk_rope_head_dim self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_layers = num_layers self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.ffn_hidden_size = ffn_hidden_size self.q_lora_rank = q_lora_rank self.kv_lora_rank = kv_lora_rank self.qk_nope_head_dim = qk_nope_head_dim self.qk_rope_head_dim = qk_rope_head_dim self.v_head_dim = v_head_dim self.qk_head_dim = qk_head_dim self.head_dim = head_dim self.moe_topk = moe_topk self.n_routed_experts = n_routed_experts self.zero_expert_num = zero_expert_num self.expert_ffn_hidden_size = expert_ffn_hidden_size self.routed_scaling_factor = routed_scaling_factor self.rope_parameters = rope_parameters self.tie_word_embeddings = tie_word_embeddings self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id super().__init__(**kwargs) def convert_rope_params_to_dict(self, ignore_keys_at_rope_validation: set | None = None, **kwargs): rope_scaling = kwargs.pop("rope_scaling", None) self.rope_parameters = rope_scaling or self.rope_parameters self.rope_parameters = self.rope_parameters if self.rope_parameters is not None else {} # Standardize and validate the correctness of rotary position embeddings parameters 
self.rope_parameters.setdefault("rope_theta", kwargs.pop("rope_theta", self.default_theta)) self.standardize_rope_params() self.validate_rope(ignore_keys=ignore_keys_at_rope_validation) # Convert to float because RoPE fn expect a float. Models on the hub were saved as int for key in ["beta_fast", "beta_slow", "factor"]: if key in self.rope_parameters: self.rope_parameters[key] = float(self.rope_parameters[key]) return kwargs __all__ = ["LongcatFlashConfig"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/longcat_flash/configuration_longcat_flash.py", "license": "Apache License 2.0", "lines": 211, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/longcat_flash/modular_longcat_flash.py
# Copyright 2025 Meituan and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections.abc import Callable import torch import torch.nn.functional as F from torch import nn from ... import initialization as init from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...masking_utils import create_causal_mask from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutputWithPast from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, logging from ...utils.generic import is_flash_attention_requested from ..deepseek_v3.modeling_deepseek_v3 import ( DeepseekV3Attention, DeepseekV3ForCausalLM, DeepseekV3MLP, DeepseekV3Model, DeepseekV3RMSNorm, DeepseekV3RotaryEmbedding, DeepseekV3TopkRouter, apply_rotary_pos_emb_interleave, eager_attention_forward, ) from .configuration_longcat_flash import LongcatFlashConfig logger = logging.get_logger(__name__) class LongcatFlashRMSNorm(DeepseekV3RMSNorm): pass class LongcatFlashRotaryEmbedding(DeepseekV3RotaryEmbedding): pass # TODO remap config key ffn_hidden_size -> intermediate_size class LongcatFlashMLP(DeepseekV3MLP): def __init__(self, config, hidden_size=None, intermediate_size=None): super().__init__(config) self.hidden_size = 
config.hidden_size if hidden_size is None else hidden_size self.intermediate_size = config.ffn_hidden_size if intermediate_size is None else intermediate_size # TODO remap config key moe_topk -> num_experts_per_tok class LongcatFlashTopkRouter(DeepseekV3TopkRouter): def __init__(self, config): super().__init__(config) del self.n_group del self.topk_group del self.weight del self.norm_topk_prob self.top_k = config.moe_topk self.n_routed_experts = config.n_routed_experts + (config.zero_expert_num or 0) self.routed_scaling_factor = config.routed_scaling_factor self.register_buffer("e_score_correction_bias", torch.zeros(self.n_routed_experts)) self.router_bias = getattr(config, "router_bias", False) self.classifier = nn.Linear(config.hidden_size, self.n_routed_experts, bias=self.router_bias) @torch.no_grad() def get_topk_indices(self, scores): scores_for_choice = scores.view(-1, self.n_routed_experts) + self.e_score_correction_bias.unsqueeze(0) topk_indices = torch.topk(scores_for_choice, k=self.top_k, dim=-1, sorted=False)[1] return topk_indices def forward(self, hidden_states): hidden_states = hidden_states.view(-1, self.config.hidden_size) router_logits = F.linear(hidden_states.type(torch.float32), self.classifier.weight.type(torch.float32)) scores = router_logits.softmax(dim=-1) topk_indices = self.get_topk_indices(scores) topk_weights = scores.gather(1, topk_indices) topk_weights = topk_weights * self.routed_scaling_factor return topk_weights.to(router_logits.dtype), topk_indices class LongcatFlashExperts(nn.Module): def __init__(self, config): super().__init__() self.intermediate_size = config.expert_ffn_hidden_size self.hidden_size = config.hidden_size self.num_routed_experts = config.n_routed_experts self.zero_expert_num = config.zero_expert_num or 0 self.total_experts = self.num_routed_experts + self.zero_expert_num self.act_fn = ACT2FN[config.hidden_act] if self.num_routed_experts > 0: self.gate_up_proj = nn.Parameter( torch.empty(self.total_experts, 2 * 
self.intermediate_size, self.hidden_size) ) self.down_proj = nn.Parameter( torch.empty(self.num_routed_experts, self.hidden_size, self.intermediate_size) ) else: self.register_parameter("gate_up_proj", None) self.register_parameter("down_proj", None) def forward(self, hidden_states, top_k_index, top_k_weights): final_hidden_states = torch.zeros_like(hidden_states) if top_k_index.numel() == 0: return final_hidden_states expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.total_experts).permute(2, 1, 0) expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero(as_tuple=False) for expert_idx_tensor in expert_hit: expert_idx = int(expert_idx_tensor.item()) selection_idx, token_idx = torch.where(expert_mask[expert_idx].squeeze(0)) if token_idx.numel() == 0: continue current_state = hidden_states[token_idx] if expert_idx >= self.num_routed_experts or self.gate_up_proj is None: current_hidden_states = current_state else: gate, up = F.linear(current_state, self.gate_up_proj[expert_idx]).chunk(2, dim=-1) current_hidden_states = self.act_fn(gate) * up current_hidden_states = F.linear(current_hidden_states, self.down_proj[expert_idx]) current_hidden_states = current_hidden_states * top_k_weights[token_idx, selection_idx, None] final_hidden_states.index_add_(0, token_idx, current_hidden_states.to(hidden_states.dtype)) return final_hidden_states # remap config key expert_ffn_hidden_size -> moe_intermediate_size class LongcatFlashMoE(nn.Module): """ A mixed expert module containing zero compute (identity) experts. 
""" def __init__(self, config): super().__init__() self.intermediate_size = config.expert_ffn_hidden_size self.config = config self.experts = LongcatFlashExperts(config) self.router = LongcatFlashTopkRouter(config) def forward(self, hidden_states): orig_shape = hidden_states.shape topk_weights, topk_indices = self.router(hidden_states) hidden_states = hidden_states.view(-1, hidden_states.shape[-1]) hidden_states = self.experts(hidden_states, topk_indices, topk_weights).view(*orig_shape) return hidden_states class LongcatFlashMLA(DeepseekV3Attention): def __init__(self, config, layer_idx: int): super().__init__(config, layer_idx) self.mla_scale_q_lora = (config.hidden_size / self.q_lora_rank) ** 0.5 self.mla_scale_kv_lora = (config.hidden_size / self.kv_lora_rank) ** 0.5 def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: torch.Tensor | None, past_key_values: Cache | None = None, cache_position: torch.LongTensor | None = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, torch.Tensor | None, tuple[torch.Tensor] | None]: batch_size, seq_length = hidden_states.shape[:-1] query_shape = (batch_size, seq_length, -1, self.qk_head_dim) key_shape = (batch_size, seq_length, -1, self.qk_nope_head_dim + self.v_head_dim) # we always do a lora for queries as well q_states = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states))) q_states = q_states.view(query_shape).transpose(1, 2) q_pass, q_rot = torch.split(q_states, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1) compressed_kv = self.kv_a_proj_with_mqa(hidden_states) k_pass, k_rot = torch.split(compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1) k_pass = self.kv_a_layernorm(k_pass) # apply LoRA scaling q_pass = q_pass * self.mla_scale_q_lora q_rot = q_rot * self.mla_scale_q_lora k_pass = k_pass * self.mla_scale_kv_lora k_pass = self.kv_b_proj(k_pass).view(key_shape).transpose(1, 2) k_pass, value_states = 
torch.split(k_pass, [self.qk_nope_head_dim, self.v_head_dim], dim=-1) k_rot = k_rot.view(batch_size, 1, seq_length, self.qk_rope_head_dim) cos, sin = position_embeddings q_rot, k_rot = apply_rotary_pos_emb_interleave(q_rot, k_rot, cos, sin) k_rot = k_rot.expand(*k_pass.shape[:-1], -1) query_states = torch.cat((q_pass, q_rot), dim=-1) key_states = torch.cat((k_pass, k_rot), dim=-1) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) if is_flash_attention_requested(self.config) and self.qk_head_dim != self.v_head_dim: value_states = F.pad(value_states, [0, self.qk_head_dim - self.v_head_dim]) attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface( self.config._attn_implementation, eager_attention_forward ) attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) if is_flash_attention_requested(self.config) and self.qk_head_dim != self.v_head_dim: attn_output = attn_output[:, :, :, : self.v_head_dim] attn_output = attn_output.reshape(batch_size, seq_length, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class LongcatFlashDecoderLayer(GradientCheckpointingLayer): """ LongCat decoder layer with dual-sublayer + shortcut MoE architecture. 
Each logical layer contains: - 2 attention sublayers (with layer indices: layer_idx*2, layer_idx*2+1) - 2 MLP sublayers - 1 shortcut MoE connection """ def __init__(self, config, layer_idx: int): super().__init__() self.layer_idx = layer_idx self.hidden_size = config.hidden_size self.mlp = LongcatFlashMoE(config) self.self_attn = nn.ModuleList([LongcatFlashMLA(config=config, layer_idx=layer_idx * 2 + i) for i in [0, 1]]) self.mlps = nn.ModuleList([LongcatFlashMLP(config) for _ in [0, 1]]) self.input_layernorm = nn.ModuleList( [LongcatFlashRMSNorm(config.hidden_size, eps=config.rms_norm_eps) for _ in [0, 1]] ) self.post_attention_layernorm = nn.ModuleList( [LongcatFlashRMSNorm(config.hidden_size, eps=config.rms_norm_eps) for _ in [0, 1]] ) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: Cache | None = None, use_cache: bool | None = False, cache_position: torch.LongTensor | None = None, position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> torch.Tensor: residual = hidden_states hidden_states = self.input_layernorm[0](hidden_states) hidden_states, _ = self.self_attn[0]( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm[0](hidden_states) shortcut_mlp_output = self.mlp(hidden_states) hidden_states = self.mlps[0](hidden_states) hidden_states = residual + hidden_states # shortcut connection after second sublayer residual = hidden_states hidden_states = self.input_layernorm[1](hidden_states) hidden_states, _ = self.self_attn[1]( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, 
past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm[1](hidden_states) hidden_states = self.mlps[1](hidden_states) hidden_states = residual + hidden_states + shortcut_mlp_output return hidden_states @auto_docstring class LongcatFlashPreTrainedModel(PreTrainedModel): config: LongcatFlashConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["LongcatFlashDecoderLayer"] _skip_keys_device_placement = ["past_key_values"] _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _can_compile_fullgraph = False _supports_attention_backend = True _can_record_outputs = { "hidden_states": LongcatFlashDecoderLayer, "attentions": LongcatFlashMLA, } _keys_to_ignore_on_load_unexpected = [r"model\.mtp.*"] @torch.no_grad() def _init_weights(self, module): super()._init_weights(module) if isinstance(module, LongcatFlashTopkRouter): init.normal_(module.classifier.weight, mean=0.0, std=self.config.initializer_range) init.zeros_(module.e_score_correction_bias) if isinstance(module, LongcatFlashExperts): if module.gate_up_proj is not None: init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range) init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range) class LongcatFlashModel(DeepseekV3Model): def __init__(self, config): super().__init__(config) self.layers = nn.ModuleList( [LongcatFlashDecoderLayer(config, layer_idx) for layer_idx in range(config.num_layers)] ) # Each layer above has 2 sublayers, config hack to have a correct cache (to avoid a checkpoint change) self.head_dim = config.head_dim # For CI happiness (we didn't convert so head_dim is not directly used) self.config.num_hidden_layers = 2 * config.num_layers self.norm = LongcatFlashRMSNorm(config.hidden_size, eps=config.rms_norm_eps) 
self.rotary_emb = LongcatFlashRotaryEmbedding(config=config) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: Cache | None = None, inputs_embeds: torch.FloatTensor | None = None, cache_position: torch.LongTensor | None = None, use_cache: bool | None = None, **kwargs: Unpack[TransformersKwargs], ): if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds: torch.Tensor = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position: torch.Tensor = ( torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device) + past_seen_tokens ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( config=self.config, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids) for decoder_layer in self.layers[: self.config.num_layers]: hidden_states = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.norm(hidden_states) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=None, attentions=None, ) class LongcatFlashForCausalLM(DeepseekV3ForCausalLM): _keys_to_ignore_on_load_unexpected = 
[r"model\.mtp.*"] def __init__(self, config): super().__init__(config) self.model = LongcatFlashModel(config) __all__ = ["LongcatFlashPreTrainedModel", "LongcatFlashModel", "LongcatFlashForCausalLM"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/longcat_flash/modular_longcat_flash.py", "license": "Apache License 2.0", "lines": 362, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:tests/models/longcat_flash/test_modeling_longcat_flash.py
# Copyright 2025 Meituan and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch LongcatFlash model.""" import copy import tempfile import unittest from pytest import mark from transformers import LongcatFlashConfig, is_torch_available from transformers.testing_utils import ( require_bitsandbytes, require_flash_attn, require_large_cpu_ram, require_torch, require_torch_accelerator, slow, torch_device, ) from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester from ...test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoTokenizer, Cache, LongcatFlashForCausalLM, LongcatFlashModel class LongcatFlashModelTester(CausalLMModelTester): if is_torch_available(): base_model_class = LongcatFlashModel def __init__( self, parent, batch_size=2, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=144, ffn_hidden_size=288, expert_ffn_hidden_size=48, num_layers=1, # We have `self.num_hidden_layers = 2 * num_layers` in the body. See `LongcatFlashConfig`. 
num_attention_heads=8, num_key_value_heads=8, kv_lora_rank=16, q_lora_rank=48, qk_rope_head_dim=4, v_head_dim=8, qk_nope_head_dim=8, head_dim=4, n_routed_experts=4, zero_expert_num=2, moe_topk=2, routed_scaling_factor=1.0, hidden_act="silu", max_position_embeddings=128, initializer_range=0.02, rms_norm_eps=1e-6, bos_token_id=1, eos_token_id=2, pad_token_id=3, type_sequence_label_size=2, num_labels=3, num_choices=4, ): super().__init__(parent) self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.ffn_hidden_size = ffn_hidden_size self.expert_ffn_hidden_size = expert_ffn_hidden_size self.num_layers = num_layers self.num_hidden_layers = 2 * num_layers # for compatibility self.expected_num_hidden_layers = 2 # embedding + 2 layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.kv_lora_rank = kv_lora_rank self.q_lora_rank = q_lora_rank self.qk_rope_head_dim = qk_rope_head_dim self.v_head_dim = v_head_dim self.qk_nope_head_dim = qk_nope_head_dim self.head_dim = head_dim self.n_routed_experts = n_routed_experts self.zero_expert_num = zero_expert_num self.moe_topk = moe_topk self.routed_scaling_factor = routed_scaling_factor self.hidden_act = hidden_act self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.type_sequence_label_size = type_sequence_label_size self.num_labels = num_labels self.num_choices = num_choices def get_config(self): return LongcatFlashConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, ffn_hidden_size=self.ffn_hidden_size, expert_ffn_hidden_size=self.expert_ffn_hidden_size, num_layers=self.num_layers, 
num_attention_heads=self.num_attention_heads, num_key_value_heads=self.num_key_value_heads, kv_lora_rank=self.kv_lora_rank, q_lora_rank=self.q_lora_rank, qk_rope_head_dim=self.qk_rope_head_dim, v_head_dim=self.v_head_dim, qk_nope_head_dim=self.qk_nope_head_dim, head_dim=self.head_dim, n_routed_experts=self.n_routed_experts, zero_expert_num=self.zero_expert_num, moe_topk=self.moe_topk, routed_scaling_factor=self.routed_scaling_factor, hidden_act=self.hidden_act, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, rms_norm_eps=self.rms_norm_eps, pad_token_id=self.pad_token_id, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LongcatFlashModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states=None, encoder_attention_mask=None, ): model = LongcatFlashForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = torch.tril(torch.ones(self.batch_size, self.seq_length)).to(torch_device) token_type_ids = None sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], 
self.num_choices) config = self.get_config() return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class LongcatFlashModelTest(CausalLMModelTest, unittest.TestCase): model_split_percents = [0.5, 0.8] model_tester_class = LongcatFlashModelTester @unittest.skip("LongcatFlash buffers include complex numbers, which breaks this test") def test_save_load_fast_init_from_base(self): pass @unittest.skip("LongcatFlash buffers include complex numbers, which breaks this test") def test_save_load_fast_init_to_base(self): pass def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config): self.assertIsInstance(past_key_values, Cache) k_embed_dim = config.qk_nope_head_dim + config.qk_rope_head_dim v_embed_dim = config.v_head_dim expected_key_shape = (batch_size, config.num_key_value_heads, seq_length, k_embed_dim) expected_value_shape = (batch_size, config.num_key_value_heads, seq_length, v_embed_dim) for layer_idx in range(config.num_hidden_layers): self.assertEqual(past_key_values.layers[layer_idx].keys.shape, expected_key_shape) self.assertEqual(past_key_values.layers[layer_idx].values.shape, expected_value_shape) @unittest.skip("LongcatFlash router uses weight.type() directly in forward which prevents offloading") def test_cpu_offload(self): pass @unittest.skip("LongcatFlash router uses weight.type() directly in forward which prevents offloading") def test_disk_offload_bin(self): pass @unittest.skip("LongcatFlash router uses weight.type() directly in forward which prevents offloading") def test_disk_offload_safetensors(self): pass @unittest.skip("Most probably because of the 
MOE, the moe and router does not ignore padding tokens") def test_eager_padding_matches_padding_free_with_position_ids(self): pass @unittest.skip(reason="SDPA can't dispatch on flash due to unsupported head dims") def test_sdpa_can_dispatch_on_flash(self): pass @staticmethod def _prepare_config_headdim(config, requested_dim): # there's specific head dims due to lora compressions in longcat config = copy.deepcopy(config) config.attention_dropout = 0 if requested_dim > config.qk_rope_head_dim: config.qk_rope_head_dim = requested_dim config.qk_nope_head_dim = max(config.qk_nope_head_dim, requested_dim) config.v_head_dim = max(config.v_head_dim, requested_dim) config.qk_head_dim = config.qk_nope_head_dim + config.qk_rope_head_dim config.head_dim = requested_dim config.q_lora_rank = max(config.q_lora_rank, requested_dim * 4) config.kv_lora_rank = max(config.kv_lora_rank, requested_dim * 2) config.hidden_size = max(config.hidden_size, config.num_attention_heads * requested_dim) return config @require_flash_attn @require_torch_accelerator @require_bitsandbytes @mark.flash_attn_test @slow def test_flash_attn_2_fp32_ln(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") for model_class in self.all_generative_model_classes: # TODO: this test should run on all classes instead if not model_class._supports_flash_attn: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) dummy_input = inputs_dict[model.main_input_name] dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) batch_size = dummy_attention_mask.shape[0] is_padding_right = dummy_attention_mask[:, -1].sum().item() != batch_size # To avoid errors with padding_side=="right" if is_padding_right: dummy_attention_mask = 
torch.ones_like(dummy_input) model = model_class.from_pretrained( tmpdirname, dtype=torch.float16, attn_implementation="flash_attention_2", device_map="auto", # small change to ensure device placement ) # no upcasting at all if model.config.is_encoder_decoder: dummy_decoder_input_ids = inputs_dict["decoder_input_ids"] dummy_decoder_attention_mask = inputs_dict["decoder_attention_mask"] _ = model(dummy_input, decoder_input_ids=dummy_decoder_input_ids) # with attention mask _ = model( dummy_input, attention_mask=dummy_attention_mask, decoder_input_ids=dummy_decoder_input_ids, decoder_attention_mask=dummy_decoder_attention_mask, ) else: _ = model(dummy_input) # with attention mask _ = model(dummy_input, attention_mask=dummy_attention_mask) @slow class LongcatFlashIntegrationTest(unittest.TestCase): short_model_id = "hf-internal-testing/LongCat-ShortCat" # This is a cut-down model that matches part of the early logits of the larger one # Only a couple experts + layers # But if it fails, it means the larger model might have issues as well model_id = "meituan-longcat/LongCat-Flash-Chat" @slow def test_shortcat_generation(self): self.model = LongcatFlashForCausalLM.from_pretrained( self.short_model_id, device_map="auto", dtype=torch.bfloat16, ) self.model.generation_config.bos_token_id = 1 self.model.generation_config.pad_token_id = 3 self.model.generation_config.eos_token_id = 2 self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) chat = [{"role": "user", "content": "Paris is..."}] inputs = self.tokenizer.apply_chat_template( chat, tokenize=True, add_generation_prompt=True, return_tensors="pt" ).to(self.model.device) with torch.no_grad(): outputs = self.model.generate(inputs, max_new_tokens=10, do_sample=False) response = self.tokenizer.batch_decode(outputs, skip_special_tokens=False)[0] expected_output = "[Round 0] USER:Paris is... 
ASSISTANT: dig年车龄juanaheast稍achaotingupebarebones" self.assertEqual(response, expected_output) @slow @require_large_cpu_ram def test_longcat_generation_cpu(self): # takes absolutely forever and a lot RAM, but allows to test the output in the CI model = LongcatFlashForCausalLM.from_pretrained(self.model_id, device_map="auto", dtype=torch.bfloat16) tokenizer = AutoTokenizer.from_pretrained(self.model_id) chat = [{"role": "user", "content": "Paris is..."}] inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt") with torch.no_grad(): outputs = model.generate(inputs, max_new_tokens=3, do_sample=False) response = tokenizer.batch_decode(outputs, skip_special_tokens=False)[0] expected_output = "[Round 0] USER:Paris is... ASSISTANT:Paris is..." self.assertEqual(response, expected_output)
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/longcat_flash/test_modeling_longcat_flash.py", "license": "Apache License 2.0", "lines": 319, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:src/transformers/models/olmo3/convert_olmo3_weights_to_hf.py
# Copyright 2025 EleutherAI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import argparse import gc import io import json import os import pickle import shutil import traceback import uuid from collections.abc import Sequence from concurrent.futures import ThreadPoolExecutor, as_completed from dataclasses import dataclass from pathlib import Path from typing import Any, cast import torch import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.metadata import Metadata, MetadataIndex, StorageMeta from torch.distributed.checkpoint.planner import ( LoadItemType, ReadItem, ) from torch.futures import Future from transformers import AutoTokenizer, Olmo3Config, Olmo3ForCausalLM from ...utils import strtobool """ Sample usage: ``` python src/transformers/models/olmo3/convert_olmo3_weights_to_hf.py \ --input_dir /path/to/downloaded/olmo3/weights --model_size 7B --output_dir /output/path ``` Thereafter, models can be loaded via: ```py from transformers import Olmo3ForCausalLM, AutoTokenizer model = Olmo3ForCausalLM.from_pretrained("/output/path") tokenizer = AutoTokenizer.from_pretrained("/output/path") ``` Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM). 
""" def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256): return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of) def read_json(path): with open(path, "r") as f: return json.load(f) def write_json(text, path): with open(path, "w") as f: json.dump(text, f) def normalize_path(path: Path | str) -> str: return str(path).rstrip("/").replace("file://", "") def generate_uuid() -> str: return str(uuid.uuid4()) def get_bytes_range(path: Path | str, bytes_start: int, num_bytes: int) -> bytes: with open(path, "rb") as f: f.seek(bytes_start) return f.read(num_bytes) def _narrow_tensor_by_index(tensor: torch.Tensor, offsets: Sequence[int], sizes: Sequence[int]) -> torch.Tensor: """ Narrow the tensor according to ``offsets`` and ``sizes``. """ narrowed_tensor = tensor for idx, (offset, size) in enumerate(zip(offsets, sizes)): if size < tensor.size(idx): # Reshape to get shard for this rank and we don't want autograd # recording here for the narrow op and 'local_shard' should be a # leaf variable in the autograd graph. narrowed_tensor = narrowed_tensor.narrow(idx, offset, size) return narrowed_tensor @dataclass class _StorageInfo: """This is the per entry storage info.""" relative_path: str offset: int length: int @dataclass class _StoragePrefix: prefix: str class RemoteFileSystemReader(dist_cp.StorageReader): """ A :class:`~torch.distributed.checkpoint.StorageReader` based on :class:`~torch.distributed.checkpoint.FileSystemReader` that can read data directly from cloud storage as well as a local directory. 
""" def __init__( self, path: Path | str, *, thread_count: int | None = None, pre_download: bool = False, work_dir: Path | str | None = None, ): super().__init__() if thread_count is not None and thread_count <= 0: raise ValueError("thread count must be at least 1") self.path = normalize_path(path) self.thread_count = thread_count or 1 self.pre_download = pre_download self.work_dir = normalize_path(work_dir) if work_dir is not None else None self.storage_data: dict[MetadataIndex, _StorageInfo] = {} self.load_id = generate_uuid() self._metadata: Metadata | None = None def _get_bytes(self, relative_path: str, offset: int, length: int) -> bytes: full_path = f"{self.path}/{relative_path}" return get_bytes_range(full_path, offset, length) def _get_content_for_read(self, read_item: ReadItem) -> tuple[ReadItem, bytes]: sinfo = self.storage_data[read_item.storage_index] content = self._get_bytes(sinfo.relative_path, sinfo.offset, sinfo.length) return (read_item, content) def reset(self, checkpoint_id: Path | str | None = None) -> None: self.storage_data = {} if checkpoint_id: self.path = normalize_path(checkpoint_id) self.load_id = generate_uuid() def read_data(self, plan: dist_cp.LoadPlan, planner: dist_cp.LoadPlanner) -> Future[None]: with ThreadPoolExecutor(max_workers=self.thread_count) as executor: read_item_content_futures = [] for read_item in plan.items: read_item_content_futures.append(executor.submit(self._get_content_for_read, read_item)) read_item_content_results = [] for f in as_completed(read_item_content_futures): try: read_item_content_results.append(f.result()) except BaseException: # NOTE: we might get an error here that can't be pickled, which causes a different failure # later when PyTorch tries to reduce that error across ranks. So here we just make # sure we're raising a simple error type that can be pickled. 
raise RuntimeError(f"Original error:\n{traceback.format_exc()}") # Modified from `FileSystemReader.read_data()` for read_item, content in read_item_content_results: bytes = io.BytesIO(content) bytes.seek(0) if read_item.type == LoadItemType.BYTE_IO: planner.load_bytes(read_item, bytes) else: # NOTE: 'weights_only=False' needed to load torchao's float8 linear layer checkpoints tensor = cast(torch.Tensor, torch.load(bytes, map_location="cpu", weights_only=False)) tensor = _narrow_tensor_by_index(tensor, read_item.storage_offsets, read_item.lengths) target_tensor = planner.resolve_tensor(read_item).detach() assert target_tensor.size() == tensor.size(), ( f"req {read_item.storage_index} mismatch sizes {target_tensor.size()} vs {tensor.size()}" ) target_tensor.copy_(tensor) planner.commit_tensor(read_item, target_tensor) fut: Future = Future() fut.set_result(None) return fut def read_metadata(self) -> Metadata: if self._metadata is None: try: if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")): raise ValueError( "This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially " "malicious. It's recommended to never unpickle data that could have come from an untrusted source, or " "that could have been tampered with. If you already verified the pickle data and decided to use it, " "you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it." ) with (Path(self.path) / ".metadata").open("rb") as metadata_file: metadata = pickle.load(metadata_file) except FileNotFoundError as exc: msg = f"'{self.path}' is not a distributed checkpoint folder." suggested_dir = os.path.join(self.path, "model_and_optim") if Path(os.path.join(suggested_dir, ".metadata")).exists(): msg += f" Did you mean to use '{suggested_dir}'?" 
raise FileNotFoundError(msg) from exc if getattr(metadata, "storage_meta", None) is None: metadata.storage_meta = StorageMeta() metadata.storage_meta.load_id = self.load_id self._metadata = metadata return self._metadata def set_up_storage_reader(self, metadata: Metadata, is_coordinator: bool) -> None: del is_coordinator self.storage_data = metadata.storage_data assert self.storage_data is not None def prepare_local_plan(self, plan: dist_cp.LoadPlan) -> dist_cp.LoadPlan: return plan def prepare_global_plan(self, global_plan: list[dist_cp.LoadPlan]) -> list[dist_cp.LoadPlan]: return global_plan @property def checkpoint_id(self) -> str: return self.path @classmethod def validate_checkpoint_id(cls, checkpoint_id: Path | str) -> bool: del checkpoint_id return True def load_model(model_path: str): def _load_unsharded_keys( dir: Path | str, keys: list[str], *, pre_download: bool = False, work_dir: Path | str | None = None, ) -> dict[str, Any]: from torch.distributed.checkpoint.default_planner import _EmptyStateDictLoadPlanner from torch.distributed.checkpoint.state_dict_loader import _load_state_dict state_dict: dict[str, Any] = {} _load_state_dict( state_dict, storage_reader=RemoteFileSystemReader(dir, pre_download=pre_download, work_dir=work_dir), planner=_EmptyStateDictLoadPlanner(keys=keys), no_dist=True, ) return state_dict if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")): raise ValueError( "This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially " "malicious. It's recommended to never unpickle data that could have come from an untrusted source, or " "that could have been tampered with. If you already verified the pickle data and decided to use it, " "you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it." 
) with (Path(model_path) / ".metadata").open("rb") as metadata_file: metadata = pickle.load(metadata_file) keys = [key for key in metadata.state_dict_metadata.keys() if key.startswith("model.")] # keys = ["model.blocks.0.attention.w_q.weight"] return _load_unsharded_keys( model_path, keys, # model_path, ["model.blocks.0.attention.w_q.weight", "model.blocks.0.attention.w_k.weight"] ) def write_model( model_path, input_base_path, include_tokenizer=True, tokenizer_id=None, tmp_cleanup=True, ): os.makedirs(model_path, exist_ok=True) tmp_model_path = os.path.join(model_path, "tmp") os.makedirs(tmp_model_path, exist_ok=True) config_path = Path(input_base_path) / "config.json" olmo3_config = json.loads(config_path.read_text()) model_config = olmo3_config["model"] block_config = model_config["block"] attention_config = block_config["attention"] tokenizer_config = olmo3_config["dataset"]["tokenizer"] n_layers = model_config["n_layers"] n_heads = attention_config["n_heads"] dim = model_config["d_model"] dims_per_head = dim // n_heads base = attention_config["rope"]["theta"] inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head)) max_position_embeddings = olmo3_config["train_module"]["max_sequence_length"] if attention_config.get("n_kv_heads", None) is not None: num_key_value_heads = model_config["n_kv_heads"] # for GQA / MQA else: num_key_value_heads = n_heads print(f"Fetching all parameters from the checkpoint at {input_base_path}.") # Not sharded # (The sharded implementation would also work, but this is simpler.) 
loaded = load_model(os.path.join(input_base_path, "model_and_optim"))["model"] print(loaded.keys()) # loaded = torch.load(os.path.join(input_base_path, "model.pt"), map_location="cpu", weights_only=True) param_count = 0 index_dict: dict[str, Any] = {"weight_map": {}} for layer_i in range(n_layers): filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin" # Unsharded state_dict = { f"model.layers.{layer_i}.self_attn.q_proj.weight": loaded[f"blocks.{layer_i}.attention.w_q.weight"], f"model.layers.{layer_i}.self_attn.k_proj.weight": loaded[f"blocks.{layer_i}.attention.w_k.weight"], f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"blocks.{layer_i}.attention.w_v.weight"], f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"blocks.{layer_i}.attention.w_out.weight"], f"model.layers.{layer_i}.self_attn.q_norm.weight": loaded[f"blocks.{layer_i}.attention.q_norm.weight"], f"model.layers.{layer_i}.self_attn.k_norm.weight": loaded[f"blocks.{layer_i}.attention.k_norm.weight"], f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"blocks.{layer_i}.feed_forward.w1.weight"], f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"blocks.{layer_i}.feed_forward.w2.weight"], f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"blocks.{layer_i}.feed_forward.w3.weight"], f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[ f"blocks.{layer_i}.attention_norm.weight" ], f"model.layers.{layer_i}.post_feedforward_layernorm.weight": loaded[ f"blocks.{layer_i}.feed_forward_norm.weight" ], } state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq for k, v in state_dict.items(): index_dict["weight_map"][k] = filename param_count += v.numel() torch.save(state_dict, os.path.join(tmp_model_path, filename)) filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin" # Unsharded # TODO: Deal with weight-tying state_dict = { "model.embed_tokens.weight": loaded["embeddings.weight"], "model.norm.weight": 
loaded["lm_head.norm.weight"], "lm_head.weight": loaded["lm_head.w_out.weight"], } for k, v in state_dict.items(): index_dict["weight_map"][k] = filename param_count += v.numel() torch.save(state_dict, os.path.join(tmp_model_path, filename)) # Write configs index_dict["metadata"] = {"total_size": param_count * 2} write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json")) config = Olmo3Config( vocab_size=model_config["vocab_size"], hidden_size=dim, intermediate_size=block_config["feed_forward"]["hidden_size"], num_hidden_layers=n_layers, num_attention_heads=n_heads, num_key_value_heads=num_key_value_heads, max_position_embeddings=max_position_embeddings, pad_token_id=tokenizer_config["pad_token_id"], bos_token_id=None, eos_token_id=tokenizer_config["eos_token_id"], tie_word_embeddings=False, rms_norm_eps=block_config["layer_norm"]["eps"], rope_theta=base, ) config.save_pretrained(tmp_model_path) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() if include_tokenizer: tokenizer_id = tokenizer_id or tokenizer_config["identifier"] _write_tokenizer(model_path, tokenizer_id) print("Loading the checkpoint in a Olmo 3 model.") model = Olmo3ForCausalLM.from_pretrained(tmp_model_path, dtype=torch.bfloat16) print("Resizing token embeddings to match tokenizer config.") model.resize_token_embeddings(tokenizer_config["vocab_size"]) # Avoid saving this as part of the config. del model.config._name_or_path print("Saving in the Transformers format.") model.save_pretrained(model_path) if tmp_cleanup: # Make cleanup optional; attempting to `rmtree` the `tmp_model_path` causes # errors if using NFS. 
shutil.rmtree(tmp_model_path) def _write_tokenizer( output_path: Path, tokenizer_id: str, ) -> None: print(f"Saving a tokenizer to {output_path}.") tokenizer = AutoTokenizer.from_pretrained(tokenizer_id) tokenizer.save_pretrained(output_path) def main(): parser = argparse.ArgumentParser() parser.add_argument( "--input_dir", required=True, help="Location of Olmo 3 weights, which contains config.yaml and model.pt.", ) parser.add_argument( "--no_tokenizer", action="store_false", dest="include_tokenizer", help="If set, do not convert OLMo tokenizer to HF tokenizer.", ) parser.add_argument( "--tokenizer", type=Path, default=None, help="Location of Olmo 3 tokenizer json file. Defaults to what is set in the config file.", ) parser.add_argument( "--output_dir", required=True, help="Location to write HF model and tokenizer", ) parser.add_argument( "--no_tmp_cleanup", action="store_false", dest="tmp_cleanup", help="If passed, don't remove temp dir at end of HF conversion.", ) args = parser.parse_args() write_model( model_path=args.output_dir, input_base_path=args.input_dir, include_tokenizer=args.include_tokenizer, tokenizer_id=args.tokenizer, tmp_cleanup=args.tmp_cleanup, ) if __name__ == "__main__": main()
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/olmo3/convert_olmo3_weights_to_hf.py", "license": "Apache License 2.0", "lines": 387, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/olmo3/modular_olmo3.py
# Copyright 2025 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable

import torch
import torch.nn as nn

from transformers.utils.generic import TransformersKwargs

from ...cache_utils import Cache, DynamicCache
from ...configuration_utils import PreTrainedConfig, layer_type_validation
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_outputs import BaseModelOutputWithPast
from ...modeling_rope_utils import RopeParameters
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ..gemma2.modeling_gemma2 import Gemma2RotaryEmbedding
from ..olmo2.modeling_olmo2 import (
    Olmo2Attention,
    Olmo2DecoderLayer,
    Olmo2ForCausalLM,
    Olmo2Model,
    Olmo2PreTrainedModel,
    Olmo2RMSNorm,
    apply_rotary_pos_emb,
    eager_attention_forward,
)


class Olmo3Config(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Olmo3Model`]. It is used to instantiate an OLMo3
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the [allenai/OLMo-3-0725-1B](https://huggingface.co/allenai/OLMo-3-0725-1B).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50304):
            Vocabulary size of the Olmo3 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Olmo3Model`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 1):
            Padding token id.
        bos_token_id (`int`, *optional*):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 50279):
            End of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with
            longer `max_position_embeddings`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        sliding_window (`int`, *optional*, defaults to 4096):
            Size of the sliding window for sliding window attention.
        layer_types (`list`, *optional*):
            Attention pattern for each layer. Defaults to sliding window attention for 3 out of 4 layers, and full
            attention for every 4th layer.

    ```python
    >>> from transformers import Olmo3Model, Olmo3Config

    >>> # Initializing a Olmo3 7B style configuration
    >>> configuration = Olmo3Config()

    >>> # Initializing a model from the Olmo3 7B style configuration
    >>> model = Olmo3Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    model_type = "olmo3"
    keys_to_ignore_at_inference = ["past_key_values"]
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise_gather_output",  # we need to replicate here due to the added norm on q and k
        "layers.*.self_attn.k_proj": "colwise_gather_output",  # we need to replicate here due to the added norm on q and k
        "layers.*.self_attn.v_proj": "colwise_gather_output",  # we need to replicate here due to the added norm on q and k
        "layers.*.self_attn.o_proj": "rowwise_split_input",  # input is replicated due to the added norm on q and k
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size: int | None = 50304,
        hidden_size: int | None = 4096,
        intermediate_size: int | None = 11008,
        num_hidden_layers: int | None = 32,
        num_attention_heads: int | None = 32,
        num_key_value_heads: int | None = None,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 2048,
        initializer_range: float | None = 0.02,
        use_cache: bool | None = True,
        pad_token_id: int | None = 1,
        bos_token_id: int | None = None,
        eos_token_id: int | None = 50279,
        tie_word_embeddings: bool | None = False,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        rms_norm_eps: float | None = 1e-5,
        sliding_window: int | None = 4096,
        layer_types: list[str] | None = None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.tie_word_embeddings = tie_word_embeddings
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.rms_norm_eps = rms_norm_eps
        self.sliding_window = sliding_window
        self.layer_types = layer_types
        if self.layer_types is None:
            # Default pattern: full attention on every 4th layer, sliding window otherwise.
            self.layer_types = [
                "sliding_attention" if (i + 1) % 4 != 0 else "full_attention" for i in range(self.num_hidden_layers)
            ]
        layer_type_validation(self.layer_types, self.num_hidden_layers)
        self.rope_parameters = rope_parameters

        super().__init__(**kwargs)


class Olmo3RMSNorm(Olmo2RMSNorm):
    pass


# Olmo3 attention is identical to OLMo 2 attention except:
# - Sliding window attention is used for 3 out of 4 layers.
class Olmo3Attention(Olmo2Attention):
    """OLMo 2 attention with a per-layer attention type: layers marked
    `sliding_attention` in `config.layer_types` use a sliding window of
    `config.sliding_window`; `full_attention` layers use no window."""

    def __init__(self, config: Olmo3Config, layer_idx: int):
        super().__init__(config, layer_idx=layer_idx)
        # Window size is only set for sliding-attention layers; `None` disables windowing.
        self.attention_type = config.layer_types[layer_idx]
        self.sliding_window = config.sliding_window if self.attention_type == "sliding_attention" else None

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """Project, normalize (q/k), apply RoPE, update the KV cache, and run the
        configured attention implementation; returns (attn_output, attn_weights)."""
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        # Note: q/k are RMS-normalized before head reshaping (OLMo 2 style).
        query_states = self.q_norm(self.q_proj(hidden_states))
        key_states = self.k_norm(self.k_proj(hidden_states))
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(hidden_shape).transpose(1, 2)
        key_states = key_states.view(hidden_shape).transpose(1, 2)
        value_states = value_states.view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=self.sliding_window,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class Olmo3DecoderLayer(Olmo2DecoderLayer):
    pass


class Olmo3RotaryEmbedding(Gemma2RotaryEmbedding):
    pass


class Olmo3PreTrainedModel(Olmo2PreTrainedModel):
    pass


# The OLMo 3 model is identical to the OLMo 2 model, except:
# - Sliding window attention is used for 3 out of 4 layers.
# - RoPE scaling is not applied to sliding window attention layers.
class Olmo3Model(Olmo2Model):
    """OLMo 2 model body rebuilt with Olmo3 layers and a per-layer-type causal mask."""

    def __init__(self, config: Olmo3Config):
        super().__init__(config)
        self.norm = Olmo3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.layers = nn.ModuleList(
            [Olmo3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.rotary_emb = Olmo3RotaryEmbedding(config=config)

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        """Run the decoder stack, selecting a full or sliding-window mask per layer
        based on each layer's `self_attn.attention_type`."""
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position: torch.Tensor = (
                torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device) + past_seen_tokens
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # It may already have been prepared by e.g. `generate`
        if not isinstance(causal_mask_mapping := attention_mask, dict):
            # Prepare mask arguments
            mask_kwargs = {
                "config": self.config,
                "inputs_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "position_ids": position_ids,
            }
            # Create the masks
            causal_mask_mapping = {
                "full_attention": create_causal_mask(**mask_kwargs),
                "sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
            }

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask_mapping[decoder_layer.self_attn.attention_type],
                position_ids=position_ids,
                past_key_values=past_key_values,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )


class Olmo3ForCausalLM(Olmo2ForCausalLM):
    pass


__all__ = [
    "Olmo3Config",
    "Olmo3ForCausalLM",
    "Olmo3Model",
    "Olmo3PreTrainedModel",
]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/olmo3/modular_olmo3.py", "license": "Apache License 2.0", "lines": 304, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:tests/models/olmo3/test_modeling_olmo3.py
# Copyright 2025 the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Olmo3 model.""" import unittest import pytest from parameterized import parameterized from transformers import is_torch_available, set_seed from transformers.generation.configuration_utils import GenerationConfig from transformers.models.auto.tokenization_auto import AutoTokenizer from transformers.testing_utils import ( Expectations, cleanup, require_torch, slow, torch_device, ) from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester from ...test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import ( Olmo3ForCausalLM, Olmo3Model, ) from transformers.models.olmo3.modeling_olmo3 import Olmo3RotaryEmbedding class Olmo3ModelTester(CausalLMModelTester): if is_torch_available(): base_model_class = Olmo3Model @require_torch class Olmo3ModelTest(CausalLMModelTest, unittest.TestCase): test_all_params_have_gradient = False model_tester_class = Olmo3ModelTester # Need to use `0.8` instead of `0.9` for `test_cpu_offload` # This is because we are hitting edge cases with the causal_mask buffer model_split_percents = [0.5, 0.7, 0.8] # used in `test_torch_compile_for_training` _torch_compile_train_cls = Olmo3ForCausalLM if is_torch_available() else None @parameterized.expand([("linear",), ("dynamic",), ("yarn",)]) def test_model_rope_scaling_from_config(self, scaling_type): config, _ = 
self.model_tester.prepare_config_and_inputs_for_common() # Rope only gets applied to full attention layers in Olmo3, so make all layers full attention. config.layer_types = ["full_attention"] * len(config.layer_types) short_input = ids_tensor([1, 10], config.vocab_size) long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights original_model = self.model_tester_class.base_model_class(config) original_model.to(torch_device) original_model.eval() original_short_output = original_model(short_input).last_hidden_state original_long_output = original_model(long_input).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights config.rope_parameters = {"rope_type": scaling_type, "factor": 10.0, "rope_theta": 10_000.0} scaled_model = self.model_tester_class.base_model_class(config) scaled_model.to(torch_device) scaled_model.eval() scaled_short_output = scaled_model(short_input).last_hidden_state scaled_long_output = scaled_model(long_input).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5) else: self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5)) def test_model_rope_scaling_frequencies(self): """Tests the frequency properties of the different RoPE scaling types on the model RoPE layer.""" config, _ = self.model_tester.prepare_config_and_inputs_for_common() # Parent test class's attempt to find Olmo3 rope fails, so we pass here explicitly. 
rope_class = Olmo3RotaryEmbedding scaling_factor = 10 short_input_length = 10 long_input_length = int(config.max_position_embeddings * 1.5) # Inputs x = torch.randn( 1, dtype=torch.float32, device=torch_device ) # used exclusively to get the dtype and the device position_ids_short = torch.arange(short_input_length, dtype=torch.long, device=torch_device) position_ids_short = position_ids_short.unsqueeze(0) position_ids_long = torch.arange(long_input_length, dtype=torch.long, device=torch_device) position_ids_long = position_ids_long.unsqueeze(0) # Sanity check original RoPE config.rope_parameters = {"rope_type": "default", "rope_theta": 10_000.0} original_rope = rope_class(config=config).to(torch_device) original_cos_short, original_sin_short = original_rope(x, position_ids_short) original_cos_long, original_sin_long = original_rope(x, position_ids_long) torch.testing.assert_close(original_cos_short, original_cos_long[:, :short_input_length, :]) torch.testing.assert_close(original_sin_short, original_sin_long[:, :short_input_length, :]) # Sanity check linear RoPE scaling # New position "x" should match original position with index "x/scaling_factor" config.rope_parameters = {"rope_type": "linear", "factor": scaling_factor, "rope_theta": 10_000.0} linear_scaling_rope = rope_class(config=config).to(torch_device) linear_cos_short, linear_sin_short = linear_scaling_rope(x, position_ids_short) linear_cos_long, linear_sin_long = linear_scaling_rope(x, position_ids_long) torch.testing.assert_close(linear_cos_short, linear_cos_long[:, :short_input_length, :]) torch.testing.assert_close(linear_sin_short, linear_sin_long[:, :short_input_length, :]) for new_position in range(0, long_input_length, scaling_factor): original_position = int(new_position // scaling_factor) torch.testing.assert_close(linear_cos_long[:, new_position, :], original_cos_long[:, original_position, :]) torch.testing.assert_close(linear_sin_long[:, new_position, :], original_sin_long[:, original_position, 
:]) # Sanity check Dynamic NTK RoPE scaling # Scaling should only be observed after a long input is fed. We can observe that the frequencies increase # with scaling_factor (or that `inv_freq` decreases) config.rope_parameters = {"rope_type": "dynamic", "factor": scaling_factor, "rope_theta": 10_000.0} ntk_scaling_rope = rope_class(config=config).to(torch_device) ntk_cos_short, ntk_sin_short = ntk_scaling_rope(x, position_ids_short) ntk_cos_long, ntk_sin_long = ntk_scaling_rope(x, position_ids_long) torch.testing.assert_close(ntk_cos_short, original_cos_short) torch.testing.assert_close(ntk_sin_short, original_sin_short) with self.assertRaises(AssertionError): torch.testing.assert_close(ntk_cos_long, original_cos_long) with self.assertRaises(AssertionError): torch.testing.assert_close(ntk_sin_long, original_sin_long) self.assertTrue((ntk_scaling_rope.inv_freq <= original_rope.inv_freq).all()) # Sanity check Yarn RoPE scaling # Scaling should be over the entire input config.rope_parameters = {"rope_type": "yarn", "factor": scaling_factor, "rope_theta": 10_000.0} yarn_scaling_rope = rope_class(config=config).to(torch_device) yarn_cos_short, yarn_sin_short = yarn_scaling_rope(x, position_ids_short) yarn_cos_long, yarn_sin_long = yarn_scaling_rope(x, position_ids_long) torch.testing.assert_close(yarn_cos_short, yarn_cos_long[:, :short_input_length, :]) torch.testing.assert_close(yarn_sin_short, yarn_sin_long[:, :short_input_length, :]) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_cos_short, original_cos_short) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_sin_short, original_sin_short) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_cos_long, original_cos_long) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_sin_long, original_sin_long) @require_torch class Olmo3IntegrationTest(unittest.TestCase): def setUp(self): cleanup(torch_device, gc_collect=True) def 
tearDown(self): cleanup(torch_device, gc_collect=True) @slow def test_model_7b_logits(self): input_ids = [[1, 306, 4658, 278, 6593, 310, 2834, 338]] model = Olmo3ForCausalLM.from_pretrained("shanearora/2025-sep-a-base-model").to( torch_device, dtype=torch.bfloat16 ) out = model(torch.tensor(input_ids, device=torch_device)).logits.float() # Expected mean on dim = -1 expectations = Expectations( { ("cuda", 8): [[1.9575, -2.4659, 0.5985, 1.3795, -0.5207, -0.9844, -2.7795, -1.0069]], } ) EXPECTED_MEAN = torch.tensor(expectations.get_expectation(), device=torch_device) torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2) # slicing logits[0, 0, 0:30] expectations = Expectations( { ("cuda", 8): [8.5625, 5.7812, 4.4688, 2.7031, 3.1094, 4.8125, 5.7188, 3.4219, 2.3906, 2.0938, 3.9844, 5.4688, 3.5312, 5.0938, 2.7656, 8.8125, 9.4375, 9.0625, 8.5000, 8.1875, 7.8750, 7.5312, 7.3125, 7.2812, 7.0000, 2.5625, 4.0312, 3.1719, 7.6562, 4.5625], } ) # fmt: skip EXPECTED_SLICE = torch.tensor(expectations.get_expectation(), device=torch_device) torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-2, atol=1e-2) @slow def test_model_7b_greedy_generation(self): EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same for all observers, and 2) the speed of light is the same for all observers. The first part of the theory is called the principle of relativity, and the second part is called the principle of the constancy of the speed of light. 
The theory of rel""" prompt = "Simply put, the theory of relativity states that " tokenizer = AutoTokenizer.from_pretrained("allenai/dolma2-tokenizer", device_map="auto") model = Olmo3ForCausalLM.from_pretrained("shanearora/2025-sep-a-base-model", device_map="auto") input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.device) # greedy generation outputs generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) @pytest.mark.torch_export_test @slow def test_export_static_cache(self): from transformers.integrations.executorch import ( TorchExportableModuleWithStaticCache, convert_and_export_with_cache, ) olmo3_model = "shanearora/2025-sep-a-base-model" tokenizer = AutoTokenizer.from_pretrained(olmo3_model, pad_token="</s>", padding_side="right") EXPECTED_TEXT_COMPLETION = [ "Simply put, the theory of relativity states that 1) the laws of physics are the same for all observers, and 2", ] max_generation_length = tokenizer(EXPECTED_TEXT_COMPLETION, return_tensors="pt", padding=True)[ "input_ids" ].shape[-1] # Load model device = "cpu" # TODO (joao / export experts): should be on `torch_device`, but causes GPU OOM dtype = torch.bfloat16 cache_implementation = "static" attn_implementation = "sdpa" batch_size = 1 generation_config = GenerationConfig( use_cache=True, cache_implementation=cache_implementation, max_length=max_generation_length, cache_config={ "batch_size": batch_size, "max_cache_len": max_generation_length, }, ) model = Olmo3ForCausalLM.from_pretrained( olmo3_model, device_map=device, dtype=dtype, attn_implementation=attn_implementation, generation_config=generation_config, ) prompts = ["Simply put, the theory of relativity states that "] prompt_tokens = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device) prompt_token_ids = prompt_tokens["input_ids"] max_new_tokens = 
max_generation_length - prompt_token_ids.shape[-1] # Static Cache + eager eager_generated_ids = model.generate( **prompt_tokens, max_new_tokens=max_new_tokens, do_sample=False, cache_implementation=cache_implementation ) eager_generated_text = tokenizer.batch_decode(eager_generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, eager_generated_text) # Static Cache + export exported_program = convert_and_export_with_cache(model) ep_generated_ids = TorchExportableModuleWithStaticCache.generate( exported_program=exported_program, prompt_token_ids=prompt_token_ids, max_new_tokens=max_new_tokens ) ep_generated_text = tokenizer.batch_decode(ep_generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, ep_generated_text)
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/olmo3/test_modeling_olmo3.py", "license": "Apache License 2.0", "lines": 235, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:src/transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything_fast.py
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Image processor class for PromptDepthAnything.""" import math from typing import TYPE_CHECKING, Optional, Union from ...image_processing_utils import BatchFeature from ...processing_utils import Unpack if TYPE_CHECKING: from ...modeling_outputs import DepthEstimatorOutput import torch import torchvision.transforms.v2.functional as tvF from ...image_processing_utils_fast import ( BaseImageProcessorFast, group_images_by_shape, reorder_images, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, SizeDict, ) from ...utils import ( TensorType, auto_docstring, requires_backends, ) from .image_processing_prompt_depth_anything import PromptDepthAnythingImageProcessorKwargs def _constrain_to_multiple_of(val, multiple, min_val=0, max_val=None): """Constrain a value to be a multiple of another value.""" x = round(val / multiple) * multiple if max_val is not None and x > max_val: x = math.floor(val / multiple) * multiple if x < min_val: x = math.ceil(val / multiple) * multiple return x def _get_resize_output_image_size( input_image: "torch.Tensor", output_size: tuple[int, int], keep_aspect_ratio: bool, multiple: int, ) -> tuple[int, int]: """Get the output size for resizing an image.""" input_height, input_width = input_image.shape[-2:] output_height, output_width = output_size # determine new height 
and width scale_height = output_height / input_height scale_width = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width) < abs(1 - scale_height): # fit width scale_height = scale_width else: # fit height scale_width = scale_height new_height = _constrain_to_multiple_of(scale_height * input_height, multiple=multiple) new_width = _constrain_to_multiple_of(scale_width * input_width, multiple=multiple) return (new_height, new_width) @auto_docstring class PromptDepthAnythingImageProcessorFast(BaseImageProcessorFast): model_input_names = ["pixel_values", "prompt_depth"] resample = PILImageResampling.BICUBIC image_mean = IMAGENET_STANDARD_MEAN image_std = IMAGENET_STANDARD_STD size = {"height": 384, "width": 384} do_resize = True do_rescale = True do_normalize = True keep_aspect_ratio = False ensure_multiple_of = 1 do_pad = False size_divisor = None prompt_scale_to_meter = 0.001 valid_kwargs = PromptDepthAnythingImageProcessorKwargs def __init__(self, **kwargs: Unpack[PromptDepthAnythingImageProcessorKwargs]): super().__init__(**kwargs) @auto_docstring def preprocess( self, images: ImageInput, prompt_depth: ImageInput | None = None, **kwargs: Unpack[PromptDepthAnythingImageProcessorKwargs], ) -> BatchFeature: r""" prompt_depth (`ImageInput`, *optional*): Prompt depth to preprocess. """ return super().preprocess(images, prompt_depth, **kwargs) def resize_with_aspect_ratio( self, image: "torch.Tensor", size: SizeDict, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, interpolation: Optional["tvF.InterpolationMode"] = None, ) -> "torch.Tensor": """ Resize an image to target size while optionally maintaining aspect ratio and ensuring dimensions are multiples. 
""" # Set default interpolation to BICUBIC to match the slow processor (causes slight numerical differences otherwise) if interpolation is None: interpolation = tvF.InterpolationMode.BICUBIC # Custom resize with aspect ratio preservation and ensure_multiple_of constraint output_size = _get_resize_output_image_size( image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of, ) # Standard resize method with calculated output size return self.resize( image=image, size=SizeDict(height=output_size[0], width=output_size[1]), interpolation=interpolation, ) def pad_image( self, image: "torch.Tensor", size_divisor: int, ) -> "torch.Tensor": """ Center pad an image to be a multiple of size_divisor. """ def _get_pad(size, size_divisor): new_size = math.ceil(size / size_divisor) * size_divisor pad_size = new_size - size pad_size_left = pad_size // 2 pad_size_right = pad_size - pad_size_left return pad_size_left, pad_size_right height, width = image.shape[-2:] # Match slow processor and PyTorch convention: width->left/right, height->top/bottom pad_size_left, pad_size_right = _get_pad(width, size_divisor) pad_size_top, pad_size_bottom = _get_pad(height, size_divisor) # Use torchvision padding for fast processing # /!\ NB: torchvision tvF.pad expects (left, top, right, bottom) for the last two dims (W then H) # Source: https://docs.pytorch.org/vision/main/generated/torchvision.transforms.Pad.html # So: (left=width_pad, top=height_pad, right=width_pad, bottom=height_pad) padding = [pad_size_left, pad_size_top, pad_size_right, pad_size_bottom] padded_image = tvF.pad(image, padding=padding) return padded_image def _preprocess_image_like_inputs( self, images: ImageInput, prompt_depth: ImageInput | None, input_data_format: ChannelDimension, device: Union[str, "torch.device"] | None = None, prompt_scale_to_meter: float | None = None, return_tensors: str | TensorType | None = None, **kwargs: 
Unpack[PromptDepthAnythingImageProcessorKwargs], ) -> BatchFeature: """ Preprocess image-like inputs, including the main images and optional prompt depth. """ images = self._prepare_image_like_inputs( images=images, do_convert_rgb=False, input_data_format=input_data_format, device=device ) # always use do_convert_rgb=False rather than defining it as a param to match slow processor # Process images with the standard pipeline pixel_values = self._preprocess(images, return_tensors=return_tensors, **kwargs) data = {"pixel_values": pixel_values} # Process prompt depth if provided if prompt_depth is not None: processed_prompt_depths = self._prepare_image_like_inputs( images=prompt_depth, do_convert_rgb=False, # Depth maps should not be converted input_data_format=input_data_format, device=images[0].device if images else device, expected_ndims=2, ) # Validate prompt_depths has same length as images as in slow processor if len(processed_prompt_depths) != len(images): raise ValueError( f"Number of prompt depth images ({len(processed_prompt_depths)}) does not match number of input images ({len(images)})" ) final_prompt_depths = [] for depth in processed_prompt_depths: depth = depth * prompt_scale_to_meter # Handle case where depth is constant (min == max) if depth.min() == depth.max(): depth[0, 0] = depth[0, 0] + 1e-6 # Add small variation to avoid numerical issues if depth.ndim == 2: # Add channel dimension if needed depth = depth.unsqueeze(0) # [H, W] -> [1, H, W] (channels first) depth = depth.float() # Convert to float32 to match slow processor final_prompt_depths.append(depth) if return_tensors: # Stack while preserving the [H, W, C] format that the slow processor uses final_prompt_depths = torch.stack(final_prompt_depths, dim=0) data["prompt_depth"] = final_prompt_depths return BatchFeature(data=data, tensor_type=return_tensors) def _preprocess( self, images: list["torch.Tensor"], do_resize: bool, size: SizeDict, keep_aspect_ratio: bool | None, interpolation: 
Optional["tvF.InterpolationMode"], do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: float | list[float] | None, image_std: float | list[float] | None, do_pad: bool | None, disable_grouping: bool | None, ensure_multiple_of: int | None = None, return_tensors: str | TensorType | None = None, size_divisor: int | None = None, **kwargs, ) -> "torch.Tensor": """ Override the base _preprocess method to handle custom PromptDepthAnything parameters. """ grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping) resized_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_resize: stacked_images = self.resize_with_aspect_ratio( image=stacked_images, size=size, keep_aspect_ratio=keep_aspect_ratio, ensure_multiple_of=ensure_multiple_of, interpolation=interpolation, ) resized_images_grouped[shape] = stacked_images resized_images = reorder_images(resized_images_grouped, grouped_images_index) grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping) processed_images_grouped = {} for shape, stacked_images in grouped_images.items(): stacked_images = self.rescale_and_normalize( stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) if do_pad and size_divisor is not None: stacked_images = self.pad_image(stacked_images, size_divisor) processed_images_grouped[shape] = stacked_images processed_images = reorder_images(processed_images_grouped, grouped_images_index) # Only stack tensors if they all have the same shape and return_tensors is specified if return_tensors == "pt": processed_images = torch.stack(processed_images, dim=0) return processed_images def post_process_depth_estimation( self, outputs: "DepthEstimatorOutput", target_sizes: TensorType | list[tuple[int, int]] | None | None = None, ) -> list[dict[str, TensorType]]: """ Converts the raw output of [`DepthEstimatorOutput`] into final depth predictions and 
depth PIL images. Only supports PyTorch. Args: outputs ([`DepthEstimatorOutput`]): Raw outputs of the model. target_sizes (`TensorType` or `list[tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. Returns: `list[dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth predictions. """ requires_backends(self, "torch") predicted_depth = outputs.predicted_depth if (target_sizes is not None) and (len(predicted_depth) != len(target_sizes)): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the predicted depth" ) results = [] target_sizes = [None] * len(predicted_depth) if target_sizes is None else target_sizes for depth, target_size in zip(predicted_depth, target_sizes): if target_size is not None: depth = torch.nn.functional.interpolate( depth.unsqueeze(0).unsqueeze(1), size=target_size, mode="bicubic", align_corners=False ).squeeze() results.append({"predicted_depth": depth}) return results __all__ = ["PromptDepthAnythingImageProcessorFast"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything_fast.py", "license": "Apache License 2.0", "lines": 285, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/qwen3_vl/modular_qwen3_vl.py
# Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Qwen3-VL model.""" from collections.abc import Callable from dataclasses import dataclass from typing import Any import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from ... import initialization as init from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...configuration_utils import PreTrainedConfig from ...feature_extraction_utils import BatchFeature from ...image_utils import ImageInput from ...masking_utils import create_causal_mask from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_outputs import BaseModelOutputWithPast, BaseModelOutputWithPooling from ...modeling_rope_utils import RopeParameters, dynamic_rope_update from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import ProcessingKwargs, Unpack from ...tokenization_utils_base import PreTokenizedInput, TextInput from ...utils import auto_docstring, can_return_tuple, logging from ...utils.generic import maybe_autocast, merge_with_config_defaults from ...utils.output_capturing import capture_outputs from ...video_utils import VideoInput from ..llama.modeling_llama import LlamaRotaryEmbedding from ..qwen2_5_vl.modeling_qwen2_5_vl import ( Qwen2_5_VLCausalLMOutputWithPast, Qwen2_5_VLForConditionalGeneration, Qwen2_5_VLVisionBlock, ) from 
class Qwen3VLVisionConfig(PreTrainedConfig):
    r"""
    Configuration for the Qwen3-VL vision encoder (`Qwen3VLVisionModel`).

    Args:
        depth (`int`, *optional*, defaults to 27):
            Number of `Qwen3VLVisionBlock` layers stacked in the encoder.
        hidden_size (`int`, *optional*, defaults to 1152):
            Dimensionality of the patch embeddings inside the vision encoder.
        hidden_act (`str`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            Activation function used by the vision MLP blocks (looked up in `ACT2FN`).
        intermediate_size (`int`, *optional*, defaults to 4304):
            Dimensionality of the vision MLP hidden layer.
        num_heads (`int`, *optional*, defaults to 16):
            Number of attention heads per vision attention layer.
        in_channels (`int`, *optional*, defaults to 3):
            Number of input image channels fed to the Conv3d patch embedding.
        patch_size (`int`, *optional*, defaults to 16):
            Spatial side length of each image patch.
        spatial_merge_size (`int`, *optional*, defaults to 2):
            Side length of the patch neighborhood merged into one token by the patch merger.
        temporal_patch_size (`int`, *optional*, defaults to 2):
            Temporal extent of the Conv3d patch-embedding kernel (frames per patch).
        out_hidden_size (`int`, *optional*, defaults to 3584):
            Dimensionality of the merged visual features handed to the language model.
        num_position_embeddings (`int`, *optional*, defaults to 2304):
            Number of learned position embeddings; the vision model takes its square
            root as the grid side, so this is expected to be a perfect square.
        deepstack_visual_indexes (`list[int]`, *optional*, defaults to `[8, 16, 24]`):
            Indexes of the vision blocks whose hidden states are tapped as deepstack features.
        initializer_range (`float`, *optional*, defaults to 0.02):
            Standard deviation of the truncated-normal weight initializer.
    """

    model_type = "qwen3_vl"
    base_config_key = "vision_config"

    def __init__(
        self,
        depth=27,
        hidden_size=1152,
        hidden_act="gelu_pytorch_tanh",
        intermediate_size=4304,
        num_heads=16,
        in_channels=3,
        patch_size=16,
        spatial_merge_size=2,
        temporal_patch_size=2,
        out_hidden_size=3584,
        num_position_embeddings=2304,
        # NOTE(review): mutable default list is shared across calls; safe only
        # because it is stored, never mutated, here.
        deepstack_visual_indexes=[8, 16, 24],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.depth = depth
        self.hidden_size = hidden_size
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.num_heads = num_heads
        self.in_channels = in_channels
        self.patch_size = patch_size
        self.spatial_merge_size = spatial_merge_size
        self.temporal_patch_size = temporal_patch_size
        self.out_hidden_size = out_hidden_size
        self.num_position_embeddings = num_position_embeddings
        self.initializer_range = initializer_range
        self.deepstack_visual_indexes = deepstack_visual_indexes
Instantiating a configuration with the defaults will yield a similar configuration to that of Qwen3-VL-4B-Instruct [Qwen/Qwen3-VL-4B-Instruct](https://huggingface.co/Qwen/Qwen3-VL-4B-Instruct). Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 151936): Vocabulary size of the Qwen3VL model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Qwen3VLModel`] hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 22016): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer encoder. num_key_value_heads (`int`, *optional*, defaults to 32): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `32`. head_dim (`int`, *optional*, defaults to 128): The dimension of the head. If not specified, will default to `hidden_size // num_attention_heads`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. 
max_position_embeddings (`int`, *optional*, defaults to 128000): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. rope_parameters (`RopeParameters`, *optional*): Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with longer `max_position_embeddings`. attention_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. pad_token_id (`int`, *optional*): The id of the padding token. If unset, the config is treated as not having a dedicated padding token. 
```python >>> from transformers import Qwen3VLTextModel, Qwen3VLTextConfig >>> # Initializing a Qwen3VL style configuration >>> configuration = Qwen3VLTextConfig() >>> # Initializing a model from the Qwen3-VL-7B style configuration >>> model = Qwen3VLTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "qwen3_vl_text" base_config_key = "text_config" default_theta = 500000.0 def __init__( self, vocab_size: int | None = 151936, hidden_size: int | None = 4096, intermediate_size: int | None = 22016, num_hidden_layers: int | None = 32, num_attention_heads: int | None = 32, num_key_value_heads: int | None = 32, head_dim: int | None = 128, hidden_act: str | None = "silu", max_position_embeddings: int | None = 128000, initializer_range: float | None = 0.02, rms_norm_eps: float | None = 1e-6, use_cache: bool | None = True, rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None, attention_bias: bool | None = False, attention_dropout: float | None = 0.0, pad_token_id: int | None = None, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.head_dim = head_dim self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.rope_parameters = rope_parameters self.pad_token_id = pad_token_id super().__init__( ignore_keys_at_rope_validation={"mrope_section", "mrope_interleaved"}, **kwargs, ) class Qwen3VLConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration 
of a [`Qwen3VLModel`]. It is used to instantiate a Qwen3-VL model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of Qwen3-VL-4B-Instruct [Qwen/Qwen3-VL-4B-Instruct](https://huggingface.co/Qwen/Qwen3-VL-4B-Instruct). Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Qwen3VLTextConfig`): The config object or dictionary of the text backbone. vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Qwen3VLVisionConfig`): The config object or dictionary of the vision backbone. image_token_id (`int`, *optional*, defaults to 151655): The token id used as the placeholder for image inputs. video_token_id (`int`, *optional*, defaults to 151656): The token id used as the placeholder for video inputs. vision_start_token_id (`int`, *optional*, defaults to 151652): The token id that marks the start of a vision segment (image or video). vision_end_token_id (`int`, *optional*, defaults to 151653): The token id that marks the end of a vision segment (image or video). tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied. 
class Qwen3VLVisionMLP(nn.Module):
    """Two-layer feed-forward block for the Qwen3-VL vision transformer.

    Projects `hidden_size -> intermediate_size -> hidden_size`, applying the
    activation named by `config.hidden_act` between the two linear layers.
    """

    def __init__(self, config):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        # Up- and down-projection around the nonlinearity; both carry biases.
        self.linear_fc1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=True)
        self.linear_fc2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=True)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        projected = self.linear_fc1(hidden_state)
        activated = self.act_fn(projected)
        return self.linear_fc2(activated)
class Qwen3VLVisionPatchMerger(nn.Module):
    """Merge a `spatial_merge_size x spatial_merge_size` patch neighborhood into one token.

    The flattened neighborhood (size `hidden_size * spatial_merge_size**2`) is
    passed through a LayerNorm and a small GELU MLP projecting to
    `out_hidden_size`. With `use_postshuffle_norm=True` the LayerNorm is
    applied after the flatten (over the merged vector) rather than before.
    """

    def __init__(self, config: "Qwen3VLVisionConfig", use_postshuffle_norm=False) -> None:
        super().__init__()
        self.hidden_size = config.hidden_size * (config.spatial_merge_size**2)
        self.use_postshuffle_norm = use_postshuffle_norm
        norm_dim = self.hidden_size if use_postshuffle_norm else config.hidden_size
        self.norm = nn.LayerNorm(norm_dim, eps=1e-6)
        self.linear_fc1 = nn.Linear(self.hidden_size, self.hidden_size)
        self.act_fn = nn.GELU()
        self.linear_fc2 = nn.Linear(self.hidden_size, config.out_hidden_size)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.use_postshuffle_norm:
            # Normalize over the already-merged vector.
            x = x.view(-1, self.hidden_size)
        x = self.norm(x).view(-1, self.hidden_size)
        x = self.act_fn(self.linear_fc1(x))
        return self.linear_fc2(x)
    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        """Compute interleaved-MRoPE cos/sin tables.

        Args:
            x: any tensor on the target device/dtype — used exclusively to pick the
                dtype and device of the returned tables.
            position_ids: `(bs, seq)` shared positions, or `(3, bs, seq)` with
                separate temporal/height/width positions for vision grids.

        Returns:
            `(cos, sin)`, each of shape `(bs, seq, head_dim)` after the three
            frequency axes are merged by `apply_interleaved_mrope`.
        """
        # In contrast to other models, Qwen3VL has different position ids for the grids
        # So we expand the inv_freq to shape (3, ...)
        if position_ids.ndim == 2:
            position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
        inv_freq_expanded = self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1)
        position_ids_expanded = position_ids[:, :, None, :].float()  # shape (3, bs, 1, positions)
        # NOTE(review): autocast is disabled on CPU for "mps" inputs — presumably a
        # workaround for missing float32 autocast support on MPS; confirm upstream.
        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with maybe_autocast(device_type=device_type, enabled=False):  # Force float32
            # Outer product of inverse frequencies and positions: (3, bs, seq, head_dim // 2)
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3)
            # Collapse the t/h/w axis into one interleaved frequency layout.
            freqs = self.apply_interleaved_mrope(freqs, self.mrope_section)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling
        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
None]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2) key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface( self.config._attn_implementation, eager_attention_forward ) attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class Qwen3VLTextDecoderLayer(Qwen3DecoderLayer): def __init__(self, config: Qwen3VLTextConfig, layer_idx: int): super().__init__(config, layer_idx) del self.attention_type def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: Cache | None = None, use_cache: bool | None = False, cache_position: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> torch.Tensor: return super().forward( hidden_states=hidden_states, position_embeddings=position_embeddings, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, 
cache_position=cache_position,
            **kwargs,
        )


class Qwen3VLModelOutputWithPast(Qwen2VLModelOutputWithPast):
    pass


class Qwen3VLPreTrainedModel(Qwen2VLPreTrainedModel):
    config: Qwen3VLConfig
    _no_split_modules = ["Qwen3VLTextDecoderLayer", "Qwen3VLVisionBlock"]
    _can_record_outputs = {
        "hidden_states": Qwen3VLTextDecoderLayer,
        "attentions": Qwen3VLTextAttention,
    }

    def _init_weights(self, module):
        PreTrainedModel._init_weights(self, module)
        # The rotary inv_freq buffer is deterministic (not trained); re-materialize it here.
        if isinstance(module, Qwen3VLVisionRotaryEmbedding):
            inv_freq = 1.0 / (module.theta ** (torch.arange(0, module.dim, 2, dtype=torch.float) / module.dim))
            init.copy_(module.inv_freq, inv_freq)


class Qwen3VLVisionModel(Qwen3VLPreTrainedModel):
    config: Qwen3VLVisionConfig
    input_modalities = ("image", "video")
    _no_split_modules = ["Qwen3VLVisionBlock"]
    _can_record_outputs = {
        "hidden_states": Qwen3VLVisionBlock,
        "attentions": Qwen3VLVisionAttention,
    }

    def __init__(self, config, *inputs, **kwargs) -> None:
        super().__init__(config, *inputs, **kwargs)
        self.spatial_merge_size = config.spatial_merge_size
        self.patch_size = config.patch_size
        # Number of patches merged into one output token.
        self.spatial_merge_unit = self.spatial_merge_size * self.spatial_merge_size

        self.patch_embed = Qwen3VLVisionPatchEmbed(
            config=config,
        )

        # Learned absolute position table laid out as a square grid of side num_grid_per_side.
        self.pos_embed = nn.Embedding(config.num_position_embeddings, config.hidden_size)
        self.num_grid_per_side = int(config.num_position_embeddings**0.5)

        head_dim = config.hidden_size // config.num_heads
        self.rotary_pos_emb = Qwen3VLVisionRotaryEmbedding(head_dim // 2)

        self.blocks = nn.ModuleList([Qwen3VLVisionBlock(config) for _ in range(config.depth)])
        self.merger = Qwen3VLVisionPatchMerger(
            config=config,
            use_postshuffle_norm=False,
        )

        # Extra mergers that tap intermediate blocks to produce DeepStack features.
        self.deepstack_visual_indexes = config.deepstack_visual_indexes
        self.deepstack_merger_list = nn.ModuleList(
            [
                Qwen3VLVisionPatchMerger(
                    config=config,
                    use_postshuffle_norm=True,
                )
                for _ in range(len(config.deepstack_visual_indexes))
            ]
        )

        self.gradient_checkpointing = False
        self.post_init()

    def rot_pos_emb(self, grid_thw: torch.Tensor) -> torch.Tensor:
        # Build (row, col) coordinates for every patch, ordered by spatial-merge blocks,
        # and look them up in the rotary frequency table.
        merge_size = self.spatial_merge_size

        grid_thw_list = grid_thw.tolist()
        max_hw = max(max(h, w) for _, h, w in grid_thw_list)
        freq_table = self.rotary_pos_emb(max_hw)  # (max_hw, dim // 2)
        device = freq_table.device

        total_tokens = sum(t * h * w for t, h, w in grid_thw_list)
        pos_ids = torch.empty((total_tokens, 2), dtype=torch.long, device=device)

        offset = 0
        for num_frames, height, width in grid_thw_list:
            merged_h, merged_w = height // merge_size, width // merge_size

            block_rows = torch.arange(merged_h, device=device)  # block row indices
            block_cols = torch.arange(merged_w, device=device)  # block col indices
            intra_row = torch.arange(merge_size, device=device)  # intra-block row offsets
            intra_col = torch.arange(merge_size, device=device)  # intra-block col offsets

            # Compute full-resolution positions
            row_idx = block_rows[:, None, None, None] * merge_size + intra_row[None, None, :, None]
            col_idx = block_cols[None, :, None, None] * merge_size + intra_col[None, None, None, :]

            row_idx = row_idx.expand(merged_h, merged_w, merge_size, merge_size).reshape(-1)
            col_idx = col_idx.expand(merged_h, merged_w, merge_size, merge_size).reshape(-1)
            coords = torch.stack((row_idx, col_idx), dim=-1)

            # All frames of a video share the same spatial coordinates.
            if num_frames > 1:
                coords = coords.repeat(num_frames, 1)

            num_tokens = coords.shape[0]
            pos_ids[offset : offset + num_tokens] = coords
            offset += num_tokens

        embeddings = freq_table[pos_ids]  # lookup rotary embeddings
        embeddings = embeddings.flatten(1)
        return embeddings

    def fast_pos_embed_interpolate(self, grid_thw):
        # Bilinearly interpolate the square pos-embed grid to each (h, w) resolution using
        # four weighted gathers (floor/ceil corner combinations) instead of F.interpolate.
        grid_thw_list = grid_thw.tolist()
        grid_ts = [row[0] for row in grid_thw_list]
        grid_hs = [row[1] for row in grid_thw_list]
        grid_ws = [row[2] for row in grid_thw_list]

        device = self.pos_embed.weight.device

        # One list per corner (floor/floor, floor/ceil, ceil/floor, ceil/ceil).
        idx_list = [[] for _ in range(4)]
        weight_list = [[] for _ in range(4)]

        for t, h, w in grid_thw_list:
            h_idxs = torch.linspace(0, self.num_grid_per_side - 1, h)
            w_idxs = torch.linspace(0, self.num_grid_per_side - 1, w)

            h_idxs_floor = h_idxs.int()
            w_idxs_floor = w_idxs.int()
            h_idxs_ceil = (h_idxs.int() + 1).clip(max=self.num_grid_per_side - 1)
            w_idxs_ceil = (w_idxs.int() + 1).clip(max=self.num_grid_per_side - 1)

            # Fractional offsets within a grid cell.
            dh = h_idxs - h_idxs_floor
            dw = w_idxs - w_idxs_floor

            base_h = h_idxs_floor * self.num_grid_per_side
            base_h_ceil = h_idxs_ceil * self.num_grid_per_side

            # Flat indices of the four neighbouring grid corners for every target cell.
            indices = [
                (base_h[None].T + w_idxs_floor[None]).flatten(),
                (base_h[None].T + w_idxs_ceil[None]).flatten(),
                (base_h_ceil[None].T + w_idxs_floor[None]).flatten(),
                (base_h_ceil[None].T + w_idxs_ceil[None]).flatten(),
            ]

            # Matching bilinear weights for the four corners.
            weights = [
                ((1 - dh)[None].T * (1 - dw)[None]).flatten(),
                ((1 - dh)[None].T * dw[None]).flatten(),
                (dh[None].T * (1 - dw)[None]).flatten(),
                (dh[None].T * dw[None]).flatten(),
            ]

            for i in range(4):
                idx_list[i].extend(indices[i].tolist())
                weight_list[i].extend(weights[i].tolist())

        idx_tensor = torch.tensor(idx_list, dtype=torch.long, device=device)
        weight_tensor = torch.tensor(weight_list, dtype=self.pos_embed.weight.dtype, device=device)
        pos_embeds = self.pos_embed(idx_tensor).to(device) * weight_tensor[:, :, None]
        # Sum the four weighted corner contributions.
        patch_pos_embeds = pos_embeds[0] + pos_embeds[1] + pos_embeds[2] + pos_embeds[3]

        patch_pos_embeds = patch_pos_embeds.split([h * w for h, w in zip(grid_hs, grid_ws)])

        patch_pos_embeds_permute = []
        merge_size = self.config.spatial_merge_size
        for pos_embed, t, h, w in zip(patch_pos_embeds, grid_ts, grid_hs, grid_ws):
            # Repeat the spatial embeddings over frames, then reorder into merge-block order.
            pos_embed = pos_embed.repeat(t, 1)
            pos_embed = (
                pos_embed.view(t, h // merge_size, merge_size, w // merge_size, merge_size, -1)
                .permute(0, 1, 3, 2, 4, 5)
                .flatten(0, 4)
            )
            patch_pos_embeds_permute.append(pos_embed)
        patch_pos_embeds = torch.cat(patch_pos_embeds_permute)
        return patch_pos_embeds

    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs: Unpack[TransformersKwargs]
    ) -> tuple | BaseModelOutputWithDeepstackFeatures:
        """
        Args:
            hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`):
                The final hidden states of the model.
            grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`):
                The temporal, height and width of feature shape of each image in LLM.

        Returns:
            `torch.Tensor`: hidden_states.
        """
        hidden_states = self.patch_embed(hidden_states)

        # Interpolated absolute position embeddings are added before the blocks.
        pos_embeds = self.fast_pos_embed_interpolate(grid_thw)
        hidden_states = hidden_states + pos_embeds

        rotary_pos_emb = self.rot_pos_emb(grid_thw)

        seq_len, _ = hidden_states.size()
        hidden_states = hidden_states.reshape(seq_len, -1)
        rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1)
        emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
        position_embeddings = (emb.cos(), emb.sin())

        # Cumulative sequence lengths delimit each frame's tokens for varlen attention.
        cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
            dim=0,
            # Select dtype based on the following factors:
            #  - FA2 requires that cu_seqlens_q must have dtype int32
            #  - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
            # See https://github.com/huggingface/transformers/pull/34852 for more information
            dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
        )
        cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)

        deepstack_feature_lists = []
        for layer_num, blk in enumerate(self.blocks):
            hidden_states = blk(
                hidden_states,
                cu_seqlens=cu_seqlens,
                position_embeddings=position_embeddings,
                **kwargs,
            )
            # Tap the configured intermediate layers for DeepStack features.
            if layer_num in self.deepstack_visual_indexes:
                deepstack_feature = self.deepstack_merger_list[self.deepstack_visual_indexes.index(layer_num)](
                    hidden_states
                )
                deepstack_feature_lists.append(deepstack_feature)

        merged_hidden_states = self.merger(hidden_states)
        return BaseModelOutputWithDeepstackFeatures(
            last_hidden_state=hidden_states,
            pooler_output=merged_hidden_states,
            deepstack_features=deepstack_feature_lists,
        )


@auto_docstring(
    custom_intro=(
        "Text part of Qwen3VL, "
        "not a pure text-only model, as DeepStack integrates visual features into the early hidden states."
    )
)
class Qwen3VLTextModel(Qwen3VLPreTrainedModel, Qwen3Model):
    config: Qwen3VLTextConfig
    input_modalities = ("text",)
    _no_split_modules = ["Qwen3VLTextDecoderLayer"]

    def __init__(self, config: Qwen3VLTextConfig):
        super().__init__(config)
        # Every Qwen3VL text layer is full-attention, so no sliding-layer bookkeeping.
        del self.has_sliding_layers

    def _deepstack_process(
        self, hidden_states: torch.Tensor, visual_pos_masks: torch.Tensor, visual_embeds: torch.Tensor
    ):
        # Add the tapped visual features onto the hidden states at the visual token positions.
        visual_pos_masks = visual_pos_masks.to(hidden_states.device)
        visual_embeds = visual_embeds.to(hidden_states.device, hidden_states.dtype)
        hidden_states = hidden_states.clone()
        local_this = hidden_states[visual_pos_masks, :] + visual_embeds
        hidden_states[visual_pos_masks, :] = local_this
        return hidden_states

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        # args for deepstack
        visual_pos_masks: torch.Tensor | None = None,
        deepstack_visual_embeds: list[torch.Tensor] | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple | BaseModelOutputWithPast:
        r"""
        visual_pos_masks (`torch.Tensor` of shape `(batch_size, seqlen)`, *optional*):
            The mask of the visual positions.
        deepstack_visual_embeds (`list[torch.Tensor]`, *optional*):
            The deepstack visual embeddings. The shape is (num_layers, visual_seqlen, embed_dim).
            The feature is extracted from the different visual encoder layers, and fed to the decoder
            hidden states. It's from the paper DeepStack(https://arxiv.org/abs/2406.04334).
        """
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        # torch.jit.trace() doesn't support cache objects in the output
        if use_cache and past_key_values is None and not torch.jit.is_tracing():
            past_key_values = DynamicCache(config=self.config)

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        # the hard coded `4` is for text, temporal, height and width.
        if position_ids is None:
            position_ids = cache_position.view(1, 1, -1).expand(4, inputs_embeds.shape[0], -1)
        elif position_ids.ndim == 2:
            position_ids = position_ids[None, ...].expand(4, position_ids.shape[0], -1)

        # Split the leading text position ids from the 3D (t, h, w) rope ids.
        if position_ids.ndim == 3 and position_ids.shape[0] == 4:
            text_position_ids = position_ids[0]
            position_ids = position_ids[1:]
        else:
            text_position_ids = None

        attention_mask = create_causal_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=text_position_ids,
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        for layer_idx, decoder_layer in enumerate(self.layers):
            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=attention_mask,
                position_ids=text_position_ids,
                past_key_values=past_key_values,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )
            hidden_states = layer_outputs

            # add visual features to the hidden states of first several layers
            if deepstack_visual_embeds is not None and layer_idx in range(len(deepstack_visual_embeds)):
                hidden_states = self._deepstack_process(
                    hidden_states,
                    visual_pos_masks,
                    deepstack_visual_embeds[layer_idx],
                )

        hidden_states = self.norm(hidden_states)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )


@auto_docstring
class Qwen3VLModel(Qwen2VLModel):
    config: Qwen3VLConfig
    base_model_prefix = "model"
    _checkpoint_conversion_mapping = {}
    _no_split_modules = ["Qwen3VLTextDecoderLayer", "Qwen3VLVisionBlock"]

    def __init__(self, config):
        super().__init__(config)
        self.visual = Qwen3VLVisionModel._from_config(config.vision_config)
        self.language_model = Qwen3VLTextModel._from_config(config.text_config)

    @can_return_tuple
    @auto_docstring
    def get_image_features(
        self,
        pixel_values: torch.FloatTensor,
        image_grid_thw: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithDeepstackFeatures:
        r"""
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The tensors corresponding to the input images.
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        """
        pixel_values = pixel_values.type(self.visual.dtype)
        vision_output: BaseModelOutputWithDeepstackFeatures = self.visual(
            pixel_values, grid_thw=image_grid_thw, return_dict=True, **kwargs
        )
        image_embeds = vision_output.pooler_output
        # One entry per image: merged token count is the thw product divided by merge_size**2.
        split_sizes = (image_grid_thw.prod(-1) // self.visual.spatial_merge_size**2).tolist()
        image_embeds = torch.split(image_embeds, split_sizes)
        vision_output.pooler_output = image_embeds
        return vision_output

    @can_return_tuple
    @auto_docstring
    def get_video_features(
        self,
        pixel_values_videos: torch.FloatTensor,
        video_grid_thw: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithDeepstackFeatures:
        r"""
        pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The tensors corresponding to the input videos.
        video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
            The temporal, height and width of feature shape of each video in LLM.
        """
        # Same implementation as for images
        return self.get_image_features(pixel_values_videos, video_grid_thw, **kwargs)

    @auto_docstring
    @can_return_tuple
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        pixel_values: torch.Tensor | None = None,
        pixel_values_videos: torch.FloatTensor | None = None,
        image_grid_thw: torch.LongTensor | None = None,
        video_grid_thw: torch.LongTensor | None = None,
        mm_token_type_ids: torch.IntTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | Qwen3VLModelOutputWithPast:
        r"""
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
            The temporal, height and width of feature shape of each video in LLM.
        """
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)

        image_mask = None
        video_mask = None
        if pixel_values is not None:
            image_outputs: BaseModelOutputWithDeepstackFeatures = self.get_image_features(
                pixel_values, image_grid_thw, return_dict=True
            )
            image_embeds = image_outputs.pooler_output
            deepstack_image_embeds = image_outputs.deepstack_features
            image_embeds = torch.cat(image_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
            image_mask, _ = self.get_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds
            )
            # Scatter the image embeddings into the image placeholder slots.
            inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)

        if pixel_values_videos is not None:
            video_outputs: BaseModelOutputWithDeepstackFeatures = self.get_video_features(
                pixel_values_videos, video_grid_thw, return_dict=True
            )
            video_embeds = video_outputs.pooler_output
            deepstack_video_embeds = video_outputs.deepstack_features
            video_embeds = torch.cat(video_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
            _, video_mask = self.get_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds
            )
            # Scatter the video embeddings into the video placeholder slots.
            inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)

        visual_pos_masks = None
        deepstack_visual_embeds = None
        if image_mask is not None and video_mask is not None:
            # aggregate visual_pos_masks and deepstack_visual_embeds
            image_mask = image_mask[..., 0]
            video_mask = video_mask[..., 0]
            visual_pos_masks = image_mask | video_mask
            deepstack_visual_embeds = []
            image_mask_joint = image_mask[visual_pos_masks]
            video_mask_joint = video_mask[visual_pos_masks]
            # Merge per-layer image and video DeepStack features into one joint tensor per layer.
            for img_embed, vid_embed in zip(deepstack_image_embeds, deepstack_video_embeds):
                embed_joint = img_embed.new_zeros(visual_pos_masks.sum(), img_embed.shape[-1]).to(img_embed.device)
                embed_joint[image_mask_joint, :] = img_embed
                embed_joint[video_mask_joint, :] = vid_embed
                deepstack_visual_embeds.append(embed_joint)
        elif image_mask is not None:
            image_mask = image_mask[..., 0]
            visual_pos_masks = image_mask
            deepstack_visual_embeds = deepstack_image_embeds
        elif video_mask is not None:
            video_mask = video_mask[..., 0]
            visual_pos_masks = video_mask
            deepstack_visual_embeds = deepstack_video_embeds

        if position_ids is None:
            position_ids = self.compute_3d_position_ids(
                input_ids=input_ids,
                image_grid_thw=image_grid_thw,
                video_grid_thw=video_grid_thw,
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
                past_key_values=past_key_values,
                mm_token_type_ids=mm_token_type_ids,
            )

        outputs = self.language_model(
            input_ids=None,
            position_ids=position_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            cache_position=cache_position,
            visual_pos_masks=visual_pos_masks,
            deepstack_visual_embeds=deepstack_visual_embeds,
            **kwargs,
        )

        return Qwen3VLModelOutputWithPast(
            **outputs,
            rope_deltas=self.rope_deltas,
        )


class Qwen3VLCausalLMOutputWithPast(Qwen2_5_VLCausalLMOutputWithPast):
    pass


class Qwen3VLForConditionalGeneration(Qwen2_5_VLForConditionalGeneration):
    config: Qwen3VLConfig
    _checkpoint_conversion_mapping = {}

    @auto_docstring
    def get_image_features(self, **super_kwargs) -> tuple | BaseModelOutputWithDeepstackFeatures:
        return super().get_image_features(**super_kwargs)

    @auto_docstring
    def get_video_features(self, **super_kwargs) -> tuple | BaseModelOutputWithDeepstackFeatures:
        return super().get_video_features(**super_kwargs)

    @can_return_tuple
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        pixel_values: torch.Tensor | None = None,
        pixel_values_videos: torch.FloatTensor | None = None,
        image_grid_thw: torch.LongTensor | None = None,
        video_grid_thw: torch.LongTensor | None = None,
        mm_token_type_ids: torch.IntTensor | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | Qwen3VLCausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
        video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
            The temporal, height and width of feature shape of each video in LLM.

        Example:

        ```python
        >>> from transformers import AutoProcessor, Qwen3VLForConditionalGeneration

        >>> model = Qwen3VLForConditionalGeneration.from_pretrained("Qwen/Qwen3-VL-8B-Instruct")
        >>> processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-8B-Instruct")

        >>> messages = [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image",
                            "image": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
                        },
                        {"type": "text", "text": "Describe the image."},
                    ],
                }
            ]
        >>> inputs = processor.apply_chat_template(
                messages,
                tokenize=True,
                add_generation_prompt=True,
                return_dict=True,
                return_tensors="pt"
            )

        >>> # Generate
        >>> generated_ids = model.generate(**inputs, max_new_tokens=1024)
        >>> generated_ids_trimmed = [out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
        >>> output_text = processor.batch_decode(generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        >>> print(output_text)
        ```
        """
        outputs = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            pixel_values_videos=pixel_values_videos,
            image_grid_thw=image_grid_thw,
            video_grid_thw=video_grid_thw,
            position_ids=position_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            cache_position=cache_position,
            mm_token_type_ids=mm_token_type_ids,
            **kwargs,
        )

        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size)

        return Qwen3VLCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            rope_deltas=outputs.rope_deltas,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        inputs_embeds=None,
        cache_position=None,
        position_ids=None,
        use_cache=True,
        pixel_values=None,
        pixel_values_videos=None,
        image_grid_thw=None,
        video_grid_thw=None,
        is_first_iteration=False,
        **kwargs,
    ):
        # Overwritten -- in specific circumstances we don't want to forward image inputs to the model

        model_inputs = super().prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            cache_position=cache_position,
            position_ids=position_ids,
            pixel_values=pixel_values,
            pixel_values_videos=pixel_values_videos,
            image_grid_thw=image_grid_thw,
            video_grid_thw=video_grid_thw,
            use_cache=use_cache,
            is_first_iteration=is_first_iteration,
            **kwargs,
        )

        # After the first cached generation step, pixel inputs are not forwarded again.
        if not is_first_iteration and use_cache:
            model_inputs["pixel_values"] = None
            model_inputs["pixel_values_videos"] = None

        return model_inputs

    def _expand_inputs_for_generation(
        self,
        expand_size: int = 1,
        is_encoder_decoder: bool = False,
        input_ids: torch.LongTensor | None = None,
        **model_kwargs,
    ) -> tuple[torch.LongTensor, dict[str, Any]]:
        # Overwritten -- Qwen3VL use timestamps and remove second_per_grid_ts

        # Support for expanding tensors without a batch size dimension
        # e.g., pixel_values, image_grid_thw, pixel_values_videos, video_grid_thw
        # pixel_values.shape[0] is sum(seqlen_images for samples)
        # image_grid_thw.shape[0] is sum(num_images for samples)

        if expand_size == 1:
            return input_ids, model_kwargs

        visual_keys = ["pixel_values", "image_grid_thw", "pixel_values_videos", "video_grid_thw"]

        def _expand_dict_for_generation_visual(dict_to_expand):
            # Expands the flattened visual tensors sample-by-sample (they have no batch dim).
            image_grid_thw = model_kwargs.get("image_grid_thw", None)
            video_grid_thw = model_kwargs.get("video_grid_thw", None)
            image_nums, video_nums = self._get_image_nums_and_video_nums(
                input_ids, inputs_embeds=model_kwargs.get("inputs_embeds", None)
            )

            # video_nums: (batch_size,)
            # since video_nums is the number of videos in the input dependent on the input_ids(vision_start),
            # but qwen3vl append vision_start to each frame of each video, so we need to recover the real video_nums according to video_grid_thw
            if video_grid_thw is not None:
                cumulative_frame_counts = torch.cumsum(video_grid_thw[:, 0], dim=0)
                cumulative_token_video_counts = torch.cumsum(video_nums, dim=0)

                # Find video boundaries in cumulative_frame_counts
                video_boundary_indices = torch.searchsorted(cumulative_frame_counts, cumulative_token_video_counts)

                # example: video_boundary_indices = [3, 5] means video_nums = [4, 2]
                video_nums = torch.diff(torch.cat([-video_boundary_indices.new_ones(1), video_boundary_indices]))

            def _repeat_interleave_samples(x, lengths, repeat_times):
                # Repeat each per-sample slice of `x` `repeat_times` times, preserving sample order.
                samples = torch.split(x, lengths)
                repeat_args = [repeat_times] + [1] * (x.dim() - 1)
                result = torch.cat([sample.repeat(*repeat_args) for sample in samples], dim=0)
                return result

            for key in dict_to_expand:
                if key == "pixel_values":
                    # split images into samples
                    samples = torch.split(image_grid_thw, list(image_nums))
                    # compute the sequence length of images for each sample
                    lengths = [torch.prod(sample, dim=1).sum() for sample in samples]
                    dict_to_expand[key] = _repeat_interleave_samples(
                        dict_to_expand[key], lengths=lengths, repeat_times=expand_size
                    )
                elif key == "image_grid_thw":
                    # get the num of images for each sample
                    lengths = list(image_nums)
                    dict_to_expand[key] = _repeat_interleave_samples(
                        dict_to_expand[key], lengths=lengths, repeat_times=expand_size
                    )
                elif key == "pixel_values_videos":
                    samples = torch.split(video_grid_thw, list(video_nums))
                    lengths = [torch.prod(sample, dim=1).sum() for sample in samples]
                    dict_to_expand[key] = _repeat_interleave_samples(
                        dict_to_expand[key], lengths=lengths, repeat_times=expand_size
                    )
                elif key == "video_grid_thw":
                    lengths = list(video_nums)
                    dict_to_expand[key] = _repeat_interleave_samples(
                        dict_to_expand[key], lengths=lengths, repeat_times=expand_size
                    )
            return dict_to_expand

        def _expand_dict_for_generation(dict_to_expand):
            for key in dict_to_expand:
                if key == "position_ids" and dict_to_expand[key].ndim == 3:
                    # 3D position ids: the batch axis is dim 1, not dim 0.
                    dict_to_expand[key] = dict_to_expand[key].repeat_interleave(expand_size, dim=1)
                elif (
                    key != "cache_position"
                    and dict_to_expand[key] is not None
                    and isinstance(dict_to_expand[key], torch.Tensor)
                    and key not in visual_keys
                ):
                    dict_to_expand[key] = dict_to_expand[key].repeat_interleave(expand_size, dim=0)
            return dict_to_expand

        model_kwargs = _expand_dict_for_generation_visual(model_kwargs)

        if input_ids is not None:
            input_ids = input_ids.repeat_interleave(expand_size, dim=0)

        model_kwargs = _expand_dict_for_generation(model_kwargs)

        if is_encoder_decoder:
            if model_kwargs.get("encoder_outputs") is None:
                raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.")
            model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"])

        return input_ids, model_kwargs


class Qwen3VLProcessorKwargs(ProcessingKwargs, total=False):
    _defaults = {
        "text_kwargs": {
            "padding": False,
            "return_token_type_ids": False,
            "return_mm_token_type_ids": True,
        },
        "videos_kwargs": {"return_metadata": True},
    }


class Qwen3VLProcessor(Qwen2VLProcessor):
    def __init__(self, image_processor=None, tokenizer=None, video_processor=None, chat_template=None, **kwargs):
        super().__init__(image_processor, tokenizer, video_processor, chat_template, **kwargs)
        # Resolve the vision start/end tokens and their ids, falling back to the
        # canonical literals when the tokenizer does not declare them.
        self.vision_start_token = (
            "<|vision_start|>" if not hasattr(tokenizer, "vision_start_token") else tokenizer.vision_start_token
        )
        self.vision_end_token = (
            "<|vision_end|>" if not hasattr(tokenizer, "vision_end_token") else tokenizer.vision_end_token
        )
        self.vision_start_token_id = (
            tokenizer.vision_start_token_id
            if getattr(tokenizer, "vision_start_token_id", None)
            else tokenizer.convert_tokens_to_ids(self.vision_start_token)
        )
        self.vision_end_token_id = (
            tokenizer.vision_end_token_id
            if getattr(tokenizer, "vision_end_token_id", None)
            else tokenizer.convert_tokens_to_ids(self.vision_end_token)
        )

    def __call__(
        self,
        images: ImageInput = None,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] = None,
        videos: VideoInput = None,
        **kwargs: Unpack[Qwen3VLProcessorKwargs],
    ) -> BatchFeature:
        r"""
        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
            - **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
            - **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`.
            - **video_grid_thw** -- List of video 3D grid in LLM. Returned when `videos` is not `None`.
        """
        output_kwargs = self._merge_kwargs(
            Qwen3VLProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        if images is not None:
            image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
            image_grid_thw = image_inputs["image_grid_thw"]
        else:
            image_inputs = {}
            image_grid_thw = None

        if videos is not None:
            videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"])
            video_grid_thw = videos_inputs["video_grid_thw"]

            # If user has not requested video metadata, pop it
            if not kwargs.get("return_metadata"):
                video_metadata = videos_inputs.pop("video_metadata")
            else:
                video_metadata = videos_inputs["video_metadata"]
        else:
            videos_inputs = {}
            video_grid_thw = None

        if not isinstance(text, list):
            text = [text]

        text = text.copy()  # below lines change text in-place

        if image_grid_thw is not None:
            merge_length = self.image_processor.merge_size**2
            index = 0
            for i in range(len(text)):
                # Expand each image token to one placeholder per merged vision token,
                # then swap the placeholders back to the image token literal.
                while self.image_token in text[i]:
                    num_image_tokens = image_grid_thw[index].prod() // merge_length
                    text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1)
                    index += 1
                text[i] = text[i].replace("<|placeholder|>", self.image_token)

        if video_grid_thw is not None:
            merge_length = self.video_processor.merge_size**2
            index = 0
            for i in range(len(text)):
                while self.video_token in text[i]:
                    metadata = video_metadata[index]
                    if metadata.fps is None:
                        logger.warning_once(
                            "Qwen3VL requires frame timestamps to construct prompts, but the `fps` of the input video could not be inferred. "
                            "Probably `video_metadata` was missing from inputs and you passed pre-sampled frames. "
                            "Defaulting to `fps=24`. Please provide `video_metadata` for more accurate results."
                        )
                        metadata.fps = 24 if metadata.fps is None else metadata.fps

                    # if timestamps are not provided, calculate them
                    curr_timestamp = self._calculate_timestamps(
                        metadata.frames_indices,
                        metadata.fps,
                        self.video_processor.temporal_patch_size,
                    )

                    # Each frame contributes a "<T.T seconds>" prefix plus its own
                    # vision_start/placeholder/vision_end segment.
                    video_placeholder = ""
                    frame_seqlen = video_grid_thw[index][1:].prod() // merge_length
                    for frame_idx in range(video_grid_thw[index][0]):
                        curr_time = curr_timestamp[frame_idx]
                        video_placeholder += f"<{curr_time:.1f} seconds>"
                        video_placeholder += (
                            self.vision_start_token + "<|placeholder|>" * frame_seqlen + self.vision_end_token
                        )

                    if f"{self.vision_start_token}{self.video_token}{self.vision_end_token}" in text[i]:
                        text[i] = text[i].replace(
                            f"{self.vision_start_token}{self.video_token}{self.vision_end_token}", video_placeholder, 1
                        )
                    else:
                        # vllm may input video token directly
                        text[i] = text[i].replace(self.video_token, video_placeholder, 1)
                    index += 1

                text[i] = text[i].replace("<|placeholder|>", self.video_token)

        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
        return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", None)
        text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
        self._check_special_mm_tokens(text, text_inputs, modalities=["image", "video"])

        if return_mm_token_type_ids:
            # Token type ids: 0 = text, 1 = image token, 2 = video token.
            array_ids = np.array(text_inputs["input_ids"])
            mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
            mm_token_type_ids[array_ids == self.image_token_id] = 1
            mm_token_type_ids[array_ids == self.video_token_id] = 2
            text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()

        return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors)

    def _calculate_timestamps(self, indices: list[int] | np.ndarray, video_fps: float, merge_size: int = 2):
        """Convert sampled frame indices to second timestamps, one per temporal merge group."""
        if not isinstance(indices, list):
            indices = indices.tolist()
        if len(indices) % merge_size != 0:
            # Pad with the last index so the list divides evenly into merge groups.
            indices.extend(indices[-1] for _ in range(merge_size - len(indices) % merge_size))
        timestamps = [idx / video_fps for idx in indices]
        # @JJJYmmm frames are merged by self.merge_size, \
        # so we need to average the timestamps between the first/last frame within the temporal patch
        timestamps = [
            (timestamps[i] + timestamps[i + merge_size - 1]) / 2 for i in range(0, len(timestamps), merge_size)
        ]
        return timestamps


__all__ = [
    "Qwen3VLConfig",
    "Qwen3VLTextConfig",
    "Qwen3VLVisionModel",
    "Qwen3VLForConditionalGeneration",
    "Qwen3VLModel",
    "Qwen3VLPreTrainedModel",
    "Qwen3VLProcessor",
    "Qwen3VLTextModel",
]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/qwen3_vl/modular_qwen3_vl.py", "license": "Apache License 2.0", "lines": 1219, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/qwen3_vl/video_processing_qwen3_vl.py
# Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """video processor class for Qwen3-VL.""" import math import numpy as np import torch from ...feature_extraction_utils import BatchFeature from ...image_utils import ChannelDimension, PILImageResampling, SizeDict, get_image_size from ...processing_utils import Unpack, VideosKwargs from ...utils import TensorType, add_start_docstrings, logging from ...video_processing_utils import BASE_VIDEO_PROCESSOR_DOCSTRING, BaseVideoProcessor from ...video_utils import VideoMetadata, group_videos_by_shape, reorder_videos logger = logging.get_logger(__name__) def smart_resize( num_frames: int, height: int, width: int, temporal_factor: int = 2, factor: int = 32, min_pixels: int = 128 * 128, max_pixels: int = 16 * 16 * 2 * 2 * 2 * 6144, ): if height < factor or width < factor: raise ValueError(f"height:{height} or width:{width} must be larger than factor:{factor}") elif max(height, width) / min(height, width) > 200: raise ValueError( f"absolute aspect ratio must be smaller than 200, got {max(height, width) / min(height, width)}" ) h_bar = round(height / factor) * factor w_bar = round(width / factor) * factor t_bar = math.ceil(num_frames / temporal_factor) * temporal_factor if t_bar * h_bar * w_bar > max_pixels: beta = math.sqrt((num_frames * height * width) / max_pixels) h_bar = max(factor, math.floor(height / beta / factor) * factor) w_bar = max(factor, 
math.floor(width / beta / factor) * factor) elif t_bar * h_bar * w_bar < min_pixels: beta = math.sqrt(min_pixels / (num_frames * height * width)) h_bar = math.ceil(height * beta / factor) * factor w_bar = math.ceil(width * beta / factor) * factor return h_bar, w_bar class Qwen3VLVideoProcessorInitKwargs(VideosKwargs, total=False): patch_size: int temporal_patch_size: int merge_size: int min_frames: int max_frames: int @add_start_docstrings( "Constructs a fast Qwen3-VL image processor that dynamically resizes videos based on the original videos.", BASE_VIDEO_PROCESSOR_DOCSTRING, """ patch_size (`int`, *optional*, defaults to 16): The spacial patch size of the vision encoder. temporal_patch_size (`int`, *optional*, defaults to 2): The temporal patch size of the vision encoder. merge_size (`int`, *optional*, defaults to 2): The merge size of the vision encoder to llm encoder. """, ) class Qwen3VLVideoProcessor(BaseVideoProcessor): resample = PILImageResampling.BICUBIC size = {"shortest_edge": 128 * 32 * 32, "longest_edge": 32 * 32 * 768} image_mean = [0.5, 0.5, 0.5] image_std = [0.5, 0.5, 0.5] do_resize = True do_rescale = True do_normalize = True do_convert_rgb = True patch_size = 16 temporal_patch_size = 2 merge_size = 2 fps = 2 min_frames = 4 max_frames = 768 do_sample_frames = True valid_kwargs = Qwen3VLVideoProcessorInitKwargs model_input_names = ["pixel_values_videos", "video_grid_thw"] def __init__(self, **kwargs: Unpack[Qwen3VLVideoProcessorInitKwargs]): super().__init__(**kwargs) if self.size is not None and ( self.size.get("shortest_edge", None) is None or self.size.get("longest_edge", None) is None ): raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.") def _further_process_kwargs( self, size: SizeDict | None = None, **kwargs, ) -> dict: """ Update kwargs that need further processing before being validated Can be overridden by subclasses to customize the processing of kwargs. 
""" if size is not None and ("shortest_edge" not in size or "longest_edge" not in size): raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.") return super()._further_process_kwargs(size=size, **kwargs) def sample_frames( self, metadata: VideoMetadata, num_frames: int | None = None, fps: int | float | None = None, **kwargs, ): """ Default sampling function which uniformly samples the desired number of frames between 0 and total number of frames. If `fps` is passed along with metadata, `fps` frames per second are sampled uniformty. Arguments `num_frames` and `fps` are mutually exclusive. Args: video (`torch.Tensor`): Video that need to be sampled. metadata (`VideoMetadata`): Metadata of the video containing information about total duration, fps and total number of frames. num_frames (`int`, *optional*): Maximum number of frames to sample. Defaults to `self.num_frames`. fps (`int` or `float`, *optional*): Target frames to sample per second. Defaults to `self.fps`. Returns: torch.Tensor: Sampled video frames. """ if fps is not None and num_frames is not None: raise ValueError("`num_frames` and `fps` are mutually exclusive arguments, please use only one!") total_num_frames = metadata.total_num_frames fps = fps if fps is not None else self.fps # If num_frames is not given but fps is, calculate num_frames from fps if num_frames is None and fps is not None: if metadata.fps is None: metadata.fps = 24 logger.warning_once( "Asked to sample `fps` frames per second but no video metadata was provided which is required when sampling with `fps`. " "Defaulting to `fps=24`. Please provide `video_metadata` for more accurate results." 
) num_frames = int(total_num_frames / metadata.fps * fps) num_frames = min(max(num_frames, self.min_frames), self.max_frames, total_num_frames) if num_frames is None: num_frames = min(max(total_num_frames, self.min_frames), self.max_frames) indices = np.linspace(0, total_num_frames - 1, num_frames).round().astype(int) return indices def _preprocess( self, videos: list[torch.Tensor], do_convert_rgb: bool = True, do_resize: bool = True, size: SizeDict | None = None, interpolation: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: float = 1 / 255.0, do_normalize: bool = True, image_mean: float | list[float] | None = None, image_std: float | list[float] | None = None, patch_size: int | None = None, temporal_patch_size: int | None = None, merge_size: int | None = None, return_tensors: str | TensorType | None = None, **kwargs, ): grouped_videos, grouped_videos_index = group_videos_by_shape(videos) resized_videos_grouped = {} for shape, stacked_videos in grouped_videos.items(): B, T, C, H, W = stacked_videos.shape num_frames, height, width = T, H, W if do_resize: resized_height, resized_width = smart_resize( num_frames=num_frames, height=height, width=width, temporal_factor=temporal_patch_size, factor=patch_size * merge_size, min_pixels=size.shortest_edge, max_pixels=size.longest_edge, ) stacked_videos = stacked_videos.view(B * T, C, H, W) stacked_videos = self.resize( stacked_videos, size=SizeDict(height=resized_height, width=resized_width), interpolation=interpolation, ) stacked_videos = stacked_videos.view(B, T, C, resized_height, resized_width) resized_videos_grouped[shape] = stacked_videos resized_videos = reorder_videos(resized_videos_grouped, grouped_videos_index) # Group videos by size for further processing # Needed in case do_resize is False, or resize returns videos with different sizes grouped_videos, grouped_videos_index = group_videos_by_shape(resized_videos) processed_videos_grouped = {} processed_grids = {} for 
shape, stacked_videos in grouped_videos.items(): resized_height, resized_width = get_image_size(stacked_videos[0], channel_dim=ChannelDimension.FIRST) # Fused rescale and normalize stacked_videos = self.rescale_and_normalize( stacked_videos, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) patches = stacked_videos # Check that videos have `num_frames` divisible by `temporal_patch_size` T = patches.shape[1] if pad := -T % temporal_patch_size: repeats = patches[:, -1:].expand(-1, pad, -1, -1, -1) patches = torch.cat((patches, repeats), dim=1) batch_size, grid_t, channel = patches.shape[:3] grid_t = grid_t // temporal_patch_size grid_h, grid_w = resized_height // patch_size, resized_width // patch_size patches = patches.view( batch_size, grid_t, temporal_patch_size, channel, grid_h // merge_size, merge_size, patch_size, grid_w // merge_size, merge_size, patch_size, ) patches = patches.permute(0, 1, 4, 7, 5, 8, 3, 2, 6, 9) flatten_patches = patches.reshape( batch_size, grid_t * grid_h * grid_w, channel * temporal_patch_size * patch_size * patch_size, ) processed_videos_grouped[shape] = flatten_patches processed_grids[shape] = [[grid_t, grid_h, grid_w]] * batch_size processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index) processed_grids = reorder_videos(processed_grids, grouped_videos_index) pixel_values_videos = torch.cat(processed_videos, dim=0) video_grid_thw = torch.tensor(processed_grids) data = { "pixel_values_videos": pixel_values_videos, "video_grid_thw": video_grid_thw, } return BatchFeature(data=data, tensor_type=return_tensors) __all__ = ["Qwen3VLVideoProcessor"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/qwen3_vl/video_processing_qwen3_vl.py", "license": "Apache License 2.0", "lines": 239, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/qwen3_vl_moe/modular_qwen3_vl_moe.py
# Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Qwen3-VL-MOE model.""" import torch import torch.nn as nn import torch.nn.functional as F from ... import initialization as init from ...cache_utils import Cache, DynamicCache from ...configuration_utils import PreTrainedConfig from ...masking_utils import create_causal_mask from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_outputs import MoeModelOutputWithPast from ...modeling_rope_utils import RopeParameters from ...modeling_utils import PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, can_return_tuple, logging from ...utils.output_capturing import OutputRecorder from ..qwen3_moe.modeling_qwen3_moe import ( Qwen3MoeDecoderLayer, Qwen3MoeExperts, Qwen3MoePreTrainedModel, Qwen3MoeRMSNorm, Qwen3MoeSparseMoeBlock, load_balancing_loss_func, ) from ..qwen3_vl.configuration_qwen3_vl import Qwen3VLConfig, Qwen3VLVisionConfig from ..qwen3_vl.modeling_qwen3_vl import ( Qwen3VLCausalLMOutputWithPast, Qwen3VLForConditionalGeneration, Qwen3VLModelOutputWithPast, Qwen3VLTextAttention, Qwen3VLTextModel, Qwen3VLVisionAttention, Qwen3VLVisionBlock, Qwen3VLVisionModel, Qwen3VLVisionRotaryEmbedding, ) logger = logging.get_logger(__name__) class Qwen3VLMoeTextConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`Qwen3VLMoeTextModel`]. 
It is used to instantiate a Qwen3-VL-MOE model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of Qwen3-VL-30B-A3B-Instruct [Qwen/Qwen3-VL-30B-A3B-Instruct](https://huggingface.co/Qwen/Qwen3-VL-30B-A3B-Instruct). Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 151936): Vocabulary size of the Qwen2MoE model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Qwen2MoeModel`] hidden_size (`int`, *optional*, defaults to 2048): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 5632): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. num_key_value_heads (`int`, *optional*, defaults to 16): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details checkout [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. 
max_position_embeddings (`int`, *optional*, defaults to 128000): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. decoder_sparse_step (`int`, *optional*, defaults to 1): The frequency of the MoE layer. moe_intermediate_size (`int`, *optional*, defaults to 1408): Intermediate size of the routed expert. num_experts_per_tok (`int`, *optional*, defaults to 4): Number of selected experts. num_experts (`int`, *optional*, defaults to 60): Number of routed experts. mlp_only_layers (`List[int]`, *optional*, defaults to `[]`): Indicate which layers use Qwen3VLMoeMLP rather than Qwen3VLMoeSparseMoeBlock The list contains layer index, from 0 to num_layers-1 if we have num_layers layers If `mlp_only_layers` is empty, `decoder_sparse_step` is used to determine the sparsity. rope_parameters (`RopeParameters`, *optional*): Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with longer `max_position_embeddings`. head_dim (`int`, *optional*): The dimension of the head. If not specified, will default to `hidden_size // num_attention_heads`. 
pad_token_id (`int`, *optional*): The id of the padding token. ```python >>> from transformers import Qwen3VLMoeForConditionalGeneration, Qwen3VLMoeConfig >>> # Initializing a Qwen3VLMoe style configuration >>> configuration = Qwen3VLMoeConfig() >>> # Initializing a model from the Qwen3-VL-30B-A3B style configuration >>> model = Qwen3VLMoeForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "qwen3_vl_moe_text" base_config_key = "text_config" keys_to_ignore_at_inference = ["past_key_values"] default_theta = 500000.0 # Default tensor parallel plan for base model `Qwen3VLMoe` base_model_tp_plan = { "layers.*.self_attn.q_proj": "colwise", "layers.*.self_attn.k_proj": "colwise", "layers.*.self_attn.v_proj": "colwise", "layers.*.self_attn.o_proj": "rowwise", "layers.*.mlp.gate_proj": "colwise", "layers.*.mlp.up_proj": "colwise", "layers.*.mlp.down_proj": "rowwise", } base_model_pp_plan = { "embed_tokens": (["input_ids"], ["inputs_embeds"]), "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), "norm": (["hidden_states"], ["hidden_states"]), } def __init__( self, vocab_size: int | None = 151936, hidden_size: int | None = 2048, intermediate_size: int | None = 5632, num_hidden_layers: int | None = 24, num_attention_heads: int | None = 16, num_key_value_heads: int | None = 16, hidden_act: str | None = "silu", max_position_embeddings: int | None = 128000, initializer_range: float | None = 0.02, rms_norm_eps: float | None = 1e-6, use_cache: bool | None = True, attention_bias: bool | None = False, attention_dropout: float | None = 0.0, decoder_sparse_step: int | None = 1, moe_intermediate_size: int | None = 1408, num_experts_per_tok: int | None = 4, num_experts: int | None = 60, mlp_only_layers: list[int] | None = None, rope_parameters: RopeParameters | None = None, head_dim: int | None = None, pad_token_id: int | None = None, **kwargs, ): self.vocab_size = vocab_size 
self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.head_dim = head_dim or hidden_size // num_attention_heads self.rope_parameters = rope_parameters # MoE arguments self.decoder_sparse_step = decoder_sparse_step self.moe_intermediate_size = moe_intermediate_size self.num_experts_per_tok = num_experts_per_tok self.num_experts = num_experts self.mlp_only_layers = [] if mlp_only_layers is None else mlp_only_layers self.pad_token_id = pad_token_id super().__init__( ignore_keys_at_rope_validation={"mrope_section", "mrope_interleaved"}, **kwargs, ) class Qwen3VLMoeVisionConfig(Qwen3VLVisionConfig): pass class Qwen3VLMoeConfig(Qwen3VLConfig): r""" This is the configuration class to store the configuration of a [`Qwen3VLMoeModel`]. It is used to instantiate a Qwen3-VL-MOE model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of Qwen3-VL-30B-A3B-Instruct [Qwen/Qwen3-VL-30B-A3B-Instruct](https://huggingface.co/Qwen/Qwen3-VL-30B-A3B-Instruct). Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Qwen3VLMoeTextConfig`): The config object or dictionary of the text backbone. 
vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Qwen3VLMoeVisionConfig`): The config object or dictionary of the vision backbone. image_token_id (`int`, *optional*, defaults to 151655): The image token index to encode the image prompt. video_token_id (`int`, *optional*, defaults to 151656): The video token index to encode the image prompt. vision_start_token_id (`int`, *optional*, defaults to 151652): The start token index to encode the image prompt. vision_end_token_id (`int`, *optional*, defaults to 151653): The end token index to encode the image prompt. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied. ```python >>> from transformers import Qwen3VLMoeForConditionalGeneration, Qwen3VLMoeConfig >>> # Initializing a Qwen3-VL-MOE style configuration >>> configuration = Qwen3VLMoeConfig() >>> # Initializing a model from the Qwen3-VL-30B-A3B style configuration >>> model = Qwen3VLMoeForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "qwen3_vl_moe" sub_configs = {"vision_config": Qwen3VLMoeVisionConfig, "text_config": Qwen3VLMoeTextConfig} class Qwen3VLMoeTextRMSNorm(Qwen3MoeRMSNorm): pass class Qwen3VLMoeTextExperts(Qwen3MoeExperts): pass class Qwen3VLMoeTextTopKRouter(nn.Module): def __init__(self, config): super().__init__() self.top_k = config.num_experts_per_tok self.num_experts = config.num_experts self.hidden_dim = config.hidden_size self.weight = nn.Parameter(torch.zeros(self.num_experts, self.hidden_dim)) def forward(self, hidden_states): hidden_states = hidden_states.reshape(-1, self.hidden_dim) router_logits = F.linear(hidden_states, self.weight) # (seq_len, num_experts) router_logits = torch.nn.functional.softmax(router_logits, dtype=torch.float, dim=-1) router_top_value, router_indices = torch.topk(router_logits, self.top_k, dim=-1) # (seq_len, top_k) router_top_value 
/= router_top_value.sum(dim=-1, keepdim=True) router_top_value = router_top_value.to(router_logits.dtype) router_scores = router_top_value return router_logits, router_scores, router_indices class Qwen3VLMoeTextSparseMoeBlock(Qwen3MoeSparseMoeBlock): pass class Qwen3VLMoeTextAttention(Qwen3VLTextAttention): pass class Qwen3VLMoeTextDecoderLayer(Qwen3MoeDecoderLayer): pass class Qwen3VLMoePreTrainedModel(Qwen3MoePreTrainedModel): config: Qwen3VLMoeConfig input_modalities = ("text", "image", "video") _no_split_modules = ["Qwen3VLMoeTextDecoderLayer", "Qwen3VLMoeVisionBlock"] @torch.no_grad() def _init_weights(self, module): """Initialize the weights.""" PreTrainedModel._init_weights(self, module) if hasattr(self.config, "initializer_range"): std = self.config.initializer_range else: std = getattr(self.config.get_text_config(), "initializer_range", 0.02) if isinstance(module, Qwen3VLMoeTextExperts): init.normal_(module.gate_up_proj, mean=0.0, std=std) init.normal_(module.down_proj, mean=0.0, std=std) elif isinstance(module, Qwen3VLMoeTextTopKRouter): init.normal_(module.weight, mean=0.0, std=std) elif isinstance(module, Qwen3VLMoeVisionRotaryEmbedding): inv_freq = 1.0 / (module.theta ** (torch.arange(0, module.dim, 2, dtype=torch.float) / module.dim)) init.copy_(module.inv_freq, inv_freq) class Qwen3VLMoeVisionRotaryEmbedding(Qwen3VLVisionRotaryEmbedding): pass class Qwen3VLMoeVisionAttention(Qwen3VLVisionAttention): pass class Qwen3VLMoeVisionBlock(Qwen3VLVisionBlock): pass class Qwen3VLMoeVisionModel(Qwen3VLVisionModel): _can_record_outputs = { "router_logits": OutputRecorder(Qwen3VLMoeTextTopKRouter, layer_name="mlp.gate", index=0), "hidden_states": Qwen3VLMoeVisionBlock, "attentions": Qwen3VLMoeVisionAttention, } class Qwen3VLMoeTextModel(Qwen3VLTextModel): def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: Cache | None = None, inputs_embeds: 
torch.FloatTensor | None = None, use_cache: bool | None = None, cache_position: torch.LongTensor | None = None, # args for deepstack visual_pos_masks: torch.Tensor | None = None, deepstack_visual_embeds: list[torch.Tensor] | None = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple | MoeModelOutputWithPast: r""" visual_pos_masks (`torch.Tensor` of shape `(batch_size, seqlen)`, *optional*): The mask of the visual positions. deepstack_visual_embeds (`list[torch.Tensor]`, *optional*): The deepstack visual embeddings. The shape is (num_layers, visual_seqlen, embed_dim). The feature is extracted from the different visual encoder layers, and fed to the decoder hidden states. It's from the paper DeepStack(https://arxiv.org/abs/2406.04334). """ if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") # torch.jit.trace() doesn't support cache objects in the output if use_cache and past_key_values is None and not torch.jit.is_tracing(): past_key_values = DynamicCache(config=self.config) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) # the hard coded `4` is for text, temporal, height and width. 
if position_ids is None: position_ids = cache_position.view(1, 1, -1).expand(4, inputs_embeds.shape[0], -1) elif position_ids.ndim == 2: position_ids = position_ids[None, ...].expand(4, position_ids.shape[0], -1) if position_ids.ndim == 3 and position_ids.shape[0] == 4: text_position_ids = position_ids[0] position_ids = position_ids[1:] else: text_position_ids = None attention_mask = create_causal_mask( config=self.config, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=text_position_ids, ) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) # decoder layers for layer_idx, decoder_layer in enumerate(self.layers): layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, position_ids=text_position_ids, past_key_values=past_key_values, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = layer_outputs # add visual features to the hidden states of first several layers if deepstack_visual_embeds is not None and layer_idx in range(len(deepstack_visual_embeds)): hidden_states = self._deepstack_process( hidden_states, visual_pos_masks, deepstack_visual_embeds[layer_idx], ) hidden_states = self.norm(hidden_states) return MoeModelOutputWithPast( # only diff with Qwen3VLTextModel last_hidden_state=hidden_states, past_key_values=past_key_values, ) class Qwen3VLMoeModelOutputWithPast(Qwen3VLModelOutputWithPast): router_logits: tuple[torch.FloatTensor] | None = None class Qwen3VLMoeCausalLMOutputWithPast(Qwen3VLCausalLMOutputWithPast): router_logits: tuple[torch.FloatTensor] | None = None aux_loss: torch.FloatTensor | None = None class Qwen3VLMoeForConditionalGeneration(Qwen3VLForConditionalGeneration): @can_return_tuple def forward( self, input_ids: torch.LongTensor = None, attention_mask: torch.Tensor | None = 
None, position_ids: torch.LongTensor | None = None, past_key_values: Cache | None = None, inputs_embeds: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, pixel_values: torch.Tensor | None = None, pixel_values_videos: torch.FloatTensor | None = None, image_grid_thw: torch.LongTensor | None = None, video_grid_thw: torch.LongTensor | None = None, mm_token_type_ids: torch.IntTensor | None = None, cache_position: torch.LongTensor | None = None, logits_to_keep: int | torch.Tensor = 0, **kwargs: Unpack[TransformersKwargs], ) -> tuple | Qwen3VLMoeCausalLMOutputWithPast: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): The temporal, height and width of feature shape of each image in LLM. video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): The temporal, height and width of feature shape of each video in LLM. 
Example: ```python >>> from PIL import Image >>> import httpx >>> from io import BytesIO >>> from transformers import AutoProcessor, Qwen3VLMoeForConditionalGeneration >>> model = Qwen3VLMoeForConditionalGeneration.from_pretrained("Qwen/Qwen3-VL-30B-A3B-Instruct", dtype="auto", device_map="auto") >>> processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-30B-A3B-Instruct") >>> messages = [ { "role": "user", "content": [ { "type": "image", "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg", }, {"type": "text", "text": "Describe this image in short."}, ], } ] >>> # Preparation for inference >>> inputs = processor.apply_chat_template( messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt" ) >>> inputs = inputs.to(model.device) >>> # Generate >>> generated_ids = model.generate(**inputs, max_new_tokens=128) >>> generated_ids_trimmed = [ out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) ] >>> processor.batch_decode(generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "A woman in a plaid shirt sits on a sandy beach at sunset, smiling as she gives a high-five to a yellow Labrador Retriever wearing a harness. The ocean waves roll in the background." 
```""" outputs = self.model( input_ids=input_ids, pixel_values=pixel_values, pixel_values_videos=pixel_values_videos, image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, mm_token_type_ids=mm_token_type_ids, position_ids=position_ids, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, cache_position=cache_position, **kwargs, ) hidden_states = outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size) aux_loss = None if kwargs.get("output_router_logits", False): aux_loss = load_balancing_loss_func( outputs.router_logits, self.config.text_config.num_experts, self.config.text_config.num_experts_per_tok, attention_mask, ) if labels is not None: loss += self.config.text_config.router_aux_loss_coef * aux_loss.to( loss.device ) # make sure to reside in the same device return Qwen3VLMoeCausalLMOutputWithPast( loss=loss, aux_loss=aux_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, rope_deltas=outputs.rope_deltas, router_logits=outputs.router_logits, ) __all__ = [ "Qwen3VLMoeConfig", "Qwen3VLMoeTextConfig", "Qwen3VLMoeVisionModel", "Qwen3VLMoeForConditionalGeneration", "Qwen3VLMoeModel", # noqa "Qwen3VLMoePreTrainedModel", "Qwen3VLMoeTextModel", ]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/qwen3_vl_moe/modular_qwen3_vl_moe.py", "license": "Apache License 2.0", "lines": 489, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:tests/models/qwen3_vl/test_modeling_qwen3_vl.py
# Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Qwen3-VL model.""" import copy import unittest from transformers import ( Qwen3VLConfig, Qwen3VLForConditionalGeneration, Qwen3VLModel, is_torch_available, ) from transformers.testing_utils import ( require_torch, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, floats_tensor, ids_tensor, ) if is_torch_available(): from transformers.models.qwen3_vl.configuration_qwen3_vl import Qwen3VLTextConfig from transformers.models.qwen3_vl.modeling_qwen3_vl import Qwen3VLTextModel if is_torch_available(): import torch class Qwen3VLVisionText2TextModelTester: def __init__( self, parent, batch_size=3, seq_length=7, num_channels=3, ignore_index=-100, image_size=16, text_config={ "bos_token_id": 0, "eos_token_id": 1, "pad_token_id": 2, "hidden_act": "silu", "head_dim": 8, "hidden_size": 32, "vocab_size": 99, "intermediate_size": 37, "max_position_embeddings": 512, "model_type": "qwen3_vl", "num_attention_heads": 4, "num_hidden_layers": 2, "num_key_value_heads": 2, "rope_theta": 10000, "tie_word_embeddings": True, "rope_parameters": {"rope_type": "default", "mrope_section": [16, 8, 8], "mrope_interleaved": True}, }, vision_config={ "depth": 2, "in_chans": 3, "hidden_act": "gelu_pytorch_tanh", 
"intermediate_size": 32, "out_hidden_size": 32, "hidden_size": 32, "num_heads": 4, "patch_size": 16, "spatial_merge_size": 1, "temporal_patch_size": 2, "num_position_embeddings": 16, "deepstack_visual_indexes": [0, 1], }, image_token_id=3, video_token_id=4, vision_start_token_id=5, vision_end_token_id=6, tie_word_embeddings=True, is_training=True, ): self.parent = parent self.ignore_index = ignore_index self.is_training = is_training self.vision_config = vision_config self.text_config = text_config self.vocab_size = text_config["vocab_size"] self.bos_token_id = text_config["bos_token_id"] self.eos_token_id = text_config["eos_token_id"] self.pad_token_id = text_config["pad_token_id"] self.head_dim = text_config["head_dim"] self.hidden_size = text_config["hidden_size"] self.intermediate_size = text_config["intermediate_size"] self.num_hidden_layers = text_config["num_hidden_layers"] self.num_attention_heads = text_config["num_attention_heads"] self.num_key_value_heads = text_config["num_key_value_heads"] self.rope_theta = text_config["rope_theta"] self.rope_parameters = text_config["rope_parameters"] self.hidden_act = text_config["hidden_act"] self.max_position_embeddings = text_config["max_position_embeddings"] self.model_type = text_config["model_type"] self.vision_start_token_id = vision_start_token_id self.vision_end_token_id = vision_end_token_id self.image_token_id = image_token_id self.video_token_id = video_token_id self.tie_word_embeddings = tie_word_embeddings self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.num_image_tokens = 32 self.seq_length = seq_length + self.num_image_tokens def get_config(self): return Qwen3VLConfig( text_config=self.text_config, vision_config=self.vision_config, image_token_id=self.image_token_id, video_token_id=self.video_token_id, vision_start_token_id=self.vision_start_token_id, vision_end_token_id=self.vision_end_token_id, tie_word_embeddings=self.tie_word_embeddings, ) def 
prepare_config_and_inputs(self): config = self.get_config() patch_size = config.vision_config.patch_size temporal_patch_size = config.vision_config.temporal_patch_size pixel_values = floats_tensor( [ self.batch_size * (self.image_size**2) // (patch_size**2), self.num_channels * (patch_size**2) * temporal_patch_size, ] ) return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) input_ids[:, -1] = self.pad_token_id input_ids[input_ids == self.video_token_id] = self.pad_token_id input_ids[input_ids == self.image_token_id] = self.pad_token_id input_ids[input_ids == self.vision_start_token_id] = self.pad_token_id input_ids[:, self.num_image_tokens] = self.image_token_id input_ids[:, self.num_image_tokens - 1] = self.vision_start_token_id mm_token_type_ids = torch.zeros_like(input_ids) mm_token_type_ids[:, self.num_image_tokens] = 1 inputs_dict = { "pixel_values": pixel_values, "image_grid_thw": torch.tensor([[1, 1, 1]] * self.batch_size, device=torch_device), "input_ids": input_ids, "attention_mask": attention_mask, "mm_token_type_ids": mm_token_type_ids, } return config, inputs_dict @require_torch class Qwen3VLModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): """ Model tester for `Qwen3VLForConditionalGeneration`. 
""" all_model_classes = ( ( Qwen3VLModel, Qwen3VLForConditionalGeneration, ) if is_torch_available() else () ) def setUp(self): self.model_tester = Qwen3VLVisionText2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=Qwen3VLConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_mismatching_num_image_tokens(self): """ Tests that VLMs through an error with explicit message saying what is wrong when number of images don't match number of image tokens in the text. Also we need to test multi-image cases when one prompr has multiple image tokens. """ config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config).to(torch_device) model.eval() _ = model(**input_dict) # successful forward with no modifications curr_input_dict = copy.deepcopy(input_dict) # remove one image but leave the image token in text patch_size = config.vision_config.patch_size one_img_length = (self.model_tester.image_size**2) // (patch_size**2) curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-one_img_length:, ...] curr_input_dict["image_grid_thw"] = curr_input_dict["image_grid_thw"][-1:, ...] 
with self.assertRaises(ValueError): _ = model(**curr_input_dict) model.base_model.rope_deltas = None # simulate multi-image case by concatenating inputs where each has exactly one image/image-token input_ids = curr_input_dict["input_ids"][:1] pixel_values = curr_input_dict["pixel_values"][:one_img_length] image_grid_thw = curr_input_dict["image_grid_thw"][:1] input_ids = torch.cat([input_ids, input_ids], dim=0) # one image and two image tokens raise an error with self.assertRaises(ValueError): _ = model( input_ids=input_ids, pixel_values=pixel_values, image_grid_thw=image_grid_thw, ) model.base_model.rope_deltas = None # two images and two image tokens don't raise an error pixel_values = torch.cat([pixel_values, pixel_values], dim=0) image_grid_thw = torch.cat([image_grid_thw, image_grid_thw], dim=0) _ = model( input_ids=input_ids, pixel_values=pixel_values, image_grid_thw=image_grid_thw, ) def test_video_forward(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() B = self.model_tester.batch_size C = config.vision_config.in_chans T = config.vision_config.temporal_patch_size P = config.vision_config.patch_size input_ids = ids_tensor([B, self.model_tester.seq_length], self.model_tester.vocab_size) F = 4 patch_H = self.model_tester.image_size // P patch_W = self.model_tester.image_size // P patch_T = F // T patches_per_video = patch_T * patch_H * patch_W pathed_per_frame = patch_H * patch_W pixel_values_videos = floats_tensor( [ # first dim: batch_size * num_patches B * patches_per_video, # second dim: in_channels * temporal_patch_size * patch_size^2 C * T * (P**2), ] ) # qwen3vl use timestamps for video, so split it into patch_T sub-videos video_grid_thw = torch.tensor([[1, patch_H, patch_W] for _ in range(patch_T)] * B) # sanity check self.assertEqual(pixel_values_videos.shape[0], video_grid_thw.prod(dim=1).sum().item()) # Insert video token sequence input_ids[:, -1] = self.model_tester.pad_token_id input_ids[input_ids == 
self.model_tester.video_token_id] = self.model_tester.pad_token_id input_ids[input_ids == self.model_tester.image_token_id] = self.model_tester.pad_token_id input_ids[input_ids == self.model_tester.vision_start_token_id] = self.model_tester.pad_token_id input_ids[:, self.model_tester.num_image_tokens] = self.model_tester.video_token_id insertion_point = self.model_tester.num_image_tokens self.assertLessEqual((B * patches_per_video) + insertion_point, self.model_tester.seq_length) for b in range(B): # each frame is separated by a vision_start_token_id for frame_idx in range(patch_T): input_ids[b, insertion_point + frame_idx * (pathed_per_frame + 1)] = ( self.model_tester.vision_start_token_id ) input_ids[ b, insertion_point + frame_idx * (pathed_per_frame + 1) + 1 : insertion_point + (frame_idx + 1) * (pathed_per_frame + 1), ] = self.model_tester.video_token_id for model_class in self.all_model_classes: # TODO:we should remove this because we use timestamps for video model = model_class(config).to(torch_device) outputs = model( input_ids=input_ids, pixel_values_videos=pixel_values_videos, video_grid_thw=video_grid_thw, ) self.assertIsNotNone(outputs) @require_torch class Qwen3VLTextModelPositionIdsTest(unittest.TestCase): """Regression tests for text_position_ids extraction (PR #44158).""" def get_text_config(self): return Qwen3VLTextConfig( vocab_size=99, hidden_size=32, intermediate_size=37, num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=2, head_dim=8, hidden_act="silu", max_position_embeddings=512, rope_parameters={"rope_type": "default", "mrope_section": [16, 8, 8], "mrope_interleaved": True}, ) def _make_vision_position_ids(self, batch_size, seq_len): """Create 3D vision position_ids (temporal=0, height=arange, width=arange).""" pos = torch.zeros(3, batch_size, seq_len, dtype=torch.long, device=torch_device) pos[1] = torch.arange(seq_len, device=torch_device).unsqueeze(0).expand(batch_size, -1) pos[2] = torch.arange(seq_len, 
device=torch_device).unsqueeze(0).expand(batch_size, -1) return pos def test_3d_vision_position_ids_no_cache(self): config = self.get_text_config() model = Qwen3VLTextModel(config).to(torch_device).eval() batch_size, seq_len = 2, 10 input_ids = ids_tensor([batch_size, seq_len], config.vocab_size).to(torch_device) vision_position_ids = self._make_vision_position_ids(batch_size, seq_len) with torch.no_grad(): output = model(input_ids=input_ids, position_ids=vision_position_ids, use_cache=False) self.assertEqual(output.last_hidden_state.shape, (batch_size, seq_len, config.hidden_size)) def test_3d_vision_position_ids_produce_finite_output(self): config = self.get_text_config() model = Qwen3VLTextModel(config).to(torch_device).eval() batch_size, seq_len = 2, 8 input_ids = ids_tensor([batch_size, seq_len], config.vocab_size).to(torch_device) vision_position_ids = self._make_vision_position_ids(batch_size, seq_len) with torch.no_grad(): output_3d = model(input_ids=input_ids, position_ids=vision_position_ids, use_cache=False) output_none = model(input_ids=input_ids, position_ids=None, use_cache=False) self.assertTrue(torch.isfinite(output_3d.last_hidden_state).all()) self.assertTrue(torch.isfinite(output_none.last_hidden_state).all()) def test_4d_position_ids_forward(self): config = self.get_text_config() model = Qwen3VLTextModel(config).to(torch_device).eval() batch_size, seq_len = 2, 8 input_ids = ids_tensor([batch_size, seq_len], config.vocab_size).to(torch_device) text_pos = torch.arange(seq_len, device=torch_device).unsqueeze(0).expand(batch_size, -1) spatial_pos = torch.arange(seq_len, device=torch_device).unsqueeze(0).expand(batch_size, -1) zero_pos = torch.zeros(batch_size, seq_len, dtype=torch.long, device=torch_device) position_ids_4d = torch.stack([text_pos, zero_pos, spatial_pos, spatial_pos], dim=0) with torch.no_grad(): output = model(input_ids=input_ids, position_ids=position_ids_4d, use_cache=False) self.assertEqual(output.last_hidden_state.shape, 
(batch_size, seq_len, config.hidden_size)) self.assertTrue(torch.isfinite(output.last_hidden_state).all()) def test_use_cache_true_vs_false_with_vision_position_ids(self): """use_cache should not affect output when 3D vision position_ids are provided.""" config = self.get_text_config() model = Qwen3VLTextModel(config).to(torch_device).eval() batch_size, seq_len = 1, 12 input_ids = ids_tensor([batch_size, seq_len], config.vocab_size).to(torch_device) vision_position_ids = self._make_vision_position_ids(batch_size, seq_len) with torch.no_grad(): output_cached = model(input_ids=input_ids, position_ids=vision_position_ids.clone(), use_cache=True) output_no_cache = model(input_ids=input_ids, position_ids=vision_position_ids.clone(), use_cache=False) torch.testing.assert_close( output_cached.last_hidden_state, output_no_cache.last_hidden_state, atol=1e-5, rtol=1e-5 ) def test_2d_position_ids_forward(self): config = self.get_text_config() model = Qwen3VLTextModel(config).to(torch_device).eval() batch_size, seq_len = 2, 8 input_ids = ids_tensor([batch_size, seq_len], config.vocab_size).to(torch_device) position_ids_2d = torch.arange(seq_len, device=torch_device).unsqueeze(0).expand(batch_size, -1) with torch.no_grad(): output = model(input_ids=input_ids, position_ids=position_ids_2d, use_cache=False) self.assertEqual(output.last_hidden_state.shape, (batch_size, seq_len, config.hidden_size)) self.assertTrue(torch.isfinite(output.last_hidden_state).all())
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/qwen3_vl/test_modeling_qwen3_vl.py", "license": "Apache License 2.0", "lines": 348, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:tests/models/qwen3_vl/test_processing_qwen3_vl.py
# Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest import numpy as np from transformers.testing_utils import require_av, require_torch, require_torchvision, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from transformers import Qwen3VLProcessor if is_torch_available(): import torch @require_vision @require_torch @require_torchvision class Qwen3VLProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = Qwen3VLProcessor model_id = "Qwen/Qwen3-VL-235B-A22B-Instruct" @classmethod def _setup_from_pretrained(cls, model_id, **kwargs): return super()._setup_from_pretrained(model_id, patch_size=4, max_pixels=56 * 56, min_pixels=28 * 28, **kwargs) @classmethod def _setup_test_attributes(cls, processor): cls.image_token = processor.image_token def test_get_num_vision_tokens(self): "Tests general functionality of the helper used internally in vLLM" processor = self.get_processor() output = processor._get_num_multimodal_tokens(image_sizes=[(100, 100), (300, 100), (500, 30)]) self.assertTrue("num_image_tokens" in output) self.assertEqual(len(output["num_image_tokens"]), 3) self.assertTrue("num_image_patches" in output) self.assertEqual(len(output["num_image_patches"]), 3) def test_model_input_names(self): processor = self.get_processor() text = 
self.prepare_text_inputs(modalities=["image", "video"]) image_input = self.prepare_image_inputs() video_inputs = self.prepare_video_inputs() inputs_dict = {"text": text, "images": image_input, "videos": video_inputs} inputs = processor(**inputs_dict, return_tensors="pt", do_sample_frames=False) self.assertSetEqual(set(inputs.keys()), set(processor.model_input_names)) @require_torch @require_av def _test_apply_chat_template( self, modality: str, batch_size: int, return_tensors: str, input_name: str, processor_name: str, input_data: list[str], ): processor = self.get_processor() if processor.chat_template is None: self.skipTest("Processor has no chat template") if processor_name not in self.processor_class.get_attributes(): self.skipTest(f"{processor_name} attribute not present in {self.processor_class}") batch_messages = [ [ { "role": "user", "content": [{"type": "text", "text": "Describe this."}], }, ] ] * batch_size # Test that jinja can be applied formatted_prompt = processor.apply_chat_template(batch_messages, add_generation_prompt=True, tokenize=False) self.assertEqual(len(formatted_prompt), batch_size) # Test that tokenizing with template and directly with `self.tokenizer` gives same output formatted_prompt_tokenized = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, return_tensors=return_tensors ) add_special_tokens = True if processor.tokenizer.bos_token is not None and formatted_prompt[0].startswith(processor.tokenizer.bos_token): add_special_tokens = False tok_output = processor.tokenizer( formatted_prompt, return_tensors=return_tensors, add_special_tokens=add_special_tokens ) expected_output = tok_output.input_ids self.assertListEqual(expected_output.tolist(), formatted_prompt_tokenized.tolist()) # Test that kwargs passed to processor's `__call__` are actually used tokenized_prompt_100 = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, padding="max_length", truncation=True, 
return_tensors=return_tensors, max_length=100, ) self.assertEqual(len(tokenized_prompt_100[0]), 100) # Test that `return_dict=True` returns text related inputs in the dict out_dict_text = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors=return_tensors, ) self.assertTrue(all(key in out_dict_text for key in ["input_ids", "attention_mask"])) self.assertEqual(len(out_dict_text["input_ids"]), batch_size) self.assertEqual(len(out_dict_text["attention_mask"]), batch_size) # Test that with modality URLs and `return_dict=True`, we get modality inputs in the dict for idx, url in enumerate(input_data[:batch_size]): batch_messages[idx][0]["content"] = [batch_messages[idx][0]["content"][0], {"type": modality, "url": url}] out_dict = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors=return_tensors, max_frames=2, # by default no more than 2 frames, otherwise too slow ) input_name = getattr(self, input_name) self.assertTrue(input_name in out_dict) self.assertEqual(len(out_dict["input_ids"]), batch_size) self.assertEqual(len(out_dict["attention_mask"]), batch_size) if modality == "video": # qwen pixels don't scale with bs same way as other models, calculate expected video token count based on video_grid_thw expected_video_token_count = 0 for thw in out_dict["video_grid_thw"]: expected_video_token_count += thw[0] * thw[1] * thw[2] mm_len = expected_video_token_count else: mm_len = batch_size * 192 self.assertEqual(len(out_dict[input_name]), mm_len) return_tensor_to_type = {"pt": torch.Tensor, "np": np.ndarray, None: list} for k in out_dict: self.assertIsInstance(out_dict[k], return_tensor_to_type[return_tensors]) @require_av @unittest.skip("qwen3_vl can't sample frames from image frames directly, user can use `qwen-vl-utils`") def test_apply_chat_template_video_1(self): pass @require_av @unittest.skip("qwen3_vl can't sample frames from 
image frames directly, user can use `qwen-vl-utils`") def test_apply_chat_template_video_2(self): pass @require_av def test_apply_chat_template_video_frame_sampling(self): processor = self.get_processor() if processor.chat_template is None: self.skipTest("Processor has no chat template") signature = inspect.signature(processor.__call__) if "videos" not in {*signature.parameters.keys()} or ( signature.parameters.get("videos") is not None and signature.parameters["videos"].annotation == inspect._empty ): self.skipTest("Processor doesn't accept videos at input") messages = [ [ { "role": "user", "content": [ {"type": "video"}, {"type": "text", "text": "What is shown in this video?"}, ], }, ] ] formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False) self.assertEqual(len(formatted_prompt), 1) formatted_prompt_tokenized = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True) expected_output = processor.tokenizer(formatted_prompt, return_tensors=None).input_ids self.assertListEqual(expected_output, formatted_prompt_tokenized) out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True) self.assertListEqual(list(out_dict.keys()), ["input_ids", "attention_mask", "mm_token_type_ids"]) # for fast test, set the longest edge to 8192 processor.video_processor.size["longest_edge"] = 8192 # Add video URL for return dict and load with `num_frames` arg messages[0][0]["content"][0] = { "type": "video", "url": "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/tiny_video.mp4", } num_frames = 3 out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, num_frames=num_frames, fps=None, # if pass num_frames, fps should be None ) self.assertTrue(self.videos_input_name in out_dict_with_video) self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 256) # Load with 
`fps` arg fps = 1 out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, fps=fps, ) self.assertTrue(self.videos_input_name in out_dict_with_video) self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 224) # Load with `fps` and `num_frames` args, should raise an error with self.assertRaises(ValueError): out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, fps=fps, num_frames=num_frames, ) # Load without any arg should load the whole video out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, ) self.assertTrue(self.videos_input_name in out_dict_with_video) self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 224) # Load video as a list of frames (i.e. images). NOTE: each frame should have same size # because we assume they come from one video messages[0][0]["content"][0] = { "type": "video", "url": [ "https://www.ilankelman.org/stopsigns/australia.jpg", "https://www.ilankelman.org/stopsigns/australia.jpg", ], } out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, do_sample_frames=False, ) self.assertTrue(self.videos_input_name in out_dict_with_video) self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 216) def test_kwargs_overrides_custom_image_processor_kwargs(self): processor = self.get_processor() self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs() image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input, max_pixels=56 * 56 * 4, return_tensors="pt") self.assertEqual(inputs[self.images_input_name].shape[0], 612) inputs = processor(text=input_str, images=image_input, return_tensors="pt") self.assertEqual(inputs[self.images_input_name].shape[0], 100)
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/qwen3_vl/test_processing_qwen3_vl.py", "license": "Apache License 2.0", "lines": 251, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:tests/models/qwen3_vl/test_video_processing_qwen3_vl.py
# Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs if is_torch_available(): from PIL import Image if is_vision_available() and is_torchvision_available(): from transformers import Qwen3VLVideoProcessor from transformers.models.qwen3_vl.video_processing_qwen3_vl import smart_resize class Qwen3VLVideoProcessingTester: def __init__( self, parent, batch_size=5, num_frames=8, num_channels=3, min_resolution=32, max_resolution=80, temporal_patch_size=2, patch_size=16, merge_size=2, do_resize=True, size=None, do_normalize=True, image_mean=IMAGENET_STANDARD_MEAN, image_std=IMAGENET_STANDARD_STD, do_convert_rgb=True, ): size = size if size is not None else {"longest_edge": 20, "shortest_edge": 10} self.parent = parent self.batch_size = batch_size self.num_frames = num_frames self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb self.temporal_patch_size = temporal_patch_size 
self.patch_size = patch_size self.merge_size = merge_size def prepare_video_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, "do_sample_frames": True, } def prepare_video_metadata(self, videos): video_metadata = [] for video in videos: if isinstance(video, list): num_frames = len(video) elif hasattr(video, "shape"): if len(video.shape) == 4: # (T, H, W, C) num_frames = video.shape[0] else: num_frames = 1 else: num_frames = self.num_frames metadata = { "fps": 2, "duration": num_frames / 2, "total_num_frames": num_frames, } video_metadata.append(metadata) return video_metadata def expected_output_video_shape(self, videos): grid_t = self.num_frames // self.temporal_patch_size hidden_dim = self.num_channels * self.temporal_patch_size * self.patch_size * self.patch_size seq_len = 0 for video in videos: if isinstance(video, list) and isinstance(video[0], Image.Image): video = np.stack([np.array(frame) for frame in video]) elif hasattr(video, "shape"): pass else: video = np.array(video) if hasattr(video, "shape") and len(video.shape) >= 3: if len(video.shape) == 4: t, height, width = video.shape[:3] elif len(video.shape) == 3: height, width = video.shape[:2] t = 1 else: t, height, width = self.num_frames, self.min_resolution, self.min_resolution else: t, height, width = self.num_frames, self.min_resolution, self.min_resolution resized_height, resized_width = smart_resize( t, height, width, factor=self.patch_size * self.merge_size, min_pixels=self.size["shortest_edge"], max_pixels=self.size["longest_edge"], ) grid_h, grid_w = resized_height // self.patch_size, resized_width // self.patch_size seq_len += grid_t * grid_h * grid_w return [seq_len, hidden_dim] def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"): videos = prepare_video_inputs( batch_size=self.batch_size, 
num_frames=self.num_frames, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, return_tensors=return_tensors, ) return videos @require_torch @require_vision class Qwen3VLVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase): fast_video_processing_class = Qwen3VLVideoProcessor if is_torchvision_available() else None input_name = "pixel_values_videos" def setUp(self): super().setUp() self.video_processor_tester = Qwen3VLVideoProcessingTester(self) @property def video_processor_dict(self): return self.video_processor_tester.prepare_video_processor_dict() def test_video_processor_from_dict_with_kwargs(self): video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict) self.assertEqual(video_processor.size, {"longest_edge": 20, "shortest_edge": 10}) video_processor = self.fast_video_processing_class.from_dict( self.video_processor_dict, size={"longest_edge": 42, "shortest_edge": 42} ) self.assertEqual(video_processor.size, {"longest_edge": 42, "shortest_edge": 42}) def test_call_pil(self): for video_processing_class in self.video_processor_list: video_processing = video_processing_class(**self.video_processor_dict) video_inputs = self.video_processor_tester.prepare_video_inputs( equal_resolution=False, return_tensors="pil" ) for video in video_inputs: self.assertIsInstance(video[0], Image.Image) video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs) encoded_videos = video_processing( video_inputs[0], video_metadata=[video_metadata[0]], return_tensors="pt" )[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]]) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) encoded_videos = video_processing(video_inputs, video_metadata=video_metadata, return_tensors="pt")[ self.input_name ] expected_output_video_shape = 
self.video_processor_tester.expected_output_video_shape(video_inputs) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) def test_call_numpy(self): for video_processing_class in self.video_processor_list: video_processing = video_processing_class(**self.video_processor_dict) video_inputs = self.video_processor_tester.prepare_video_inputs( equal_resolution=False, return_tensors="np" ) video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs) encoded_videos = video_processing( video_inputs[0], video_metadata=[video_metadata[0]], return_tensors="pt" )[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]]) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) encoded_videos = video_processing(video_inputs, video_metadata=video_metadata, return_tensors="pt")[ self.input_name ] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) def test_call_pytorch(self): for video_processing_class in self.video_processor_list: video_processing = video_processing_class(**self.video_processor_dict) video_inputs = self.video_processor_tester.prepare_video_inputs( equal_resolution=False, return_tensors="pt" ) video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs) encoded_videos = video_processing( video_inputs[0], video_metadata=[video_metadata[0]], return_tensors="pt" )[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]]) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) encoded_videos = video_processing(video_inputs, video_metadata=video_metadata, return_tensors="pt")[ self.input_name ] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs) 
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) @unittest.skip("Skip for now, the test needs adjustment for Qwen3VL") def test_call_numpy_4_channels(self): for video_processing_class in self.video_processor_list: # Test that can process videos which have an arbitrary number of channels # Initialize video_processing video_processor = video_processing_class(**self.video_processor_dict) # create random numpy tensors self.video_processor_tester.num_channels = 4 video_inputs = self.video_processor_tester.prepare_video_inputs( equal_resolution=False, return_tensors="np" ) # Test not batched input encoded_videos = video_processor( video_inputs[0], return_tensors="pt", input_data_format="channels_last", image_mean=(0.0, 0.0, 0.0, 0.0), image_std=(1.0, 1.0, 1.0, 1.0), )[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]]) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) # Test batched encoded_videos = video_processor( video_inputs, return_tensors="pt", input_data_format="channels_last", image_mean=(0.0, 0.0, 0.0, 0.0), image_std=(1.0, 1.0, 1.0, 1.0), )[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) def test_nested_input(self): """Tests that the processor can work with nested list where each video is a list of arrays""" for video_processing_class in self.video_processor_list: video_processing = video_processing_class(**self.video_processor_dict) video_inputs = self.video_processor_tester.prepare_video_inputs( equal_resolution=False, return_tensors="np" ) video_inputs_nested = [list(video) for video in video_inputs] video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs) # Test not batched input encoded_videos = video_processing( video_inputs_nested[0], video_metadata=[video_metadata[0]], 
return_tensors="pt" )[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]]) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) # Test batched encoded_videos = video_processing(video_inputs_nested, video_metadata=video_metadata, return_tensors="pt")[ self.input_name ] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) def test_call_sample_frames(self): for video_processing_class in self.video_processor_list: video_processor_dict = self.video_processor_dict.copy() video_processing = video_processing_class(**video_processor_dict) prev_num_frames = self.video_processor_tester.num_frames self.video_processor_tester.num_frames = 8 prev_min_resolution = getattr(self.video_processor_tester, "min_resolution", None) prev_max_resolution = getattr(self.video_processor_tester, "max_resolution", None) self.video_processor_tester.min_resolution = 56 self.video_processor_tester.max_resolution = 112 video_inputs = self.video_processor_tester.prepare_video_inputs( equal_resolution=False, return_tensors="torch", ) metadata = [[{"total_num_frames": 8, "fps": 4}]] batched_metadata = metadata * len(video_inputs) encoded_videos = video_processing(video_inputs[0], return_tensors="pt", video_metadata=metadata)[ self.input_name ] encoded_videos_batched = video_processing( video_inputs, return_tensors="pt", video_metadata=batched_metadata )[self.input_name] self.assertIsNotNone(encoded_videos) self.assertIsNotNone(encoded_videos_batched) self.assertEqual(len(encoded_videos.shape), 2) self.assertEqual(len(encoded_videos_batched.shape), 2) self.video_processor_tester.num_frames = prev_num_frames if prev_min_resolution is not None: self.video_processor_tester.min_resolution = prev_min_resolution if prev_max_resolution is not None: self.video_processor_tester.max_resolution = 
prev_max_resolution def test_num_frames_equal_temporal_patch_size_plus_two(self): for video_processing_class in self.video_processor_list: video_processor_dict = self.video_processor_dict.copy() video_processor_dict["size"] = {"longest_edge": 5 * 32 * 32, "shortest_edge": 32 * 32} video_processor_dict["do_sample_frames"] = False temporal_patch_size = 3 video_processor_dict["temporal_patch_size"] = temporal_patch_size video_processing = video_processing_class(**video_processor_dict) n, w, h = 5, 32, 32 video_inputs = [(np.random.randint(0, 256, (h, w, 3), dtype=np.uint8)) for _ in range(n)] video_processed = video_processing(video_inputs, return_tensors="pt") encoded_videos = video_processed[self.input_name] self.assertEqual(list(encoded_videos.shape), [8, temporal_patch_size * 3 * 16 * 16]) video_grid_thw = video_processed["video_grid_thw"] self.assertEqual(video_grid_thw.tolist(), [[2, 2, 2]])
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/qwen3_vl/test_video_processing_qwen3_vl.py", "license": "Apache License 2.0", "lines": 300, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:tests/models/qwen3_vl_moe/test_modeling_qwen3_vl_moe.py
# Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Qwen3VLMoe model.""" import copy import unittest import pytest from transformers import ( AutoProcessor, Qwen3VLMoeConfig, Qwen3VLMoeForConditionalGeneration, Qwen3VLMoeModel, is_torch_available, ) from transformers.testing_utils import ( Expectations, cleanup, require_flash_attn, require_torch, require_torch_accelerator, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, floats_tensor, ids_tensor, ) if is_torch_available(): import torch class Qwen3VLMoeVisionText2TextModelTester: def __init__( self, parent, batch_size=3, seq_length=7, num_channels=3, ignore_index=-100, image_size=16, text_config={ "bos_token_id": 0, "eos_token_id": 1, "pad_token_id": 2, "hidden_act": "silu", "hidden_size": 32, "vocab_size": 99, "intermediate_size": 37, "max_position_embeddings": 512, "model_type": "qwen3_vl_moe", "num_attention_heads": 4, "num_key_value_heads": 2, "num_hidden_layers": 2, "moe_intermediate_size": 16, "num_experts_per_tok": 4, "num_experts": 8, "rope_theta": 10000, "tie_word_embeddings": True, "rope_parameters": {"rope_type": "default", "mrope_section": [16, 8, 8], "mrope_interleaved": True}, }, vision_config={ "depth": 2, "in_chans": 3, "hidden_act": "gelu_pytorch_tanh", 
"intermediate_size": 32, "out_hidden_size": 32, "hidden_size": 32, "num_heads": 4, "patch_size": 16, "spatial_merge_size": 1, "temporal_patch_size": 2, "num_position_embeddings": 16, "deepstack_visual_indexes": [0, 1], }, image_token_id=3, video_token_id=4, vision_start_token_id=5, vision_end_token_id=6, tie_word_embeddings=True, is_training=True, ): self.parent = parent self.ignore_index = ignore_index self.is_training = is_training self.vision_config = vision_config self.text_config = text_config self.vocab_size = text_config["vocab_size"] self.bos_token_id = text_config["bos_token_id"] self.eos_token_id = text_config["eos_token_id"] self.pad_token_id = text_config["pad_token_id"] self.hidden_size = text_config["hidden_size"] self.intermediate_size = text_config["intermediate_size"] self.num_hidden_layers = text_config["num_hidden_layers"] self.num_attention_heads = text_config["num_attention_heads"] self.num_key_value_heads = text_config["num_key_value_heads"] self.rope_theta = text_config["rope_theta"] self.rope_parameters = text_config["rope_parameters"] self.hidden_act = text_config["hidden_act"] self.max_position_embeddings = text_config["max_position_embeddings"] self.model_type = text_config["model_type"] self.vision_start_token_id = vision_start_token_id self.vision_end_token_id = vision_end_token_id self.image_token_id = image_token_id self.video_token_id = video_token_id self.tie_word_embeddings = tie_word_embeddings self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.num_image_tokens = 32 self.seq_length = seq_length + self.num_image_tokens def get_config(self): return Qwen3VLMoeConfig( text_config=self.text_config, vision_config=self.vision_config, image_token_id=self.image_token_id, video_token_id=self.video_token_id, vision_start_token_id=self.vision_start_token_id, vision_end_token_id=self.vision_end_token_id, tie_word_embeddings=self.tie_word_embeddings, ) def prepare_config_and_inputs(self): config = 
self.get_config() patch_size = config.vision_config.patch_size temporal_patch_size = config.vision_config.temporal_patch_size pixel_values = floats_tensor( [ self.batch_size * (self.image_size**2) // (patch_size**2), self.num_channels * (patch_size**2) * temporal_patch_size, ] ) return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) input_ids[:, -1] = self.pad_token_id input_ids[input_ids == self.video_token_id] = self.pad_token_id input_ids[input_ids == self.image_token_id] = self.pad_token_id input_ids[input_ids == self.vision_start_token_id] = self.pad_token_id input_ids[:, self.num_image_tokens] = self.image_token_id input_ids[:, self.num_image_tokens - 1] = self.vision_start_token_id mm_token_type_ids = torch.zeros_like(input_ids) mm_token_type_ids[:, self.num_image_tokens] = 1 inputs_dict = { "pixel_values": pixel_values, "image_grid_thw": torch.tensor([[1, 1, 1]] * self.batch_size, device=torch_device), "input_ids": input_ids, "attention_mask": attention_mask, "mm_token_type_ids": mm_token_type_ids, } return config, inputs_dict @require_torch class Qwen3VLMoeModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): """ Model tester for `Qwen3VLMoeForConditionalGeneration`. 
""" all_model_classes = ( ( Qwen3VLMoeModel, Qwen3VLMoeForConditionalGeneration, ) if is_torch_available() else () ) def setUp(self): self.model_tester = Qwen3VLMoeVisionText2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=Qwen3VLMoeConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_mismatching_num_image_tokens(self): """ Tests that VLMs through an error with explicit message saying what is wrong when number of images don't match number of image tokens in the text. Also we need to test multi-image cases when one prompr has multiple image tokens. """ config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config).to(torch_device) model.eval() _ = model(**input_dict) # successful forward with no modifications curr_input_dict = copy.deepcopy(input_dict) # remove one image but leave the image token in text patch_size = config.vision_config.patch_size one_img_length = (self.model_tester.image_size**2) // (patch_size**2) curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-one_img_length:, ...] curr_input_dict["image_grid_thw"] = curr_input_dict["image_grid_thw"][-1:, ...] 
with self.assertRaisesRegex(ValueError, "Image features and image tokens do not match"): _ = model(**curr_input_dict) model.base_model.rope_deltas = None # simulate multi-image case by concatenating inputs where each has exactly one image/image-token input_ids = curr_input_dict["input_ids"][:1] pixel_values = curr_input_dict["pixel_values"][:one_img_length] image_grid_thw = curr_input_dict["image_grid_thw"][:1] input_ids = torch.cat([input_ids, input_ids], dim=0) # one image and two image tokens raise an error with self.assertRaisesRegex(ValueError, "Image features and image tokens do not match"): _ = model( input_ids=input_ids, pixel_values=pixel_values, image_grid_thw=image_grid_thw, ) model.base_model.rope_deltas = None # two images and two image tokens don't raise an error pixel_values = torch.cat([pixel_values, pixel_values], dim=0) image_grid_thw = torch.cat([image_grid_thw, image_grid_thw], dim=0) _ = model( input_ids=input_ids, pixel_values=pixel_values, image_grid_thw=image_grid_thw, ) def test_video_forward(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() B = self.model_tester.batch_size C = config.vision_config.in_chans T = config.vision_config.temporal_patch_size P = config.vision_config.patch_size input_ids = ids_tensor([B, self.model_tester.seq_length], self.model_tester.vocab_size) F = 4 patch_H = self.model_tester.image_size // P patch_W = self.model_tester.image_size // P patch_T = F // T patches_per_video = patch_T * patch_H * patch_W pathed_per_frame = patch_H * patch_W pixel_values_videos = floats_tensor( [ # first dim: batch_size * num_patches B * patches_per_video, # second dim: in_channels * temporal_patch_size * patch_size^2 C * T * (P**2), ] ) video_grid_thw = torch.tensor([[1, patch_H, patch_W] for _ in range(patch_T)] * B) # sanity check self.assertEqual(pixel_values_videos.shape[0], video_grid_thw.prod(dim=1).sum().item()) # Insert video token sequence input_ids[:, -1] = self.model_tester.pad_token_id 
input_ids[input_ids == self.model_tester.video_token_id] = self.model_tester.pad_token_id input_ids[input_ids == self.model_tester.image_token_id] = self.model_tester.pad_token_id input_ids[input_ids == self.model_tester.vision_start_token_id] = self.model_tester.pad_token_id input_ids[:, self.model_tester.num_image_tokens] = self.model_tester.video_token_id insertion_point = self.model_tester.num_image_tokens self.assertLessEqual((B * patches_per_video) + insertion_point, self.model_tester.seq_length) for b in range(B): # each frame is separated by a vision_start_token_id for frame_idx in range(patch_T): input_ids[b, insertion_point + frame_idx * (pathed_per_frame + 1)] = ( self.model_tester.vision_start_token_id ) input_ids[ b, insertion_point + frame_idx * (pathed_per_frame + 1) + 1 : insertion_point + (frame_idx + 1) * (pathed_per_frame + 1), ] = self.model_tester.video_token_id for model_class in self.all_model_classes: # TODO:we should remove this because we use timestamps for video model = model_class(config).to(torch_device) outputs = model( input_ids=input_ids, pixel_values_videos=pixel_values_videos, video_grid_thw=video_grid_thw, ) self.assertIsNotNone(outputs) # Need to be False as we only use a Transpose without modifying the keys def test_reverse_loading_mapping(self, check_keys_were_modified=False): super().test_reverse_loading_mapping(check_keys_were_modified) @require_torch class Qwen3VLMoeIntegrationTest(unittest.TestCase): def setUp(self): cleanup(torch_device, gc_collect=True) self.processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-30B-A3B-Instruct") self.processor.tokenizer.padding_side = "left" self.message = [ { "role": "user", "content": [ { "type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg", }, {"type": "text", "text": "What kind of dog is this?"}, ], } ] self.message2 = [ { "role": "user", "content": [ { "type": "image", "url": 
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample.png", }, {"type": "text", "text": "What kind of dog is this?"}, ], } ] self.message3 = [ { "role": "user", "content": [ { "type": "video", "url": "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4", }, {"type": "text", "text": "Describe the video in short."}, ], } ] def tearDown(self): cleanup(torch_device, gc_collect=True) @slow def test_small_model_integration_test(self): model = Qwen3VLMoeForConditionalGeneration.from_pretrained( "Qwen/Qwen3-VL-30B-A3B-Instruct", dtype="auto", device_map="auto" ) inputs = self.processor.apply_chat_template( self.message, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt" ) expected_input_ids = [151644, 872, 198, 151652, 151655, 151655, 151655, 151655, 151655, 151655, 151655, 151655, 151655, 151655, 151655, 151655, 151655] # fmt: skip self.assertListEqual(expected_input_ids, inputs.input_ids[0].tolist()[:17]) expected_pixel_slice = torch.tensor( [ [-0.0902, -0.0824, -0.0824], [-0.2627, -0.2627, -0.2627], [-0.0824, -0.0902, -0.0902], [-0.0118, -0.0510, -0.1137], [-0.5137, -0.5529, -0.6078], [-0.6941, -0.6314, -0.5765], ], dtype=torch.float32, device="cpu", ) self.assertTrue(torch.allclose(expected_pixel_slice, inputs.pixel_values[:6, :3], atol=3e-3)) # verify generation inputs = inputs.to(torch_device) output = model.generate(**inputs, max_new_tokens=30, do_sample=False) EXPECTED_DECODED_TEXT = "user\nWhat kind of dog is this?\nassistant\nThis is a Pallas's cat, also known as the manul. 
It's a small wild cat native to the grasslands and steppes" self.assertEqual( self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_batch(self): model = Qwen3VLMoeForConditionalGeneration.from_pretrained( "Qwen/Qwen3-VL-30B-A3B-Instruct", dtype="auto", device_map="auto" ) batch_messages = [self.message] * 2 inputs = self.processor.apply_chat_template( batch_messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt" ).to(torch_device) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30, do_sample=False) EXPECTED_DECODED_TEXT = [ "user\nWhat kind of dog is this?\nassistant\nThis is a Pallas's cat, also known as the manul. It's a small wild cat native to the grasslands and montane regions", "user\nWhat kind of dog is this?\nassistant\nThis is a Pallas's cat, also known as the manul. It's a small wild cat native to the grasslands and montane regions" ] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_with_video(self): processor = AutoProcessor.from_pretrained( "Qwen/Qwen3-VL-30B-A3B-Instruct", max_image_size={"longest_edge": 50176} ) model = Qwen3VLMoeForConditionalGeneration.from_pretrained( "Qwen/Qwen3-VL-30B-A3B-Instruct", dtype=torch.float16, device_map="auto" ) questions = ["How long is the video? 
Describe the it in short."] video_urls = ["https://huggingface.co/datasets/hf-internal-testing/fixtures_videos/resolve/main/tennis.mp4"] messages = [ [ { "role": "user", "content": [ { "type": "video", "video": video_url, }, {"type": "text", "text": question}, ], } ] for question, video_url in zip(questions, video_urls) ] inputs = processor.apply_chat_template( messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt", padding=True ).to(torch_device) output = model.generate(**inputs, max_new_tokens=30, do_sample=False) EXPECTED_DECODED_TEXT = ["user\n<0.3 seconds><1.4 seconds><2.5 seconds><3.6 seconds><4.7 seconds><5.8 seconds>How long is the video? Describe the it in short.\nassistant\nThe video is 6 seconds long. It shows a man playing tennis on an indoor court. He is wearing a white shirt and black shorts. He"] # fmt: skip self.assertEqual( processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_expand(self): model = Qwen3VLMoeForConditionalGeneration.from_pretrained( "Qwen/Qwen3-VL-30B-A3B-Instruct", dtype="auto", device_map="auto" ) inputs = self.processor.apply_chat_template( self.message, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt" ).to(torch_device) output = model.generate(**inputs, max_new_tokens=30, do_sample=False, num_beams=2, num_return_sequences=2) EXPECTED_DECODED_TEXT = [ "user\nWhat kind of dog is this?\nassistant\nThe animal in the image is not a dog. It is a **Pallas's cat** (*Otocolobus manul*), also known", "user\nWhat kind of dog is this?\nassistant\nThe animal in the image is not a dog. 
It is a **Pallas's cat** (also known as the manul), a wild f" ] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_expand_with_video(self): model = Qwen3VLMoeForConditionalGeneration.from_pretrained( "Qwen/Qwen3-VL-30B-A3B-Instruct", dtype="auto", device_map="auto" ) inputs = self.processor.apply_chat_template( self.message3, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt" ).to(torch_device) output = model.generate(**inputs, max_new_tokens=30, do_sample=False, num_beams=2, num_return_sequences=2) EXPECTED_DECODED_TEXT = [ "user\n<0.3 seconds><1.3 seconds><2.4 seconds><3.5 seconds><4.6 seconds><5.6 seconds><6.7 seconds><7.8 seconds><8.9 seconds><9.7 seconds>Describe the video in short.\nassistant\nA baby wearing glasses sits on a bed and flips through a book.", "user\n<0.3 seconds><1.3 seconds><2.4 seconds><3.5 seconds><4.6 seconds><5.6 seconds><6.7 seconds><7.8 seconds><8.9 seconds><9.7 seconds>Describe the video in short.\nassistant\nA baby wearing glasses sits on a bed and flips through the pages of a book." 
] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_batch_wo_image(self): model = Qwen3VLMoeForConditionalGeneration.from_pretrained( "Qwen/Qwen3-VL-30B-A3B-Instruct", dtype="auto", device_map="auto" ) message_wo_image = [ {"role": "user", "content": [{"type": "text", "text": "Who are you?"}]}, ] batched_messages = [self.message, message_wo_image] inputs = self.processor.apply_chat_template( batched_messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt", padding=True, ).to(torch_device) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30, do_sample=False) EXPECTED_DECODED_TEXT = [ "user\nWhat kind of dog is this?\nassistant\nThis is a Pallas's cat, also known as the manul. It's a wild cat species native to the grasslands and steppes", "user\nWho are you?\nassistant\nI am Qwen, a large-scale language model developed by Alibaba Cloud's Tongyi Lab. I can assist you with answering questions, creating text such" ] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_batch_different_resolutions(self): model = Qwen3VLMoeForConditionalGeneration.from_pretrained( "Qwen/Qwen3-VL-30B-A3B-Instruct", dtype="auto", device_map="auto" ) batched_messages = [self.message, self.message2] inputs = self.processor.apply_chat_template( batched_messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt", padding=True, ).to(torch_device) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30, do_sample=False) EXPECTED_DECODED_TEXT = [ "user\nWhat kind of dog is this?\nassistant\nThis is a Pallas's cat, also known as the manul. 
It's a wild cat species native to the grasslands and steppes", "user\nWhat kind of dog is this?\nassistant\nBased on the image provided, the animals are not dogs. They are two cats.\n\nHere is a description of the animals in the image:\n\n- " ] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_flash_attn @require_torch_accelerator @pytest.mark.flash_attn_test def test_small_model_integration_test_batch_flashatt2(self): model = Qwen3VLMoeForConditionalGeneration.from_pretrained( "Qwen/Qwen3-VL-30B-A3B-Instruct", dtype=torch.bfloat16, attn_implementation="flash_attention_2", device_map="auto", ) batched_messages = [self.message, self.message2] inputs = self.processor.apply_chat_template( batched_messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt", padding=True, ).to(torch_device) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30, do_sample=False) # fmt: off EXPECTED_DECODED_TEXTS = Expectations( { (None, None): ["user\nWhat kind of dog is this?\nassistant\nThis is a Pallas's cat, also known as the manul. It's a wild cat species native to the grasslands and montane regions", "user\nWhat kind of dog is this?\nassistant\nBased on the image provided, there is no dog present. The animals in the picture are two cats.\n\nHere are some observations about the cats in the" ], ("xpu", None): ["user\nWhat kind of dog is this?\nassistant\nThis is a Pallas's cat, also known as the manul. It's a small wild cat native to the grasslands and steppes", 'user\nWhat kind of dog is this?\nassistant\nBased on the image provided, there is no dog present. 
The animals in the picture are two cats.\n\nHere is a description of the scene:\n-' ], } ) EXPECTED_DECODED_TEXT = EXPECTED_DECODED_TEXTS.get_expectation() # fmt: on DECODED_TEXT = self.processor.batch_decode(output, skip_special_tokens=True) self.assertEqual( DECODED_TEXT, EXPECTED_DECODED_TEXT, ) @slow @require_flash_attn @require_torch_accelerator @pytest.mark.flash_attn_test def test_small_model_integration_test_batch_wo_image_flashatt2(self): model = Qwen3VLMoeForConditionalGeneration.from_pretrained( "Qwen/Qwen3-VL-30B-A3B-Instruct", dtype=torch.bfloat16, attn_implementation="flash_attention_2", device_map="auto", ) message_wo_image = [ {"role": "user", "content": [{"type": "text", "text": "Who are you?"}]}, ] batched_messages = [self.message, message_wo_image] inputs = self.processor.apply_chat_template( batched_messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt", padding=True, ).to(torch_device) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30, do_sample=False) EXPECTED_DECODED_TEXT = [ "user\nWhat kind of dog is this?\nassistant\nThis is a Pallas's cat, also known as the manul. It's a wild cat species native to the grasslands and montane regions", "user\nWho are you?\nassistant\nI am Qwen, a large-scale language model developed by Alibaba Cloud's Tongyi Lab. I can assist you with answering questions, creating text such" ] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, )
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/qwen3_vl_moe/test_modeling_qwen3_vl_moe.py", "license": "Apache License 2.0", "lines": 563, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:src/transformers/models/vaultgemma/modular_vaultgemma.py
# Copyright 2025 the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ...cache_utils import Cache from ...modeling_rope_utils import RopeParameters from ..gemma2.configuration_gemma2 import Gemma2Config from ..gemma2.modeling_gemma2 import Gemma2Attention, Gemma2DecoderLayer, Gemma2ForCausalLM, Gemma2MLP, Gemma2RMSNorm class VaultGemmaConfig(Gemma2Config): r""" This is the configuration class to store the configuration of a [`VaultGemmaModel`]. It is used to instantiate an VaultGemma model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the VaultGemma-7B. e.g. [google/vaultgemma-7b](https://huggingface.co/google/vaultgemma-7b) Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 256000): Vocabulary size of the VaultGemma model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`VaultGemmaModel`] hidden_size (`int`, *optional*, defaults to 2304): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 9216): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 26): Number of hidden layers in the Transformer decoder. 
num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*, defaults to 4): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. head_dim (`int`, *optional*, defaults to 256): The attention head dimension. hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`): The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"` if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function. max_position_embeddings (`int`, *optional*, defaults to 8192): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*, defaults to 0): Padding token id. eos_token_id (`int`, *optional*, defaults to 1): End of stream token id. bos_token_id (`int`, *optional*, defaults to 2): Beginning of stream token id. 
tie_word_embeddings (`bool`, *optional*, defaults to `True`): Whether to tie weight embeddings rope_parameters (`RopeParameters`, *optional*): Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with longer `max_position_embeddings`. attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. query_pre_attn_scalar (`float`, *optional*, defaults to 256): scaling factor used on the attention scores sliding_window (`int`, *optional*, defaults to 4096): in VaultGemma, every other layer uses sliding window attention. This is the size of the sliding window. layer_types (`list`, *optional*): Attention pattern for each layer. final_logit_softcapping (`float`, *optional*, defaults to 30.0): scaling factor when applying tanh softcapping on the logits. attn_logit_softcapping (`float`, *optional*, defaults to 50.0): scaling factor when applying tanh softcapping on the attention scores. 
```python >>> from transformers import VaultGemmaModel, VaultGemmaConfig >>> # Initializing a VaultGemma vaultgemma-7b style configuration >>> configuration = VaultGemmaConfig() >>> # Initializing a model from the vaultgemma-7b style configuration >>> model = VaultGemmaModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" def __init__( self, vocab_size: int | None = 256000, hidden_size: int | None = 2304, intermediate_size: int | None = 9216, num_hidden_layers: int | None = 26, num_attention_heads: int | None = 8, num_key_value_heads: int | None = 4, head_dim: int | None = 256, hidden_activation: str | None = "gelu_pytorch_tanh", max_position_embeddings: int | None = 8192, initializer_range: float | None = 0.02, rms_norm_eps: int | None = 1e-6, use_cache: bool | None = True, pad_token_id: int | None = 0, eos_token_id: int | None = 1, bos_token_id: int | None = 2, tie_word_embeddings: bool | None = True, rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None, attention_bias: bool | None = False, attention_dropout: float | None = 0.0, query_pre_attn_scalar: int | None = 256, sliding_window: int | None = 4096, layer_types: list[str] | None = None, final_logit_softcapping: float | None = 30.0, attn_logit_softcapping: float | None = 50.0, **kwargs, ): super().__init__( vocab_size=vocab_size, hidden_size=hidden_size, intermediate_size=intermediate_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, num_key_value_heads=num_key_value_heads, head_dim=head_dim, hidden_activation=hidden_activation, max_position_embeddings=max_position_embeddings, initializer_range=initializer_range, rms_norm_eps=rms_norm_eps, use_cache=use_cache, pad_token_id=pad_token_id, eos_token_id=eos_token_id, bos_token_id=bos_token_id, tie_word_embeddings=tie_word_embeddings, rope_parameters=rope_parameters, attention_bias=attention_bias, attention_dropout=attention_dropout, 
query_pre_attn_scalar=query_pre_attn_scalar, sliding_window=sliding_window, layer_types=layer_types, final_logit_softcapping=final_logit_softcapping, attn_logit_softcapping=attn_logit_softcapping, **kwargs, ) del self.use_bidirectional_attention class VaultGemmaRMSNorm(Gemma2RMSNorm): pass class VaultGemmaMLP(Gemma2MLP): pass class VaultGemmaAttention(Gemma2Attention): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: VaultGemmaConfig, layer_idx: int): super().__init__() self.is_causal = True class VaultGemmaDecoderLayer(Gemma2DecoderLayer): def __init__(self, **super_kwargs): super().__init__(**super_kwargs) del self.post_attention_layernorm del self.post_feedforward_layernorm def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: Cache | None = None, cache_position: torch.LongTensor | None = None, **kwargs, ) -> tuple[torch.FloatTensor, tuple[torch.FloatTensor, torch.FloatTensor] | None]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, _ = self.self_attn( hidden_states=hidden_states, position_embeddings=position_embeddings, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, **kwargs, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.pre_feedforward_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return hidden_states class VaultGemmaForCausalLM(Gemma2ForCausalLM): pass __all__ = [ "VaultGemmaConfig", "VaultGemmaForCausalLM", "VaultGemmaModel", # noqa: F822 "VaultGemmaPreTrainedModel", # noqa: F822 ]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/vaultgemma/modular_vaultgemma.py", "license": "Apache License 2.0", "lines": 201, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:tests/models/vaultgemma/test_modeling_vaultgemma.py
# Copyright 2025 the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch VaultGemma model.""" import unittest import pytest from packaging import version from parameterized import parameterized from transformers import ( AutoModelForCausalLM, AutoTokenizer, DynamicCache, is_torch_available, pipeline, ) from transformers.cache_utils import DynamicLayer, DynamicSlidingWindowLayer from transformers.generation.configuration_utils import GenerationConfig from transformers.testing_utils import ( Expectations, cleanup, is_flash_attn_2_available, is_kernels_available, is_torch_xpu_available, require_torch, require_torch_accelerator, slow, torch_device, ) from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester if is_torch_available(): import torch from transformers import ( VaultGemmaModel, ) class VaultGemmaModelTester(CausalLMModelTester): if is_torch_available(): base_model_class = VaultGemmaModel @require_torch class VaultGemmaModelTest(CausalLMModelTest, unittest.TestCase): _is_stateful = True model_split_percents = [0.5, 0.6] model_tester_class = VaultGemmaModelTester @slow @require_torch_accelerator class VaultGemmaIntegrationTest(unittest.TestCase): input_text = ["Hello I am doing", "Hi today"] def setUp(self): cleanup(torch_device, gc_collect=True) def tearDown(self): cleanup(torch_device, gc_collect=True) def test_model_bf16(self): model_id = "google/vaultgemma-1b" EXPECTED_TEXTS = [ "<bos>Hello I 
am doing a project on a 1990 240sx. I have a 1", "<pad><pad><bos>Hi today I am going to show you how to make a simple 3D model of a 3D", ] model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16, attn_implementation="eager").to( torch_device ) tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=False) self.assertEqual(output_text, EXPECTED_TEXTS) def test_model_pipeline_bf16(self): model_id = "google/vaultgemma-1b" # EXPECTED_TEXTS should match the same non-pipeline test, minus the special tokens EXPECTED_TEXTS = [ "Hello I am doing a project on a 1990 240sx. I have a 1", "Hi today I am going to show you how to make a simple 3D model of a 3D", ] model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16).to(torch_device) tokenizer = AutoTokenizer.from_pretrained(model_id) pipe = pipeline("text-generation", model=model, tokenizer=tokenizer) output = pipe(self.input_text, max_new_tokens=20, do_sample=False, padding=True) self.assertEqual(output[0][0]["generated_text"], EXPECTED_TEXTS[0]) self.assertEqual(output[1][0]["generated_text"], EXPECTED_TEXTS[1]) @pytest.mark.torch_export_test @slow def test_export_static_cache(self): if version.parse(torch.__version__) < version.parse("2.5.0"): self.skipTest(reason="This test requires torch >= 2.5 to run.") from transformers.integrations.executorch import ( TorchExportableModuleWithStaticCache, ) model_id = "google/vaultgemma-1b" tokenizer = AutoTokenizer.from_pretrained(model_id, pad_token="</s>", padding_side="right") EXPECTED_TEXT_COMPLETIONS = Expectations( { ("cuda", 8): ["Hello I am doing a project on a 1990 240sx. 
I have a 1"], } ) EXPECTED_TEXT_COMPLETION = EXPECTED_TEXT_COMPLETIONS.get_expectation() max_generation_length = tokenizer(EXPECTED_TEXT_COMPLETION, return_tensors="pt", padding=True)[ "input_ids" ].shape[-1] # Load model device = "cpu" # TODO (joao / export experts): should be on `torch_device`, but causes GPU OOM dtype = torch.bfloat16 cache_implementation = "static" attn_implementation = "sdpa" batch_size = 1 model = AutoModelForCausalLM.from_pretrained( model_id, device_map=device, dtype=dtype, attn_implementation=attn_implementation, generation_config=GenerationConfig( use_cache=True, cache_implementation=cache_implementation, max_length=max_generation_length, cache_config={ "batch_size": batch_size, "max_cache_len": max_generation_length, }, ), ) prompts = ["Hello I am doing"] prompt_tokens = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device) prompt_token_ids = prompt_tokens["input_ids"] max_new_tokens = max_generation_length - prompt_token_ids.shape[-1] # Static Cache + export from transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM exportable_module = TorchExportableModuleForDecoderOnlyLM(model) exported_program = exportable_module.export( input_ids=torch.tensor([[1]], dtype=torch.long, device=model.device), cache_position=torch.tensor([0], dtype=torch.long, device=model.device), ) ep_generated_ids = TorchExportableModuleWithStaticCache.generate( exported_program=exported_program, prompt_token_ids=prompt_token_ids, max_new_tokens=max_new_tokens ) ep_generated_text = tokenizer.batch_decode(ep_generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, ep_generated_text) @parameterized.expand([("flash_attention_2",), ("sdpa",), ("flex_attention",), ("eager",)]) def test_generation_beyond_sliding_window(self, attn_implementation: str): """Test that we can correctly generate beyond the sliding window. 
This is non trivial as we need to correctly slice the attention mask in all cases (because we use a hybrid cache). Outputs for every attention functions should be coherent and identical. """ # Impossible to test it with this model (even with < 100 tokens), probably due to the compilation of a large model. if attn_implementation == "flex_attention": self.skipTest( reason="`flex_attention` gives `torch._inductor.exc.InductorError: RuntimeError: No valid triton configs. OutOfMemoryError: out of resource: triton_tem_fused_0 Required: 147456 Hardware limit:101376 Reducing block sizes or `num_stages` may help.`" ) if ( attn_implementation == "flash_attention_2" and not is_flash_attn_2_available() and not (is_torch_xpu_available() and is_kernels_available()) ): self.skipTest("FlashAttention2 is required for this test.") model_id = "google/vaultgemma-1b" EXPECTED_COMPLETIONS = [ " place pretty place pretty place. place pretty place pretty place. place pretty place pretty place. place pretty", ", green, yellow, orange, purple, black, white, and gray.\n\nA list of", ] input_text = [ "This is a nice place. " * 800 + "I really enjoy the scenery,", # This is larger than 4096 tokens "A list of colors: red, blue", # This will almost all be padding tokens ] tokenizer = AutoTokenizer.from_pretrained(model_id, padding="left") inputs = tokenizer(input_text, padding=True, return_tensors="pt").to(torch_device) model = AutoModelForCausalLM.from_pretrained( model_id, attn_implementation=attn_implementation, dtype=torch.float16 ).to(torch_device) # Make sure prefill is larger than sliding window input_size = inputs.input_ids.shape[-1] self.assertTrue(input_size > model.config.sliding_window) # It should by Hybrid by default from hub config, but let's make sure! 
out = model.generate(**inputs, max_new_tokens=20, cache_implementation="hybrid")[:, input_size:] output_text = tokenizer.batch_decode(out) self.assertEqual(output_text, EXPECTED_COMPLETIONS) @parameterized.expand([("flash_attention_2",), ("sdpa",), ("flex_attention",), ("eager",)]) def test_generation_beyond_sliding_window_dynamic(self, attn_implementation: str): """ Same as above, but explicitly setting the cache to Dynamic, as it's otherwise static by default for the model on the hub """ # Impossible to test it with this model (even with < 100 tokens), probably due to the compilation of a large model. if attn_implementation == "flex_attention": self.skipTest( reason="`flex_attention` gives `torch._inductor.exc.InductorError: RuntimeError: No valid triton configs. OutOfMemoryError: out of resource: triton_tem_fused_0 Required: 147456 Hardware limit:101376 Reducing block sizes or `num_stages` may help.`" ) if ( attn_implementation == "flash_attention_2" and not is_flash_attn_2_available() and not (is_torch_xpu_available() and is_kernels_available()) ): self.skipTest("FlashAttention2 is required for this test.") model_id = "google/vaultgemma-1b" EXPECTED_COMPLETIONS = [ " place pretty place pretty place. place pretty place pretty place. place pretty place pretty place. place pretty", ", green, yellow, orange, purple, black, white, and gray.\n\nA list of", ] input_text = [ "This is a nice place. 
" * 800 + "I really enjoy the scenery,", # This is larger than 4096 tokens "A list of colors: red, blue", # This will almost all be padding tokens ] tokenizer = AutoTokenizer.from_pretrained(model_id, padding="left") inputs = tokenizer(input_text, padding=True, return_tensors="pt").to(torch_device) model = AutoModelForCausalLM.from_pretrained( model_id, attn_implementation=attn_implementation, dtype=torch.float16 ).to(torch_device) # Make sure prefill is larger than sliding window input_size = inputs.input_ids.shape[-1] self.assertTrue(input_size > model.config.sliding_window) out = model.generate(**inputs, max_new_tokens=20, cache_implementation="dynamic", return_dict_in_generate=True) output_text = tokenizer.batch_decode(out.sequences[:, input_size:]) self.assertEqual(output_text, EXPECTED_COMPLETIONS) # Let's check that the dynamic cache has hybrid layers! dynamic_cache = out.past_key_values self.assertTrue(isinstance(dynamic_cache, DynamicCache)) for layer, layer_type in zip(dynamic_cache.layers, model.config.layer_types): if layer_type == "sliding_attention": self.assertTrue(isinstance(layer, DynamicSlidingWindowLayer)) self.assertEqual(layer.keys.shape[-2], model.config.sliding_window - 1) else: self.assertTrue(isinstance(layer, DynamicLayer)) # max_new_tokens - 1 because last token generated is not cached self.assertEqual(layer.keys.shape[-2], input_size + 20 - 1)
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/vaultgemma/test_modeling_vaultgemma.py", "license": "Apache License 2.0", "lines": 229, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:src/transformers/models/ministral/modular_ministral.py
import torch from torch import nn from ...cache_utils import Cache, DynamicCache from ...configuration_utils import PreTrainedConfig from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask from ...modeling_outputs import BaseModelOutputWithPast from ...modeling_rope_utils import RopeParameters from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring from ...utils.generic import merge_with_config_defaults from ...utils.output_capturing import capture_outputs from ..mistral.configuration_mistral import MistralConfig from ..qwen2.modeling_qwen2 import ( Qwen2Attention, Qwen2DecoderLayer, Qwen2ForCausalLM, Qwen2ForQuestionAnswering, Qwen2ForSequenceClassification, Qwen2ForTokenClassification, Qwen2MLP, Qwen2Model, Qwen2PreTrainedModel, Qwen2RMSNorm, Qwen2RotaryEmbedding, ) class MinistralConfig(MistralConfig, PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`MinistralModel`]. It is used to instantiate an Ministral model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Ministral-8B-Instruct-2410. [mistralai/Ministral-8B-Instruct-2410](https://huggingface.co/mistralai/Ministral-8B-Instruct-2410) [mistralai/Ministral-8B-Instruct-2410](https://huggingface.co/mistralai/Ministral-8B-Instruct-2410) Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32000): Vocabulary size of the Ministral model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MinistralModel`] hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. 
intermediate_size (`int`, *optional*, defaults to 14336): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer encoder. num_key_value_heads (`int`, *optional*, defaults to 8): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`. head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`): The attention head dimension. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to `4096*32`): The maximum sequence length that this model might ever be used with. Ministral's sliding window attention allows sequence of up to 4096*32 tokens. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*): The id of the padding token. 
bos_token_id (`int`, *optional*, defaults to 1): The id of the "beginning-of-sequence" token. eos_token_id (`int`, *optional*, defaults to 2): The id of the "end-of-sequence" token. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied. rope_parameters (`RopeParameters`, *optional*): Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with longer `max_position_embeddings`. sliding_window (`int`, *optional*, defaults to 4096): Sliding window attention window size. If not specified, will default to `4096`. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. layer_types (`list`, *optional*): Attention pattern for each layer. ```python >>> from transformers import MinistralModel, MinistralConfig >>> # Initializing a Ministral 8B style configuration >>> configuration = MinistralConfig() >>> # Initializing a model from the Ministral 8B style configuration >>> model = MinistralModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "ministral" def __init__( self, vocab_size: int | None = 32000, hidden_size: int | None = 4096, intermediate_size: int | None = 14336, num_hidden_layers: int | None = 32, num_attention_heads: int | None = 32, num_key_value_heads: int | None = 8, head_dim: int | None = None, hidden_act: str | None = "silu", max_position_embeddings: int | None = 4096 * 32, initializer_range: float | None = 0.02, rms_norm_eps: float | None = 1e-6, use_cache: bool | None = True, pad_token_id: int | None = None, bos_token_id: int | None = 1, eos_token_id: int | None = 2, tie_word_embeddings: bool | None = False, rope_parameters: RopeParameters | None = None, sliding_window: int | None = 4096, attention_dropout: float | None = 
0.0, layer_types: list[str] | None = None, **kwargs, ): self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.tie_word_embeddings = tie_word_embeddings self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.sliding_window = sliding_window self.head_dim = head_dim # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.attention_dropout = attention_dropout self.layer_types = layer_types if self.layer_types is None: self.layer_types = [ "sliding_attention" if self.sliding_window is not None else "full_attention" ] * num_hidden_layers self.rope_parameters = rope_parameters PreTrainedConfig.__init__(self, **kwargs) class MinistralMLP(Qwen2MLP): pass class MinistralAttention(Qwen2Attention): def __init__(self, config, layer_idx: int): super().__init__(config, layer_idx) # Match Mistral: q/k/v do not have bias self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False) class MinistralRMSNorm(Qwen2RMSNorm): pass class MinistralDecoderLayer(Qwen2DecoderLayer): pass class MinistralPreTrainedModel(Qwen2PreTrainedModel): pass class MinistralRotaryEmbedding(Qwen2RotaryEmbedding): pass class MinistralModel(Qwen2Model): def __init__(self, config: MinistralConfig): super().__init__(config) del self.has_sliding_layers @merge_with_config_defaults @capture_outputs 
@auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: Cache | None = None, inputs_embeds: torch.FloatTensor | None = None, use_cache: bool | None = None, cache_position: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) # It may already have been prepared by e.g. 
`generate` if not isinstance(causal_mask_mapping := attention_mask, dict): # Prepare mask arguments mask_kwargs = { "config": self.config, "inputs_embeds": inputs_embeds, "attention_mask": attention_mask, "cache_position": cache_position, "past_key_values": past_key_values, "position_ids": position_ids, } # Create the masks causal_mask_mapping = { "full_attention": create_causal_mask(**mask_kwargs), "sliding_attention": create_sliding_window_causal_mask(**mask_kwargs), } hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids) for decoder_layer in self.layers[: self.config.num_hidden_layers]: hidden_states = decoder_layer( hidden_states, attention_mask=causal_mask_mapping[decoder_layer.attention_type], position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.norm(hidden_states) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, ) class MinistralForCausalLM(Qwen2ForCausalLM): pass class MinistralForSequenceClassification(Qwen2ForSequenceClassification): pass class MinistralForTokenClassification(Qwen2ForTokenClassification): pass class MinistralForQuestionAnswering(Qwen2ForQuestionAnswering): pass __all__ = [ "MinistralConfig", "MinistralPreTrainedModel", "MinistralModel", "MinistralForCausalLM", "MinistralForSequenceClassification", "MinistralForTokenClassification", "MinistralForQuestionAnswering", ]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/ministral/modular_ministral.py", "license": "Apache License 2.0", "lines": 246, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
huggingface/transformers:tests/models/ministral/test_modeling_ministral.py
# Copyright 2025 the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Ministral model.""" import gc import logging import unittest import pytest from transformers import AutoTokenizer, BitsAndBytesConfig, GenerationConfig, is_torch_available from transformers.testing_utils import ( backend_empty_cache, cleanup, require_bitsandbytes, require_flash_attn, require_torch, require_torch_accelerator, slow, torch_device, ) if is_torch_available(): import torch from transformers import ( AutoModelForCausalLM, MinistralForCausalLM, MinistralModel, ) from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester class MinistralModelTester(CausalLMModelTester): if is_torch_available(): base_model_class = MinistralModel @require_torch class MinistralModelTest(CausalLMModelTest, unittest.TestCase): model_tester_class = MinistralModelTester # TODO (ydshieh): Check this. 
See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146 def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): return True @require_flash_attn @require_torch_accelerator @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_equivalence_right_padding(self): self.skipTest(reason="Ministral flash attention does not support right padding") @require_torch class MinistralIntegrationTest(unittest.TestCase): def tearDown(self): cleanup(torch_device, gc_collect=True) @slow def test_model_8b_logits(self): input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338] model = AutoModelForCausalLM.from_pretrained("mistralai/Ministral-8B-Instruct-2410", device_map="auto") assert isinstance(model, MinistralForCausalLM) input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device) with torch.no_grad(): out = model(input_ids).logits.float().cpu() # Expected mean on dim = -1 EXPECTED_MEAN = torch.tensor([[-1.5029, -7.2815, 4.5190, 0.5930, -5.2526, 3.0765, -0.6314, 1.8068]]) torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2) # slicing logits[0, 0, 0:30] EXPECTED_SLICE = torch.tensor([-3.9446, -3.9466, 0.6383, -3.9466, -3.9468, -3.9448, -3.9462, -3.9455, -3.9451, -0.8244, -3.9472, -3.9458, -3.9460, -3.9406, -3.9462, -3.9462, -3.9458, -3.9462, -3.9463, -3.9461, -3.9448, -3.9451, -3.9462, -3.9458, -3.9455, -3.9452, -3.9458, -3.9469, -3.9460, -3.9464]) # fmt: skip torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-4, atol=1e-4) del model backend_empty_cache(torch_device) gc.collect() @slow def test_model_8b_generation(self): EXPECTED_TEXT_COMPLETION = "My favourite condiment is 100% natural, 100% organic, 100% free of" prompt = "My favourite condiment is " tokenizer = 
AutoTokenizer.from_pretrained("Mistralai/Ministral-8B-Instruct-2410") model = MinistralForCausalLM.from_pretrained("Mistralai/Ministral-8B-Instruct-2410", device_map="auto") input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device) # greedy generation outputs generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) del model backend_empty_cache(torch_device) gc.collect() @require_bitsandbytes @slow @require_flash_attn @pytest.mark.flash_attn_test def test_model_8b_long_prompt(self): EXPECTED_OUTPUT_TOKEN_IDS = [36850, 4112] # An input with 4097 tokens that is above the size of the sliding window input_ids = [1] + [306, 338] * 2048 model = MinistralForCausalLM.from_pretrained( "Mistralai/Ministral-8B-Instruct-2410", device_map="auto", dtype=torch.bfloat16, attn_implementation="flash_attention_2", ) input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device) generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0) self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist()) # Assisted generation assistant_model = model assistant_model.generation_config.num_assistant_tokens = 2 assistant_model.generation_config.num_assistant_tokens_schedule = "constant" generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0) self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist()) del assistant_model del model backend_empty_cache(torch_device) gc.collect() @slow @unittest.skip("not working with Ministral") @pytest.mark.torch_export_test def test_export_text_with_hybrid_cache(self): # TODO: Exportability is not working from transformers.testing_utils import is_torch_greater_or_equal if not is_torch_greater_or_equal("2.6.0"): self.skipTest(reason="This test requires torch >= 2.6 to run.") from 
transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM model_id = "Mistralai/Ministral-8B-Instruct-2410" model = MinistralForCausalLM.from_pretrained( model_id, generation_config=GenerationConfig( use_cache=True, cache_implementation="static", cache_config={ "batch_size": 1, "max_cache_len": 50, }, ), ) # Export model.eval() exportable_module = TorchExportableModuleForDecoderOnlyLM(model) exported_program = exportable_module.export( input_ids=torch.tensor([[1]], dtype=torch.long, device=model.device), cache_position=torch.tensor([0], dtype=torch.long, device=model.device), ) logging.info(f"\nExported program: {exported_program}") # Test generation with the exported model prompt = "My favourite condiment is " max_new_tokens_to_generate = 20 # Generate text with the exported model tokenizer = AutoTokenizer.from_pretrained(model_id) export_generated_text = TorchExportableModuleForDecoderOnlyLM.generate( exported_program, tokenizer, prompt, max_new_tokens=max_new_tokens_to_generate ) logging.info(f"\nExport generated texts: '{export_generated_text}'") input_text = tokenizer(prompt, return_tensors="pt") with torch.no_grad(): eager_outputs = model.generate( **input_text, max_new_tokens=max_new_tokens_to_generate, do_sample=False, # Use greedy decoding to match the exported model cache_implementation="static", ) eager_generated_text = tokenizer.decode(eager_outputs[0], skip_special_tokens=True) logging.info(f"\nEager generated texts: '{eager_generated_text}'") self.assertEqual(export_generated_text, eager_generated_text) @pytest.mark.flash_attn_test @require_flash_attn @slow def test_past_sliding_window_generation(self): try: from datasets import load_dataset except ImportError: self.skipTest("datasets not found") model = MinistralForCausalLM.from_pretrained( "mistralai/Ministral-8B-Instruct-2410", device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True), ) tokenizer = 
AutoTokenizer.from_pretrained("mistralai/Ministral-8B-Instruct-2410", legacy=False) wiki = load_dataset("wikitext", "wikitext-103-raw-v1", split="validation") chunks = [x["text"] for x in wiki.select(range(550)) if x["text"].strip()] real_corpus = "\n".join(chunks) prompt = f"<s>[INST]{real_corpus} Question: Based on the text, at which depth of the continental shelf does H. Gammarus live?[/INST]" inputs = tokenizer(prompt, return_tensors="pt").to(model.device) input_length = inputs.input_ids.shape[1] # around 33k tokens > 32k sliding window outputs = model.generate(**inputs, max_new_tokens=100, do_sample=False) output_text = tokenizer.decode(outputs[0][input_length:], skip_special_tokens=True) self.assertEqual( output_text, " H. Gammarus lives on the continental shelf at depths of 0 – 150 metres ( 0 – 492 ft ) , although not normally deeper than 50 m ( 160 ft ) .", )
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/ministral/test_modeling_ministral.py", "license": "Apache License 2.0", "lines": 201, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:tests/models/glm4v/test_image_processing_glm4v.py
# Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import Glm4vImageProcessor from transformers.models.glm4v.image_processing_glm4v import smart_resize if is_torchvision_available(): from transformers import Glm4vImageProcessorFast class Glm4vImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=80, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], temporal_patch_size=2, patch_size=14, merge_size=2, ): size = size if size is not None else {"longest_edge": 20, "shortest_edge": 10} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.temporal_patch_size = temporal_patch_size self.patch_size = patch_size self.merge_size = merge_size def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, 
"image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "temporal_patch_size": self.temporal_patch_size, "patch_size": self.patch_size, "merge_size": self.merge_size, } def expected_output_image_shape(self, images): grid_t = 1 hidden_dim = self.num_channels * self.temporal_patch_size * self.patch_size * self.patch_size seq_len = 0 for image in images: if isinstance(image, list) and isinstance(image[0], Image.Image): image = np.stack([np.array(frame) for frame in image]) elif hasattr(image, "shape"): pass else: image = np.array(image) if hasattr(image, "shape") and len(image.shape) >= 3: if isinstance(image, np.ndarray): if len(image.shape) == 4: height, width = image.shape[1:3] elif len(image.shape) == 3: height, width = image.shape[:2] else: height, width = self.min_resolution, self.min_resolution else: height, width = image.shape[-2:] else: height, width = self.min_resolution, self.min_resolution resized_height, resized_width = smart_resize( self.temporal_patch_size, height, width, factor=self.patch_size * self.merge_size, min_pixels=self.size["shortest_edge"], max_pixels=self.size["longest_edge"], ) grid_h, grid_w = resized_height // self.patch_size, resized_width // self.patch_size seq_len += grid_t * grid_h * grid_w return (seq_len, hidden_dim) def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class Glm4vImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = Glm4vImageProcessor if is_vision_available() else None fast_image_processing_class = Glm4vImageProcessorFast if is_torchvision_available() else None def setUp(self): super().setUp() self.image_processor_tester = 
Glm4vImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 10, "longest_edge": 20}) image_processor = image_processing_class.from_dict( self.image_processor_dict, size={"shortest_edge": 42, "longest_edge": 42} ) self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 42}) # batch size is flattened def test_call_pil(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual(tuple(encoded_images.shape), 
expected_output_image_shape) def test_call_numpy(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_call_pytorch(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_call_numpy_4_channels(self): for image_processing_class in 
self.image_processor_list: # Test that can process images which have an arbitrary number of channels # Initialize image_processing image_processor = image_processing_class(**self.image_processor_dict) # create random numpy tensors self.image_processor_tester.num_channels = 4 image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) # Test not batched input encoded_images = image_processor( image_inputs[0], return_tensors="pt", input_data_format="channels_last", image_mean=(0.0, 0.0, 0.0, 0.0), image_std=(1.0, 1.0, 1.0, 1.0), ).pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processor( image_inputs, return_tensors="pt", input_data_format="channels_last", image_mean=(0.0, 0.0, 0.0, 0.0), image_std=(1.0, 1.0, 1.0, 1.0), ).pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/glm4v/test_image_processing_glm4v.py", "license": "Apache License 2.0", "lines": 216, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:src/transformers/models/qwen3_next/configuration_qwen3_next.py
# Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Qwen3-Next model configuration""" from ...configuration_utils import PreTrainedConfig, layer_type_validation from ...modeling_rope_utils import RopeParameters from ...utils import logging logger = logging.get_logger(__name__) class Qwen3NextConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`Qwen3NextModel`]. It is used to instantiate a Qwen3-Next model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of Qwen3-Next-80B-A3B-Instruct [Qwen/Qwen3-Next-80B-A3B-Instruct](https://huggingface.co/Qwen/Qwen3-Next-80B-A3B-Instruct). Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 151936): Vocabulary size of the model. Defines the number of different tokens that can be represented by the `inputs_ids`. hidden_size (`int`, *optional*, defaults to 2048): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 5632): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 48): Number of hidden layers in the Transformer encoder. 
num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. num_key_value_heads (`int`, *optional*, defaults to 2): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details checkout [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`. hidden_act (`str`, *optional*, defaults to `"silu"`): The non-linear activation function in the decoder. max_position_embeddings (`int`, *optional*, defaults to 32768): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied. rope_parameters (`RopeParameters`, *optional*): Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with longer `max_position_embeddings`. 
attention_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. head_dim (`int`, *optional*, defaults to 256): Projection weights dimension in multi-head attention. linear_conv_kernel_dim (`int`, *optional*, defaults to 4): Kernel size of the convolution used in linear attention layers. linear_key_head_dim (`int`, *optional*, defaults to 128): Dimension of each key head in linear attention. linear_value_head_dim (`int`, *optional*, defaults to 128): Dimension of each value head in linear attention. linear_num_key_heads (`int`, *optional*, defaults to 16): Number of key heads used in linear attention layers. linear_num_value_heads (`int`, *optional*, defaults to 32): Number of value heads used in linear attention layers. decoder_sparse_step (`int`, *optional*, defaults to 1): The frequency of the MoE layer. moe_intermediate_size (`int`, *optional*, defaults to 512): Intermediate size of the routed expert. shared_expert_intermediate_size (`int`, *optional*, defaults to 512): Intermediate size of the shared expert. num_experts_per_tok (`int`, *optional*, defaults to 10): Number of selected experts. num_experts (`int`, *optional*, defaults to 512): Number of routed experts. norm_topk_prob (`bool`, *optional*, defaults to `True`): Whether to normalize the topk probabilities. output_router_logits (`bool`, *optional*, defaults to `False`): Whether or not the router logits should be returned by the model. Enabling this will also allow the model to output the auxiliary loss, including load balancing loss and router z-loss. router_aux_loss_coef (`float`, *optional*, defaults to 0.001): The aux loss factor for the total loss. 
mlp_only_layers (`list[int]`, *optional*, defaults to `[]`): Indicate which layers use Qwen3NextMLP rather than Qwen3NextSparseMoeBlock The list contains layer index, from 0 to num_layers-1 if we have num_layers layers If `mlp_only_layers` is empty, `decoder_sparse_step` is used to determine the sparsity. layer_types (`list[str]`, *optional*): Types of each layer (attention or linear). pad_token_id (`int`, *optional*): Padding token id. bos_token_id (`int`, *optional*): Beginning of stream token id. eos_token_id (`int`, *optional*): End of stream token id. ```python >>> from transformers import Qwen3NextModel, Qwen3NextConfig >>> # Initializing a Qwen3Next style configuration >>> configuration = Qwen3NextConfig() >>> # Initializing a model from the Qwen3-Next-80B-A3B style configuration >>> model = Qwen3NextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "qwen3_next" keys_to_ignore_at_inference = ["past_key_values"] base_model_tp_plan = { "layers.*.self_attn.q_proj": "colwise", "layers.*.self_attn.k_proj": "colwise", "layers.*.self_attn.v_proj": "colwise", "layers.*.self_attn.o_proj": "rowwise", "layers.*.mlp.experts.gate_up_proj": "packed_colwise", "layers.*.mlp.experts.down_proj": "rowwise", "layers.*.mlp.shared_expert.gate_proj": "colwise", "layers.*.mlp.shared_expert.up_proj": "colwise", "layers.*.mlp.shared_expert.down_proj": "rowwise", "layers.*.mlp.gate_proj": "colwise", "layers.*.mlp.up_proj": "colwise", "layers.*.mlp.down_proj": "rowwise", } base_model_pp_plan = { "embed_tokens": (["input_ids"], ["inputs_embeds"]), "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), "norm": (["hidden_states"], ["hidden_states"]), } def __init__( self, vocab_size: int | None = 151936, hidden_size: int | None = 2048, intermediate_size: int | None = 5632, num_hidden_layers: int | None = 48, num_attention_heads: int | None = 16, num_key_value_heads: int | None = 2, hidden_act: str | None = 
"silu", max_position_embeddings: int | None = 32768, initializer_range: float | None = 0.02, rms_norm_eps: float | None = 1e-6, use_cache: bool | None = True, tie_word_embeddings: bool | None = False, rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None, attention_bias: bool | None = False, attention_dropout: float | None = 0.0, head_dim: int | None = 256, linear_conv_kernel_dim: int | None = 4, linear_key_head_dim: int | None = 128, linear_value_head_dim: int | None = 128, linear_num_key_heads: int | None = 16, linear_num_value_heads: int | None = 32, decoder_sparse_step: int | None = 1, moe_intermediate_size: int | None = 512, shared_expert_intermediate_size: int | None = 512, num_experts_per_tok: int | None = 10, num_experts: int | None = 512, norm_topk_prob: bool | None = True, output_router_logits: bool | None = False, router_aux_loss_coef: float | None = 0.001, mlp_only_layers: list[int] | None = [], layer_types: list[str] | None = None, pad_token_id: int | None = None, bos_token_id: int | None = None, eos_token_id: int | None = None, **kwargs, ): self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.tie_word_embeddings = tie_word_embeddings self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.head_dim = head_dim self.rope_parameters = rope_parameters kwargs.setdefault("partial_rotary_factor", 0.25) # assign default for BC self.layer_types = layer_types if self.layer_types is None: interval_pattern = kwargs.get("full_attention_interval", 
4) self.layer_types = [ "linear_attention" if bool((i + 1) % interval_pattern) else "full_attention" for i in range(self.num_hidden_layers) ] layer_type_validation(self.layer_types, self.num_hidden_layers) # linear attention part self.linear_conv_kernel_dim = linear_conv_kernel_dim self.linear_key_head_dim = linear_key_head_dim self.linear_value_head_dim = linear_value_head_dim self.linear_num_key_heads = linear_num_key_heads self.linear_num_value_heads = linear_num_value_heads # MoE arguments self.decoder_sparse_step = decoder_sparse_step self.moe_intermediate_size = moe_intermediate_size self.shared_expert_intermediate_size = shared_expert_intermediate_size self.num_experts_per_tok = num_experts_per_tok self.num_experts = num_experts self.norm_topk_prob = norm_topk_prob self.output_router_logits = output_router_logits self.router_aux_loss_coef = router_aux_loss_coef self.mlp_only_layers = mlp_only_layers super().__init__(**kwargs) __all__ = ["Qwen3NextConfig"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/qwen3_next/configuration_qwen3_next.py", "license": "Apache License 2.0", "lines": 222, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/qwen3_next/modular_qwen3_next.py
# Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Qwen3-Next model.""" from collections.abc import Callable from typing import Any, Optional import torch import torch.nn.functional as F from torch import nn from ... import initialization as init from ...activations import ACT2FN from ...cache_utils import Cache from ...masking_utils import create_causal_mask from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, logging from ...utils.generic import merge_with_config_defaults from ...utils.import_utils import ( is_causal_conv1d_available, is_flash_linear_attention_available, ) from ...utils.output_capturing import OutputRecorder, capture_outputs from ..bamba.modeling_bamba import apply_mask_to_padding_states, apply_rotary_pos_emb from ..gemma2.modeling_gemma2 import Gemma2RotaryEmbedding from ..gemma3.modeling_gemma3 import Gemma3RMSNorm from ..llama.modeling_llama import ( LlamaForQuestionAnswering, LlamaForSequenceClassification, LlamaForTokenClassification, ) from ..mixtral.modeling_mixtral import MixtralForCausalLM from ..qwen2_moe.modeling_qwen2_moe import Qwen2MoeExperts, Qwen2MoeSparseMoeBlock, Qwen2MoeTopKRouter 
class Qwen3NextRMSNormGated(nn.Module):
    """RMS normalisation followed by a SiLU gate (the norm is applied *before* gating)."""

    def __init__(self, hidden_size, eps=1e-6, **kwargs):
        super().__init__()
        # Learnable per-channel scale, initialised to ones.
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states, gate=None):
        # NOTE(review): despite the `None` default, `gate` must be provided —
        # a `None` gate would fail on the `.to()` call below.
        orig_dtype = hidden_states.dtype
        x = hidden_states.to(torch.float32)
        mean_square = x.pow(2).mean(dim=-1, keepdim=True)
        # Normalise first, then gate.
        x = x * torch.rsqrt(mean_square + self.variance_epsilon)
        x = self.weight * x.to(orig_dtype)
        x = x * F.silu(gate.to(torch.float32))
        return x.to(orig_dtype)
class Qwen3NextDynamicCache:
    """
    A dynamic cache for the hybrid Qwen3-Next stack. It handles both full-attention layers (whose KV cache grows
    with the sequence length) and linear-attention / gated-deltanet layers (whose state has a constant shape
    regardless of the sequence length).

    Four per-layer lists are kept, each with `num_hidden_layers` entries:
    - for full-attention layers, `key_cache` / `value_cache` hold tensors of shape
      `(batch_size, num_heads, seq_len, head_dim)`;
    - for linear-attention layers, `conv_states` holds the causal-convolution state and `recurrent_states`
      the recurrent (deltanet) state.
    Entries that do not apply to a layer's type are left as `None`.
    """

    # This cache mixes lazily-allocated python lists; it is not compatible with torch.compile.
    is_compileable = False

    def __init__(self, config: Qwen3NextConfig):
        super().__init__()
        self.layer_types = config.layer_types
        # Indices of the softmax-attention layers (the only ones using key/value caches).
        self.transformer_layers = [
            i for i in range(config.num_hidden_layers) if self.layer_types[i] == "full_attention"
        ]
        # Index of the last linear-attention layer; used by `has_previous_state` below.
        self.last_linear_layer = len(self.layer_types) - 1 - self.layer_types[::-1].index("linear_attention")
        # Initialize everything to None -> will be lazy initialized to allow multi-gpu (device_map) inference
        self.conv_states = [None for _ in range(config.num_hidden_layers)]
        self.recurrent_states = [None for _ in range(config.num_hidden_layers)]
        self.key_cache = [None for _ in range(config.num_hidden_layers)]
        self.value_cache = [None for _ in range(config.num_hidden_layers)]

    def __len__(self):
        # Number of layers tracked by the cache.
        return len(self.layer_types)

    def update(
        self,
        key_states: torch.Tensor,
        value_states: torch.Tensor,
        layer_idx: int,
        cache_kwargs: dict[str, Any] | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Append `key_states`/`value_states` to layer `layer_idx` and return the full cached tensors."""
        if self.key_cache[layer_idx] is None:
            # First write for this layer: adopt the tensors as-is.
            self.key_cache[layer_idx] = key_states
            self.value_cache[layer_idx] = value_states
        else:
            # Subsequent writes: concatenate along the sequence dimension.
            self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=2)
            self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=2)

        return self.key_cache[layer_idx], self.value_cache[layer_idx]

    def reorder_cache(self, beam_idx: torch.LongTensor):
        """Reorders the cache for beam search, given the selected beam indices."""
        for layer_idx in range(len(self.key_cache)):
            if self.key_cache[layer_idx] is not None:
                device = self.key_cache[layer_idx].device
                beam_idx = beam_idx.to(device)
                self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx)
                self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx)

            if self.conv_states[layer_idx] is not None:
                device = self.conv_states[layer_idx].device
                beam_idx = beam_idx.to(device)
                self.conv_states[layer_idx] = self.conv_states[layer_idx].index_select(0, beam_idx)
                self.recurrent_states[layer_idx] = self.recurrent_states[layer_idx].index_select(0, beam_idx)

    def get_seq_length(self, layer_idx: int | None = 0) -> int:
        """Returns the sequence length of the cached states. A layer index can be optionally passed."""
        # take any layer that contains cache and not empty tensor
        layer_idx = self.transformer_layers[0] if layer_idx not in self.transformer_layers else layer_idx
        if len(self.key_cache) <= layer_idx or self.key_cache[layer_idx] is None:
            return 0
        return self.key_cache[layer_idx].shape[-2]

    def get_mask_sizes(self, cache_position: torch.Tensor, layer_idx: int) -> tuple[int, int]:
        """
        Return a tuple (kv_length, kv_offset) corresponding to the length and offset that will be returned for
        the given layer at `layer_idx`.
        The masks are then prepared according to the given lengths (kv_length, kv_offset) and patterns for each layer.
        """
        kv_offset = 0
        query_length = cache_position.shape[0]
        past_seen_tokens = self.get_seq_length(layer_idx)
        kv_length = query_length + past_seen_tokens
        return kv_length, kv_offset

    @property
    def has_previous_state(self):
        """We have a previous state if the last linear (conv) layer was already updated."""
        return self.conv_states[self.last_linear_layer] is not None
""" base = config.rope_parameters["rope_theta"] partial_rotary_factor = config.rope_parameters.get("partial_rotary_factor", 1.0) head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads dim = int(head_dim * partial_rotary_factor) attention_factor = 1.0 # Unused in this type of RoPE # Compute the inverse frequencies inv_freq = 1.0 / ( base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim) ) return inv_freq, attention_factor class Qwen3NextRMSNorm(Gemma3RMSNorm): pass class Qwen3NextAttention(Qwen3MoeAttention): def __init__(self, config: Qwen3NextConfig, layer_idx: int): super().__init__(config, layer_idx) self.q_proj = nn.Linear( config.hidden_size, config.num_attention_heads * self.head_dim * 2, bias=config.attention_bias ) del self.sliding_window def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: torch.Tensor | None, past_key_values: Cache | None = None, cache_position: torch.LongTensor | None = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, torch.Tensor | None]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states, gate = torch.chunk( self.q_proj(hidden_states).view(*input_shape, -1, self.head_dim * 2), 2, dim=-1 ) gate = gate.reshape(*input_shape, -1) query_states = self.q_norm(query_states.view(hidden_shape)).transpose(1, 2) key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = 
def torch_causal_conv1d_update(
    hidden_states,
    conv_state,
    weight,
    bias=None,
    activation=None,
):
    """
    Pure-torch fallback for `causal_conv1d_update`: depthwise causal conv over the new tokens,
    using (and updating, in place) `conv_state` as the rolling context window. SiLU is always
    applied; the `activation` argument is accepted for API compatibility but ignored.
    """
    _, num_channels, new_len = hidden_states.shape
    window = conv_state.shape[-1]
    # Prepend the cached context so the convolution stays causal.
    full_sequence = torch.cat([conv_state, hidden_states], dim=-1).to(weight.dtype)
    # Rolling in-place update of the cache with the most recent `window` steps.
    conv_state.copy_(full_sequence[:, :, -window:])
    # Depthwise (groups == channels) convolution, no extra padding.
    convolved = F.conv1d(full_sequence, weight.unsqueeze(1), bias, padding=0, groups=num_channels)
    # Keep only the outputs corresponding to the new tokens.
    activated = F.silu(convolved[:, :, -new_len:])
    return activated.to(hidden_states.dtype)
def torch_chunk_gated_delta_rule(
    query,
    key,
    value,
    g,
    beta,
    chunk_size=64,
    initial_state=None,
    output_final_state=False,
    use_qk_l2norm_in_kernel=False,
):
    """
    Pure-torch fallback for the FLA `chunk_gated_delta_rule` kernel (chunked prefill path).

    Inputs are laid out (batch, seq, heads, dim); internally everything is transposed to
    (batch, heads, seq, dim) and computed in float32, then cast back to the input dtype.
    Returns `(core_attn_out, last_recurrent_state)`; the state is `None` unless
    `output_final_state` is set.
    """
    initial_dtype = query.dtype
    if use_qk_l2norm_in_kernel:
        query = l2norm(query, dim=-1, eps=1e-6)
        key = l2norm(key, dim=-1, eps=1e-6)
    query, key, value, beta, g = [
        x.transpose(1, 2).contiguous().to(torch.float32) for x in (query, key, value, beta, g)
    ]

    batch_size, num_heads, sequence_length, k_head_dim = key.shape
    v_head_dim = value.shape[-1]
    # Right-pad the sequence to a multiple of `chunk_size`.
    pad_size = (chunk_size - sequence_length % chunk_size) % chunk_size
    query = F.pad(query, (0, 0, 0, pad_size))
    key = F.pad(key, (0, 0, 0, pad_size))
    value = F.pad(value, (0, 0, 0, pad_size))
    beta = F.pad(beta, (0, pad_size))
    g = F.pad(g, (0, pad_size))
    total_sequence_length = sequence_length + pad_size
    scale = 1 / (query.shape[-1] ** 0.5)
    query = query * scale

    v_beta = value * beta.unsqueeze(-1)
    k_beta = key * beta.unsqueeze(-1)
    # reshape to chunks
    query, key, value, k_beta, v_beta = [
        x.reshape(x.shape[0], x.shape[1], -1, chunk_size, x.shape[-1]) for x in (query, key, value, k_beta, v_beta)
    ]
    g = g.reshape(g.shape[0], g.shape[1], -1, chunk_size)
    mask = torch.triu(torch.ones(chunk_size, chunk_size, dtype=torch.bool, device=query.device), diagonal=0)

    # chunk decay (cumulative log-decay within each chunk)
    g = g.cumsum(dim=-1)
    decay_mask = ((g.unsqueeze(-1) - g.unsqueeze(-2)).tril().exp().float()).tril()
    attn = -((k_beta @ key.transpose(-1, -2)) * decay_mask).masked_fill(mask, 0)
    # Sequential forward substitution to invert the intra-chunk triangular system.
    for i in range(1, chunk_size):
        row = attn[..., i, :i].clone()
        sub = attn[..., :i, :i].clone()
        attn[..., i, :i] = row + (row.unsqueeze(-1) * sub).sum(-2)
    attn = attn + torch.eye(chunk_size, dtype=attn.dtype, device=attn.device)
    value = attn @ v_beta
    k_cumdecay = attn @ (k_beta * g.exp().unsqueeze(-1))

    last_recurrent_state = (
        torch.zeros(batch_size, num_heads, k_head_dim, v_head_dim).to(value)
        if initial_state is None
        else initial_state.to(value)
    )
    core_attn_out = torch.zeros_like(value)
    mask = torch.triu(torch.ones(chunk_size, chunk_size, dtype=torch.bool, device=query.device), diagonal=1)

    # for each chunk: intra-chunk attention + cross-chunk contribution from the carried state
    for i in range(0, total_sequence_length // chunk_size):
        q_i, k_i, v_i = query[:, :, i], key[:, :, i], value[:, :, i]
        attn = (q_i @ k_i.transpose(-1, -2) * decay_mask[:, :, i]).masked_fill_(mask, 0)
        v_prime = (k_cumdecay[:, :, i]) @ last_recurrent_state
        v_new = v_i - v_prime
        attn_inter = (q_i * g[:, :, i, :, None].exp()) @ last_recurrent_state
        core_attn_out[:, :, i] = attn_inter + attn @ v_new
        # Carry the decayed state plus this chunk's delta-rule writes.
        last_recurrent_state = (
            last_recurrent_state * g[:, :, i, -1, None, None].exp()
            + (k_i * (g[:, :, i, -1, None] - g[:, :, i]).exp()[..., None]).transpose(-1, -2) @ v_new
        )

    if not output_final_state:
        last_recurrent_state = None
    # Undo chunking, drop the padding, and restore (batch, seq, heads, dim) in the input dtype.
    core_attn_out = core_attn_out.reshape(core_attn_out.shape[0], core_attn_out.shape[1], -1, core_attn_out.shape[-1])
    core_attn_out = core_attn_out[:, :, :sequence_length]
    core_attn_out = core_attn_out.transpose(1, 2).contiguous().to(initial_dtype)
    return core_attn_out, last_recurrent_state
output_final_state: last_recurrent_state = None core_attn_out = core_attn_out.reshape(core_attn_out.shape[0], core_attn_out.shape[1], -1, core_attn_out.shape[-1]) core_attn_out = core_attn_out[:, :, :sequence_length] core_attn_out = core_attn_out.transpose(1, 2).contiguous().to(initial_dtype) return core_attn_out, last_recurrent_state def torch_recurrent_gated_delta_rule( query, key, value, g, beta, initial_state, output_final_state, use_qk_l2norm_in_kernel=False ): initial_dtype = query.dtype if use_qk_l2norm_in_kernel: query = l2norm(query, dim=-1, eps=1e-6) key = l2norm(key, dim=-1, eps=1e-6) query, key, value, beta, g = [ x.transpose(1, 2).contiguous().to(torch.float32) for x in (query, key, value, beta, g) ] batch_size, num_heads, sequence_length, k_head_dim = key.shape v_head_dim = value.shape[-1] scale = 1 / (query.shape[-1] ** 0.5) query = query * scale core_attn_out = torch.zeros(batch_size, num_heads, sequence_length, v_head_dim).to(value) last_recurrent_state = ( torch.zeros(batch_size, num_heads, k_head_dim, v_head_dim).to(value) if initial_state is None else initial_state.to(value) ) for i in range(sequence_length): q_t = query[:, :, i] k_t = key[:, :, i] v_t = value[:, :, i] g_t = g[:, :, i].exp().unsqueeze(-1).unsqueeze(-1) beta_t = beta[:, :, i].unsqueeze(-1) last_recurrent_state = last_recurrent_state * g_t kv_mem = (last_recurrent_state * k_t.unsqueeze(-1)).sum(dim=-2) delta = (v_t - kv_mem) * beta_t last_recurrent_state = last_recurrent_state + k_t.unsqueeze(-1) * delta.unsqueeze(-2) core_attn_out[:, :, i] = (last_recurrent_state * q_t.unsqueeze(-1)).sum(dim=-2) if not output_final_state: last_recurrent_state = None core_attn_out = core_attn_out.transpose(1, 2).contiguous().to(initial_dtype) return core_attn_out, last_recurrent_state class Qwen3NextGatedDeltaNet(nn.Module): def __init__(self, config: Qwen3NextConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.num_v_heads = config.linear_num_value_heads 
class Qwen3NextGatedDeltaNet(nn.Module):
    """
    Linear-attention token mixer (gated deltanet): QKV projections, a short depthwise causal
    convolution, and a gated delta-rule recurrence, with fused CUDA kernels when available and
    pure-torch fallbacks otherwise.
    """

    def __init__(self, config: Qwen3NextConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.num_v_heads = config.linear_num_value_heads
        self.num_k_heads = config.linear_num_key_heads
        self.head_k_dim = config.linear_key_head_dim
        self.head_v_dim = config.linear_value_head_dim
        self.key_dim = self.head_k_dim * self.num_k_heads
        self.value_dim = self.head_v_dim * self.num_v_heads
        self.conv_kernel_size = config.linear_conv_kernel_dim
        self.layer_idx = layer_idx
        self.activation = config.hidden_act
        self.act = ACT2FN[config.hidden_act]
        self.layer_norm_epsilon = config.rms_norm_eps

        # QKV: one depthwise causal conv shared over the concatenated [q; k; v] channels.
        self.conv_dim = self.key_dim * 2 + self.value_dim
        self.conv1d = nn.Conv1d(
            in_channels=self.conv_dim,
            out_channels=self.conv_dim,
            bias=False,
            kernel_size=self.conv_kernel_size,
            groups=self.conv_dim,
            padding=self.conv_kernel_size - 1,
        )

        # projection of the input hidden states: q/k/v/z in one matrix, b/a (gating params) in another
        projection_size_qkvz = self.key_dim * 2 + self.value_dim * 2
        projection_size_ba = self.num_v_heads * 2
        self.in_proj_qkvz = nn.Linear(self.hidden_size, projection_size_qkvz, bias=False)
        self.in_proj_ba = nn.Linear(self.hidden_size, projection_size_ba, bias=False)

        # time step projection (discretization)
        # instantiate once and copy inv_dt in init_weights of PretrainedModel
        self.dt_bias = nn.Parameter(torch.ones(self.num_v_heads))

        # Decay parameter, stored in log space (re-initialised in `_init_weights`).
        A = torch.empty(self.num_v_heads).uniform_(0, 16)
        self.A_log = nn.Parameter(torch.log(A))
        # Gated RMSNorm over the value head dim; fused FLA kernel when available.
        self.norm = (
            Qwen3NextRMSNormGated(self.head_v_dim, eps=self.layer_norm_epsilon)
            if FusedRMSNormGated is None
            else FusedRMSNormGated(
                self.head_v_dim,
                eps=self.layer_norm_epsilon,
                activation=self.activation,
                device=torch.cuda.current_device(),
                dtype=config.dtype if config.dtype is not None else torch.get_default_dtype(),
            )
        )
        self.out_proj = nn.Linear(self.value_dim, self.hidden_size, bias=False)

        # Kernel dispatch: fused implementations when installed, torch fallbacks otherwise.
        self.causal_conv1d_fn = causal_conv1d_fn
        self.causal_conv1d_update = causal_conv1d_update or torch_causal_conv1d_update
        self.chunk_gated_delta_rule = chunk_gated_delta_rule or torch_chunk_gated_delta_rule
        self.recurrent_gated_delta_rule = fused_recurrent_gated_delta_rule or torch_recurrent_gated_delta_rule

        if not is_fast_path_available:
            logger.warning_once(
                "The fast path is not available because one of the required library is not installed. Falling back to "
                "torch implementation. To install follow https://github.com/fla-org/flash-linear-attention#installation and"
                " https://github.com/Dao-AILab/causal-conv1d"
            )

    def fix_query_key_value_ordering(self, mixed_qkvz, mixed_ba):
        """
        Derives `query`, `key` and `value` tensors from `mixed_qkvz` and `mixed_ba`.
        """
        # Group channels per key head; value heads are folded into their key-head group.
        new_tensor_shape_qkvz = mixed_qkvz.size()[:-1] + (
            self.num_k_heads,
            2 * self.head_k_dim + 2 * self.head_v_dim * self.num_v_heads // self.num_k_heads,
        )
        new_tensor_shape_ba = mixed_ba.size()[:-1] + (self.num_k_heads, 2 * self.num_v_heads // self.num_k_heads)

        mixed_qkvz = mixed_qkvz.view(*new_tensor_shape_qkvz)
        mixed_ba = mixed_ba.view(*new_tensor_shape_ba)
        split_arg_list_qkvz = [
            self.head_k_dim,
            self.head_k_dim,
            (self.num_v_heads // self.num_k_heads * self.head_v_dim),
            (self.num_v_heads // self.num_k_heads * self.head_v_dim),
        ]
        split_arg_list_ba = [self.num_v_heads // self.num_k_heads, self.num_v_heads // self.num_k_heads]
        query, key, value, z = torch.split(mixed_qkvz, split_arg_list_qkvz, dim=3)
        b, a = torch.split(mixed_ba, split_arg_list_ba, dim=3)
        # [b, sq, ng, np/ng * hn] -> [b, sq, np, hn]
        value = value.reshape(value.size(0), value.size(1), -1, self.head_v_dim)
        z = z.reshape(z.size(0), z.size(1), -1, self.head_v_dim)
        b = b.reshape(b.size(0), b.size(1), self.num_v_heads)
        a = a.reshape(a.size(0), a.size(1), self.num_v_heads)
        return query, key, value, z, b, a

    def forward(
        self,
        hidden_states: torch.Tensor,
        cache_params: Qwen3NextDynamicCache | None = None,
        cache_position: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
    ):
        hidden_states = apply_mask_to_padding_states(hidden_states, attention_mask)

        # Set up dimensions for reshapes later
        batch_size, seq_len, _ = hidden_states.shape

        # Single-token decode with an existing state takes the recurrent fast path.
        use_precomputed_states = (
            cache_params is not None
            and cache_params.has_previous_state
            and seq_len == 1
            and cache_position is not None
        )

        # getting projected states from cache if it exists
        if cache_params is not None:
            conv_state = cache_params.conv_states[self.layer_idx]
            recurrent_state = cache_params.recurrent_states[self.layer_idx]

        projected_states_qkvz = self.in_proj_qkvz(hidden_states)
        projected_states_ba = self.in_proj_ba(hidden_states)
        query, key, value, z, b, a = self.fix_query_key_value_ordering(projected_states_qkvz, projected_states_ba)
        query, key, value = (x.reshape(x.shape[0], x.shape[1], -1) for x in (query, key, value))

        mixed_qkv = torch.cat((query, key, value), dim=-1)
        mixed_qkv = mixed_qkv.transpose(1, 2)

        if use_precomputed_states:
            # 2. Convolution sequence transformation
            # NOTE: the conv state is updated in `causal_conv1d_update`
            mixed_qkv = self.causal_conv1d_update(
                mixed_qkv,
                conv_state,
                self.conv1d.weight.squeeze(1),
                self.conv1d.bias,
                self.activation,
            )
        else:
            if cache_params is not None:
                # Left-pad to the conv kernel width and store as the conv state for later decoding.
                conv_state = F.pad(mixed_qkv, (self.conv_kernel_size - mixed_qkv.shape[-1], 0))
                cache_params.conv_states[self.layer_idx] = conv_state

            if self.causal_conv1d_fn is not None:
                mixed_qkv = self.causal_conv1d_fn(
                    x=mixed_qkv,
                    weight=self.conv1d.weight.squeeze(1),
                    bias=self.conv1d.bias,
                    activation=self.activation,
                    seq_idx=None,
                )
            else:
                # Torch fallback: truncate the "same"-padded conv output back to seq_len (causal).
                mixed_qkv = F.silu(self.conv1d(mixed_qkv)[:, :, :seq_len])

        mixed_qkv = mixed_qkv.transpose(1, 2)
        query, key, value = torch.split(
            mixed_qkv,
            [
                self.key_dim,
                self.key_dim,
                self.value_dim,
            ],
            dim=-1,
        )
        query = query.reshape(query.shape[0], query.shape[1], -1, self.head_k_dim)
        key = key.reshape(key.shape[0], key.shape[1], -1, self.head_k_dim)
        value = value.reshape(value.shape[0], value.shape[1], -1, self.head_v_dim)

        beta = b.sigmoid()
        # If the model is loaded in fp16, without the .float() here, A might be -inf
        g = -self.A_log.float().exp() * F.softplus(a.float() + self.dt_bias)
        if self.num_v_heads // self.num_k_heads > 1:
            # Broadcast k heads to match the number of v heads (GQA-style).
            query = query.repeat_interleave(self.num_v_heads // self.num_k_heads, dim=2)
            key = key.repeat_interleave(self.num_v_heads // self.num_k_heads, dim=2)

        if not use_precomputed_states:
            # Prefill: chunked kernel starting from a fresh state.
            core_attn_out, last_recurrent_state = self.chunk_gated_delta_rule(
                query,
                key,
                value,
                g=g,
                beta=beta,
                initial_state=None,
                output_final_state=cache_params is not None,
                use_qk_l2norm_in_kernel=True,
            )
        else:
            # Decode: single-step recurrent kernel continuing from the cached state.
            core_attn_out, last_recurrent_state = self.recurrent_gated_delta_rule(
                query,
                key,
                value,
                g=g,
                beta=beta,
                initial_state=recurrent_state,
                output_final_state=cache_params is not None,
                use_qk_l2norm_in_kernel=True,
            )

        # Update cache
        if cache_params is not None:
            cache_params.recurrent_states[self.layer_idx] = last_recurrent_state

        z_shape_og = z.shape
        # reshape input data into 2D tensor for the gated norm, then restore the shape
        core_attn_out = core_attn_out.reshape(-1, core_attn_out.shape[-1])
        z = z.reshape(-1, z.shape[-1])
        core_attn_out = self.norm(core_attn_out, z)
        core_attn_out = core_attn_out.reshape(z_shape_og)
        core_attn_out = core_attn_out.reshape(core_attn_out.shape[0], core_attn_out.shape[1], -1)

        output = self.out_proj(core_attn_out)
        return output
class Qwen3NextDecoderLayer(Qwen3MoeDecoderLayer):
    """Decoder layer whose token mixer is either gated deltanet or softmax attention, per `config.layer_types`."""

    def __init__(self, config: Qwen3NextConfig, layer_idx: int):
        # Deliberately bypasses Qwen3MoeDecoderLayer.__init__ so the token mixer can be chosen per layer type.
        nn.Module.__init__(self)
        self.hidden_size = config.hidden_size

        # token mixer
        self.layer_type = config.layer_types[layer_idx]
        if self.layer_type == "linear_attention":
            self.linear_attn = Qwen3NextGatedDeltaNet(config, layer_idx)
        elif self.layer_type == "full_attention":
            self.self_attn = Qwen3NextAttention(config, layer_idx)

        # Sparse MoE block every `decoder_sparse_step` layers (unless excluded), dense MLP otherwise.
        if (layer_idx not in config.mlp_only_layers) and (
            config.num_experts > 0 and (layer_idx + 1) % config.decoder_sparse_step == 0
        ):
            self.mlp = Qwen3NextSparseMoeBlock(config)
        else:
            self.mlp = Qwen3NextMLP(config, intermediate_size=config.intermediate_size)

        self.input_layernorm = Qwen3NextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = Qwen3NextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> torch.FloatTensor:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        # Token Mixer (pre-norm residual block)
        if self.layer_type == "linear_attention":
            hidden_states = self.linear_attn(
                hidden_states=hidden_states,
                cache_params=past_key_values,
                cache_position=cache_position,
                attention_mask=attention_mask,
            )
        elif self.layer_type == "full_attention":
            # Self Attention
            hidden_states, _ = self.self_attn(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )
        hidden_states = residual + hidden_states

        # Fully Connected (pre-norm residual block)
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        # For the MoE layers, we need to unpack (they also return router logits)
        if isinstance(hidden_states, tuple):
            hidden_states, _ = hidden_states
        hidden_states = residual + hidden_states

        return hidden_states
class Qwen3NextPreTrainedModel(PreTrainedModel):
    """Base class wiring Qwen3-Next into the Transformers loading / init machinery."""

    config: Qwen3NextConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Qwen3NextDecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn = True
    _supports_sdpa = True
    # Checkpoints may ship multi-token-prediction ("mtp") weights this model does not use.
    _keys_to_ignore_on_load_unexpected = [r"^mtp.*"]
    _can_record_outputs = {
        "router_logits": OutputRecorder(Qwen3NextTopKRouter, index=0),
        "hidden_states": Qwen3NextDecoderLayer,
        "attentions": Qwen3NextAttention,
    }
    # The deltanet conv/recurrent states make generation stateful.
    _is_stateful = True

    @torch.no_grad()
    def _init_weights(self, module):
        """Default init plus Qwen3-Next-specific parameters (deltanet decay/bias, norms, MoE weights)."""
        super()._init_weights(module)
        if isinstance(module, Qwen3NextGatedDeltaNet):
            init.ones_(module.dt_bias)
            # A_log is the log of a uniform(0, 16) decay parameter.
            init.copy_(module.A_log, torch.empty_like(module.A_log).uniform_(0, 16).log_())
        # We initialize with 0s to be 1 centered as the RMSNorm here does (1 + weight)
        elif isinstance(module, Qwen3NextRMSNorm):
            init.zeros_(module.weight)
        elif isinstance(module, Qwen3NextExperts):
            init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
            init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, Qwen3NextSparseMoeBlock):
            init.normal_(module.gate.weight, mean=0.0, std=self.config.initializer_range)
class Qwen3NextModel(Qwen3NextPreTrainedModel):
    """The bare Qwen3-Next decoder stack: embeddings, hybrid decoder layers, final norm."""

    def __init__(self, config: Qwen3NextConfig):
        super().__init__(config)

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
        self.layers = nn.ModuleList(
            [Qwen3NextDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = Qwen3NextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = Qwen3NextRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MoeModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        # The hybrid stack needs its dedicated cache class (attention KV + deltanet states).
        if use_cache and past_key_values is None:
            past_key_values = Qwen3NextDynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # Causal mask for the softmax-attention layers only.
        causal_mask = create_causal_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        # Simpler (possibly None) mask for the linear-attention layers.
        linear_attn_mask = self._update_linear_attn_mask(attention_mask, cache_position)

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            # Each layer gets the mask matching its token-mixer type.
            layer_mask = linear_attn_mask if decoder_layer.layer_type == "linear_attention" else causal_mask

            hidden_states = decoder_layer(
                hidden_states,
                position_embeddings=position_embeddings,
                attention_mask=layer_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return MoeModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )

    def _update_linear_attn_mask(self, attention_mask, cache_position):
        """
        NOTE: Left-padding is used for linear attention mask.
        No need for zeroing states when
            1. Cached forward
            2. Attending to all inputs
        """
        linear_attn_mask = attention_mask
        if cache_position[0] > 0 or (attention_mask is not None and torch.all(attention_mask == 1)):
            linear_attn_mask = None
        return linear_attn_mask
class Qwen3NextForCausalLM(MixtralForCausalLM):
    """Qwen3-Next with a language-modeling head; reuses the Mixtral causal-LM forward (incl. router-logit handling)."""

    def __init__(self, config):
        super().__init__(config)
        self.num_experts = config.num_experts

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Qwen3NextDynamicCache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        output_router_logits: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MoeCausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Qwen3NextForCausalLM

        >>> model = Qwen3NextForCausalLM.from_pretrained("Qwen/Qwen3-Next-80B-A3B-Instruct")
        >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-Next-80B-A3B-Instruct")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        # This override only narrows the signature/docstring; all logic lives in the parent forward.
        return super().forward(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            labels=labels,
            use_cache=use_cache,
            output_router_logits=output_router_logits,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            **kwargs,
        )
```""" return super().forward( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, labels=labels, use_cache=use_cache, output_router_logits=output_router_logits, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs, ) class Qwen3NextForSequenceClassification(LlamaForSequenceClassification): pass class Qwen3NextForTokenClassification(LlamaForTokenClassification): pass class Qwen3NextForQuestionAnswering(LlamaForQuestionAnswering): pass __all__ = [ "Qwen3NextForCausalLM", "Qwen3NextForQuestionAnswering", "Qwen3NextModel", "Qwen3NextPreTrainedModel", "Qwen3NextForSequenceClassification", "Qwen3NextForTokenClassification", ]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/qwen3_next/modular_qwen3_next.py", "license": "Apache License 2.0", "lines": 784, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:tests/models/qwen3_next/test_modeling_qwen3_next.py
# Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model tests for Qwen3-Next.

Qwen3-Next alternates full-attention layers with gated-deltanet ("linear_attention") layers and therefore uses a
special cache (`Qwen3NextDynamicCache`). Several common tests are overridden below to account for that hybrid layout.
"""

import tempfile
import unittest

from parameterized import parameterized

from transformers import is_torch_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, slow, torch_device


if is_torch_available():
    import torch

    from transformers import (
        Cache,
        Qwen3NextModel,
    )
    from transformers.models.qwen3_next.modeling_qwen3_next import Qwen3NextDynamicCache

from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
from ...test_modeling_common import (
    TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION,
    _test_eager_matches_sdpa_inference,
)


class Qwen3NextModelTester(CausalLMModelTester):
    # Only the hybrid-specific hyperparameters are overridden; everything else comes from CausalLMModelTester.
    if is_torch_available():
        base_model_class = Qwen3NextModel

    def __init__(self, parent):
        super().__init__(parent=parent)
        # Alternate a gated-deltanet layer with a full-attention layer (the pattern is cycled over all layers)
        self.layer_types = ["linear_attention", "full_attention"]
        # Small gated-deltanet dimensions to keep the test model tiny
        self.linear_conv_kernel_dim = 2
        self.linear_key_head_dim = 16
        self.linear_value_head_dim = 16
        self.linear_num_key_heads = 4
        self.linear_num_value_heads = 8


@require_torch
class Qwen3NextModelTest(CausalLMModelTest, unittest.TestCase):
    model_tester_class = Qwen3NextModelTester

    def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
        """Qwen3-Next has a special Cache as it alternates with gated deltanet layers, so only the indices listed in
        `transformer_layers` hold regular key/value tensors."""
        self.assertIsInstance(past_key_values, Qwen3NextDynamicCache)

        # Expected per-layer shape: (batch, kv heads, seq_length, head_dim)
        num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads)
        head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        expected_shape = (batch_size, num_heads, seq_length, head_dim)

        # Only the attention (non-deltanet) layers are checked
        attention_layer_indices = past_key_values.transformer_layers
        self.assertListEqual(
            [past_key_values.key_cache[idx].shape for idx in attention_layer_indices],
            [expected_shape] * len(attention_layer_indices),
        )
        self.assertListEqual(
            [past_key_values.value_cache[idx].shape for idx in attention_layer_indices],
            [expected_shape] * len(attention_layer_indices),
        )

    def _check_caches_are_equal(self, cache1: Cache, cache2: Cache):
        """Qwen3-Next has a special Cache as it alternates with gated deltanet layers; deltanet layers have no
        key/value entries, hence the `is not None` guard."""
        if not len(cache1) == len(cache2):
            raise ValueError("Both caches do not have the same number of layers.")

        num_layers = len(cache1)
        for idx in range(num_layers):
            if cache1.key_cache[idx] is not None:
                torch.testing.assert_close(cache1.key_cache[idx], cache2.key_cache[idx])
                torch.testing.assert_close(cache1.value_cache[idx], cache2.value_cache[idx])

    def test_attention_outputs(self):
        """Needs to be overwritten as Qwen3-Next alternates between attention layers and gated deltanet layers:
        only the full-attention layers produce attention maps, so the expected count is the number of
        "full_attention" entries in `config.layer_types`, not the total layer count."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        # force eager attention to support output attentions
        config._attn_implementation = "eager"

        seq_len = getattr(self.model_tester, "seq_length", None)

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), sum(layer == "full_attention" for layer in config.layer_types))

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), sum(layer == "full_attention" for layer in config.layer_types))

            self.assertListEqual(list(attentions[0].shape[-3:]), [config.num_attention_heads, seq_len, seq_len])
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self_attentions = outputs.attentions

            self.assertEqual(out_len + 1, len(outputs))
            self.assertEqual(len(self_attentions), sum(layer == "full_attention" for layer in config.layer_types))
            self.assertListEqual(list(self_attentions[0].shape[-3:]), [config.num_attention_heads, seq_len, seq_len])

    @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
    def test_eager_matches_sdpa_inference(
        self,
        name,
        dtype,
        padding_side,
        use_attention_mask,
        output_attentions,
        enable_kernels,
    ):
        """
        We need to overwrite this without the fp16 part of the dtype, because the slow path
        `torch_chunk_gated_delta_rule` is not robust enough (flaky test) in fp16 due to upscaling in fp32 and then
        downscaling to fp16 at the end
        """
        if dtype == "fp16":
            self.skipTest("Not robust in fp16")
        _test_eager_matches_sdpa_inference(
            self,
            name,
            dtype,
            padding_side,
            use_attention_mask,
            output_attentions,
            enable_kernels,
        )

    @unittest.skip("The specific cache format cannot be instantiated from dp/ddp data.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @require_torch_multi_gpu
    def test_can_use_device_map(self):
        """
        Test that this model can be dispatched on multiple gpus. It's not obvious as the Cache is not standard,
        and each layer needs to use the correct device on which it resides (i.e. it needs to be lazy initialized).
        """
        for model_class in self.all_generative_model_classes:
            config, inputs_dict = self.prepare_config_and_inputs_for_generate()
            # Inputs go to device 0, where the embedding layer and lm_head are placed by the device_map below
            inputs_dict = {k: v.to(0) if isinstance(v, torch.Tensor) else v for k, v in inputs_dict.items()}
            # We want the linear attention layer to reside on device 1 with the device map (i.e. not the
            # first/default device), to check if cache initialization is on the correct device
            config.layer_types = ["full_attention", "linear_attention"]
            model = model_class(config).eval()
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                del model
                model = model_class.from_pretrained(
                    tmpdirname,
                    device_map={
                        "lm_head": 0,
                        "model.embed_tokens": 0,
                        "model.norm": 0,
                        "model.layers.0": 0,
                        "model.layers.1": 1,
                    },
                )

            # Check that we indeed use 2 different devices for each layer
            self.assertTrue({param.device for param in model.model.layers[0].parameters()} == {torch.device(0)})
            self.assertTrue({param.device for param in model.model.layers[1].parameters()} == {torch.device(1)})

            # This should not crash
            _ = model.generate(**inputs_dict, max_new_tokens=5, min_new_tokens=5)


@slow
class Qwen3NextIntegrationTest(unittest.TestCase):
    # No integration tests yet — placeholder for slow, checkpoint-based tests.
    pass
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/qwen3_next/test_modeling_qwen3_next.py", "license": "Apache License 2.0", "lines": 174, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:src/transformers/generation/continuous_batching/cache_manager.py
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Block-level KV-cache management for continuous batching.

`BlockManager` tracks free/in-use physical cache blocks and enables prefix sharing via hash-chained `Block` objects.
`CacheAllocator` subclasses map per-request logical positions to physical cache indices, for full-attention and
sliding-window-attention layer groups respectively.
"""

from abc import ABC, abstractmethod
from collections import deque
from collections.abc import Iterator
from math import ceil
from typing import TypeVar

from .requests import logger


T = TypeVar("T")


def reverse_enumerate(xs: list[T]) -> Iterator[tuple[int, T]]:
    """Yields (index, item) pairs of (xs) from the last item to the first."""
    index = len(xs) - 1
    for x in xs[::-1]:
        yield index, x
        index -= 1


class Block:  # TODO: rename to ShareableBlock and update the docs
    """A class to represent a block managed by the block manager.

    We say that a block is complete when the physical KV cache it points to is fully computed. A block can have a
    parent, which is the block that came before in the sequence. Once a block is complete, it is given a hash, which
    takes into account the tokens ids of the block, the layer (group_id) it belongs to and its parent's hash (if there
    is a parent)."""

    def __init__(self, id_: int, parent_id: int | None, group_id: int) -> None:
        self.id: int = id_
        self.parent_id: int | None = parent_id
        self.group_id: int = group_id
        # hash stays None until the block is complete; a newly created block starts with one reference
        self.hash: int | None = None
        self.ref_count: int = 1

    def __repr__(self) -> str:
        return f"Block(id={self.id}, parent_id={self.parent_id}, group_id={self.group_id}, hash={self.hash}, ref_count={self.ref_count})"

    @property
    def is_complete(self) -> bool:
        # Having a hash is the defining property of a complete block
        return self.hash is not None


class BlockManager:
    """A class to manage the number of free blocks and block re-use.

    When a block becomes in use, a flag is passed to determine if the block is shareable or not. If it is, then a
    Block object is created and kept track of internally. It can have the following states:
    - in use: one or more requests references this block, thus it cannot be written over. The number of requests
      referencing this block is stored as ref_count in the Block object.
    - un-initialized: the block points to a space in the KV cache tensor that contains no data yet. Those blocks can
      be given as free blocks to new requests without any overhead.
    - initialized: the block is complete and was used by one or more request that are finished. It contains KV cache
      data and its hash is stored in the hash table. If a new request needs a block with the same hash, we increase
      the ref_count of the block and remove it from the list of initialized blocks, because it is now in use. Still,
      the block can be freed if no un-initialized blocks are left. In that case, we remove its hash from the hash
      table.

    If the block is not shareable, we just use the block manager as a FIFO structure where blocks are either free or
    in use. Sharability is determined by the type of cache allocator: blocks created for full attention layers are
    shareable, while blocks created for sliding window attention layers are not.

    There is no structure to keep track of the blocks in use: if a block is neither un-initialized nor initialized,
    it is in use.
    """

    def __init__(self, num_blocks: int, block_size: int) -> None:
        """Initializes the block manager with a given number of blocks (num_blocks) of size (block_size)."""
        self.num_blocks = num_blocks
        self.block_size = block_size
        self._uninit_block_ids = deque(range(num_blocks))
        self._init_block_ids: dict[int, None] = {}  # effectively act as an ordered set
        self._hash_to_id: dict[int, int] = {}  # hash of a complete, re-usable block -> its block id
        self._id_to_block: dict[int, Block] = {}  # block id -> Block object, for shareable blocks only

    @property
    def num_free_blocks(self) -> int:
        """Returns the number of free blocks left. Both initialized and uninitialized blocks are considered free."""
        return len(self._uninit_block_ids) + len(self._init_block_ids)

    def has_enough_free_blocks(self, n_blocks: int) -> bool:
        """Checks if there are enough free blocks to allocate the requested number of blocks (n_blocks). If there are
        not enough uninitialized blocks, we uninitialize the required number of initialized blocks (evicting their
        hashes so they can no longer be re-used for prefix sharing)."""
        # Exit early if there are enough uninitialized blocks
        if len(self._uninit_block_ids) >= n_blocks:
            return True
        # Exit early if even after uninitializing all initialized blocks, there are not enough free blocks
        block_to_uninitialize = n_blocks - len(self._uninit_block_ids)
        if len(self._init_block_ids) < block_to_uninitialize:
            return False
        # Uninitialize the required amount of blocks
        for _ in range(block_to_uninitialize):
            id_to_uninitialize = self._init_block_ids.popitem()[0]
            block = self._id_to_block[id_to_uninitialize]
            # Since the block is initialized it must have a hash, thus no need to check .hash is not None
            self._hash_to_id.pop(block.hash)  # ty:ignore[invalid-argument-type]
            self._uninit_block_ids.append(id_to_uninitialize)
        return True

    def get_free_blocks(
        self, n_blocks: int, last_block_id: int | None, shareable: bool, group_id: int
    ) -> list[int] | None:
        """Returns a list of (n_blocks) free blocks and marks them as no longer free in the internal data structures.
        If the (shareable) flag is set to True, a Block object is created to keep track of each block, with
        (last_block_id) indicating the last block id in the sequence, also named the parent block. If the manager
        cannot find enough free blocks, it returns None."""
        if not self.has_enough_free_blocks(n_blocks):
            return None
        allocated_block_ids = [self._uninit_block_ids.popleft() for _ in range(n_blocks)]
        # If the block is shareable, we keep track of the allocated blocks as partial blocks
        if shareable:
            for block_id in allocated_block_ids:
                block = Block(block_id, last_block_id, group_id)
                self._id_to_block[block_id] = block
                # Each new block becomes the parent of the next one, forming a chain
                last_block_id = block_id
        # In both cases, we return the allocated block ids
        return allocated_block_ids

    def fork_blocks(
        self, parent_blocks: list[int], num_forks: int, shareable: bool, group_id: int
    ) -> tuple[list[list[int]] | None, list[int], list[int]]:
        """Fork a given list of (parent_blocks) as many times as (num_forks). If the blocks are (shareable), we use
        reference on the blocks that are complete. Otherwise, we allocate new blocks and keep track of their indices
        to later copy the physical cache. For instance, when forking 4 blocks for 2 children:

        Parent blocks: [0, 1, 2, 3], with all blocks being complete except the last one (block 3).

        --------------------------------- IF BLOCKS ARE NOT SHAREABLE ---------------------------------
        Forked blocks lists: [[5, 6, 7, 8], [9, 10, 11, 12]]
        Copy source:      [0, 1, 2, 3, 0, 1,  2,  3]
                           ↓  ↓  ↓  ↓  ↓  ↓   ↓   ↓
        Copy destination: [5, 6, 7, 8, 9, 10, 11, 12]  → 8 blocks are newly allocated and copied

        --------------------------------- IF BLOCKS ARE SHAREABLE -------------------------------------
        Forked blocks lists: [[0, 1, 2, 5], [0, 1, 2, 6]]
        Copy source:      [3, 3]  (block 3 is not complete so it's copied, not referenced)
                           ↓  ↓
        Copy destination: [5, 6]  → only 2 blocks are newly allocated and copied
        """
        # First phase: reference all complete blocks (they form a prefix of the parent's chain, so stop at the
        # first incomplete one)
        forked_by_reference = []
        if shareable:
            for block_id in parent_blocks:
                block = self._id_to_block[block_id]
                if block.is_complete:
                    forked_by_reference.append(block.id)
                    block.ref_count += num_forks
                else:
                    break
        # Early return if we have forked all blocks by reference
        blocks_to_copy = len(parent_blocks) - len(forked_by_reference)
        if blocks_to_copy == 0:
            return [forked_by_reference[:] for _ in range(num_forks)], [], []
        # From now on, each child will have its own list of blocks
        forked_blocks_lists = []
        copy_src = []
        copy_dst = []
        # Second phase: allocate new blocks if needed
        # NOTE(review): if get_free_blocks fails for a later child, the ref_counts already increased above and the
        # blocks allocated for earlier children are not rolled back here — callers raise on the None return; confirm
        # cleanup is handled upstream.
        parent_id = forked_by_reference[-1] if forked_by_reference else None
        for _ in range(num_forks):
            allocated_block_ids = self.get_free_blocks(blocks_to_copy, parent_id, shareable, group_id)
            if allocated_block_ids is None:
                return None, [], []
            forked_blocks_lists.append(forked_by_reference + allocated_block_ids)
            copy_src.extend(parent_blocks[-blocks_to_copy:])
            copy_dst.extend(allocated_block_ids)
        return forked_blocks_lists, copy_src, copy_dst

    def increase_ref_count(self, block_id: int) -> None:
        """Increases the reference count of a given (block_id)."""
        block = self._id_to_block[block_id]
        block.ref_count += 1
        # Going from 0 to 1 reference means the block was initialized (free) and is now back in use
        if block.ref_count == 1:
            self._init_block_ids.pop(block_id)

    def decrease_ref_count(self, block_id: int) -> None:
        """Decreases the reference count of a given (block_id). If the reference count reaches 0, the block is no
        longer in use, and becomes initialized (if it was complete) or uninitialized (if it was incomplete)."""
        block = self._id_to_block[block_id]
        block.ref_count -= 1
        if block.ref_count == 0:
            if block.is_complete:
                # Keep the block (and its hash) around so it can be re-used for prefix sharing
                self._init_block_ids[block_id] = None
            else:
                # Incomplete blocks have no re-usable data: drop the Block object and recycle the id
                self._id_to_block.pop(block_id)
                self._uninit_block_ids.append(block_id)

    def free_blocks(self, blocks: list[int], shareable: bool) -> None:
        """Marks a list of (blocks) as free. If the blocks were not (shareable), we simply add them to the
        uninitialized blocks queue. Otherwise, their new state depends on whether they are complete."""
        if shareable:
            for block_id in blocks:
                self.decrease_ref_count(block_id)
        else:
            self._uninit_block_ids.extend(blocks)

    def uninitialize_unshared_block(self, block_id: int) -> None:
        """Marks a block as uninitialized. Raises an error if the block has more than one reference."""
        # Make sure the block has only one reference and remove it from the block table
        block = self._id_to_block.pop(block_id)
        if block.ref_count > 1:
            raise RuntimeError(f"Block {block_id} has more than one reference: {block.ref_count = }")
        # Add the block to the uninitialized blocks queue
        self._uninit_block_ids.append(block_id)

    def mark_shareable_blocks_as_complete(
        self, num_complete_blocks: int, allocated_blocks: list[int], prompt_ids: list[int]
    ) -> None:
        """Among the list of (allocated_blocks), mark (num_complete_blocks) incomplete blocks as now complete. The
        list of (prompt_ids) is used to compute the hash of the new block. If a block with the same hash already
        exists, the entry in (allocated_blocks) is replaced in place by the existing block's id (deduplication)."""
        # Look for the first complete block, starting from the last block in the sequence
        parent_hash = None
        incomplete_blocks: list[tuple[int, Block]] = []
        for i, block_id in reverse_enumerate(allocated_blocks):
            block = self._id_to_block[block_id]
            if block.is_complete:
                parent_hash = block.hash
                break
            incomplete_blocks.append((i, block))
        # Now go through the incomplete blocks (oldest first, since we pop from the end) and update them
        new_parent_id = None
        while incomplete_blocks:
            i, block = incomplete_blocks.pop()
            # If the parent id has been updated, we apply the change
            if new_parent_id is not None:
                block.parent_id = new_parent_id
                new_parent_id = None
            # If we have set the hash for all complete blocks, we can stop
            if num_complete_blocks == 0:
                break
            # Otherwise, we compute the hash
            num_complete_blocks -= 1
            tokens = prompt_ids[i * self.block_size : (i + 1) * self.block_size]
            block.hash = self.compute_hash(parent_hash, tokens, block.group_id)
            existing_block_id = self._hash_to_id.get(block.hash)
            # If there was a different block with the same hash, we reference the existing block instead
            if existing_block_id is not None:
                if existing_block_id == block.id:
                    # This should not happen, but is not a problem in itself, so we just log a warning
                    logger.warning(f"Block {block.id} was marked as complete more than once")
                else:
                    logger.debug(f"Found existing block {existing_block_id} for block {block.id}")
                    allocated_blocks[i] = existing_block_id
                    new_parent_id = existing_block_id
                    self.increase_ref_count(existing_block_id)
                    self.uninitialize_unshared_block(block.id)
            # Otherwise, we add the completed block to the hash table
            else:
                logger.debug(f"Adding new block {block.id} (group {block.group_id}) with hash {block.hash}")
                self._hash_to_id[block.hash] = block.id
            # Update loop variables
            parent_hash = block.hash

    def compute_hash(self, parent_hash: int | None, tokens: list[int], group_id: int) -> int:
        """Computes the hash of a block identified by the (tokens) it contains, its (parent_hash) and the layer
        (group_id) it belongs to. If the block has no parent, the parent hash is None."""
        return hash((parent_hash, tuple(tokens), group_id))


class CacheAllocator(ABC):
    """Abstract base class for cache managers. Cache managers keep track of per-request cache allocations, determine
    when a new physical block needs to be allocated and compute physical indices for reading or writing to the
    cache."""

    _index: int  # index of the layer group served by this allocator
    block_table: dict[str, list[int]]  # request_id -> list of block_ids allocated to the request
    uses_block_sharing: bool  # flag to determine if the blocks are shareable

    @abstractmethod
    def allocate_blocks(self, n_blocks: int, request_id: str, block_manager: BlockManager) -> int | None:
        """Allocates (n_blocks) for a given (request_id) using the (block_manager). Returns the number of blocks
        allocated if successful and None otherwise."""

    def free_blocks(self, request_id: str, block_manager: BlockManager) -> None:
        """Frees all blocks associated with a (request_id) using the (block_manager)."""
        if request_id in self.block_table:
            blocks_to_free = self.block_table.pop(request_id)
            block_manager.free_blocks(blocks_to_free, shareable=self.uses_block_sharing)
        else:
            # Best-effort: freeing an unknown request is logged, not raised
            logger.warning(
                f"CacheAllocator {self._index} attempted to free blocks for non-existent request_id: {request_id}"
            )

    @abstractmethod
    def get_read_indices(self, request_id: str, past_length: int, query_length: int) -> list[int]:
        """Returns the physical indices of where to read request_id's cache in the cache tensor."""

    @abstractmethod
    def get_write_indices(self, request_id: str, past_length: int, query_length: int) -> list[int]:
        """Returns the physical indices of where to write request_id's cache in the cache tensor."""

    def fork_blocks(
        self, parent_request_id: str, children_request_ids: list[str], block_manager: BlockManager
    ) -> tuple[list[int], list[int]]:
        """Forks the cache blocks of a (parent_request_id) to a list of (children_request_ids). To manage the blocks,
        the (block_manager) is used. When forking, the child's block are either shared with the parent, or they need
        to be copied from the parent. Hence we return two lists of blocks that need to be copied: one for the source
        and one for the destination."""
        # Sanity checks
        if parent_request_id not in self.block_table:
            raise ValueError(f"No block table found for request {parent_request_id}")
        # Actual forking
        parent_blocks = self.block_table[parent_request_id]
        list_forked_blocks, copy_src, copy_dst = block_manager.fork_blocks(
            parent_blocks=parent_blocks,
            num_forks=len(children_request_ids),
            shareable=self.uses_block_sharing,
            group_id=self._index,
        )
        if list_forked_blocks is None:
            raise ValueError(f"Failed to fork blocks for request {parent_request_id}")
        # Update the block table for all children requests
        for children_request_id, forked_blocks in zip(children_request_ids, list_forked_blocks):
            if children_request_id in self.block_table:
                raise ValueError(f"Block table already exists for request {children_request_id}")
            self.block_table[children_request_id] = forked_blocks
        return copy_src, copy_dst


class FullAttentionCacheAllocator(CacheAllocator):
    """Cache manager for a group of full attention layers."""

    def __init__(self, index: int, block_size: int, allow_block_sharing: bool) -> None:
        """Initializes the cache manager for a group of full attention layers.

        Args:
            - index: the index of the associated layer group
            - block_size: the size of the blocks in the cache
            - allow_block_sharing: whether blocks allocated by this allocator may be shared across requests
        """
        self._index = index
        self.uses_block_sharing = allow_block_sharing
        self.block_size = block_size
        self.block_table = {}

    def allocate_blocks(self, n_blocks: int, request_id: str, block_manager: BlockManager) -> int | None:
        """Allocate (n_blocks) for a given (request_id) using the (block_manager). Returns the number of blocks
        allocated if successful and None otherwise. For group of full attention layers, we always allocate the number
        of requested blocks."""
        # Make sure the request_id is in the block table and get the last block id, which will be the parent of
        # the newly allocated blocks
        block_table = self.block_table.get(request_id, [])
        if block_table:
            last_block_id = block_table[-1]
        else:
            self.block_table[request_id] = block_table  # TODO: check the impact of making this a deque
            last_block_id = None
        # Actual allocation, return early if failed
        allocated_blocks = block_manager.get_free_blocks(n_blocks, last_block_id, self.uses_block_sharing, self._index)
        if allocated_blocks is None:
            return None
        block_table.extend(allocated_blocks)
        return n_blocks

    def get_read_indices(self, request_id: str, past_length: int, query_length: int) -> list[int]:
        """Returns the physical indices of where to read request_id's cache. For a group of full attention layers, we
        first write the new cache to the cache tensor and then read the entire cache from the beginning to the end."""
        # Retrieve the block table for the request and raise an error if it doesn't exist
        block_table = self.block_table.get(request_id)
        if block_table is None:
            raise ValueError(f"No block table found for request {request_id}")
        # Compute auxiliary variable so we can perform only two loops
        total_length = past_length + query_length
        num_full_blocks = total_length // self.block_size
        remainder = total_length % self.block_size
        # Compute the physical indices: all full blocks, then the used prefix of the last (partial) block
        physical_indices = []
        for b in range(num_full_blocks):
            start = block_table[b] * self.block_size
            physical_indices.extend(range(start, start + self.block_size))
        if remainder:
            start = block_table[num_full_blocks] * self.block_size
            physical_indices.extend(range(start, start + remainder))
        return physical_indices

    def get_write_indices(self, request_id: str, past_length: int, query_length: int) -> list[int]:
        """Returns the physical indices for writing to the cache. For a group of full attention layers, we write the
        new cache as a continuation of the existing cache for the same request."""
        block_table = self.block_table.get(request_id)
        if block_table is None:
            raise ValueError(f"No block table found for request {request_id}")
        # Compute auxiliary variables so we can perform only one loop
        start_block = past_length // self.block_size
        start_offset = past_length % self.block_size
        end_pos = past_length + query_length
        end_block = (end_pos - 1) // self.block_size  # -1 because if end_pos == block_size, we still end on block 0
        # Compute the physical indices
        physical_indices = []
        for b in range(start_block, end_block + 1):
            block_start = block_table[b] * self.block_size
            # First block may start mid-block, last block may end mid-block
            local_start = start_offset if b == start_block else 0
            local_end = (end_pos - 1) % self.block_size + 1 if b == end_block else self.block_size
            physical_indices.extend(range(block_start + local_start, block_start + local_end))
        return physical_indices


class SlidingAttentionCacheAllocator(CacheAllocator):
    """Cache manager for sliding window attention layers."""

    def __init__(self, index: int, block_size: int, sliding_window: int) -> None:
        """Initializes the cache manager for a group of sliding window attention layers.

        Args:
            - index: the index of the associated layer group
            - block_size: the size of the blocks in the cache
            - sliding_window: the size of the sliding window
        """
        self._index = index
        self.uses_block_sharing = False  # no block sharing with sliding window layers
        self.block_size = block_size
        self.sliding_window = sliding_window
        # A request never needs more physical blocks than one full sliding window
        self._max_blocks_per_request = ceil(self.sliding_window / self.block_size)
        self.block_table = {}

    def allocate_blocks(self, n_blocks: int, request_id: str, block_manager: BlockManager) -> int | None:
        """Allocate (n_blocks) for a given (request_id) using the (block_manager). Returns the number of blocks
        allocated if successful and None otherwise. For group of sliding window attention layers, we only allocate up
        to the point where we can fit an entire sliding window in the cache tensor."""
        if request_id not in self.block_table:
            self.block_table[request_id] = []
        # Early return if we are already at the max number of blocks per request
        already_allocated = len(self.block_table[request_id])
        if already_allocated == self._max_blocks_per_request:
            return 0
        # Compute actual number of blocks to allocate (capped at the sliding-window worth of blocks)
        after_allocation = min(already_allocated + n_blocks, self._max_blocks_per_request)
        actual_n_blocks = after_allocation - already_allocated
        # Classic allocation
        allocated_blocks = block_manager.get_free_blocks(
            actual_n_blocks, None, self.uses_block_sharing, self._index
        )  # no block sharing w/ sliding window
        if allocated_blocks is None:
            return None
        self.block_table[request_id].extend(allocated_blocks)
        return actual_n_blocks

    def get_read_indices(self, request_id: str, past_length: int, query_length: int) -> list[int]:
        """Returns the physical indices of where to read request_id's cache in the cache tensor. For a group of
        sliding window attention layers, we read from the cache tensor before writing on it, because the new cache
        can overwrite the old one. To form the cache + new key / values states, we read at most sliding_window - 1
        cached positions and then manually add the new key / values states after. Hence the -1 indices which indicate
        where to store the new key or values indices."""
        # Retrieve the block table for the request and raise an error if it doesn't exist
        block_table = self.block_table.get(request_id)
        if block_table is None:
            raise ValueError(f"No block table found for request {request_id}")
        # Apply sliding window
        # NOTE(review): once past_length >= sliding_window, the read starts at past_length % sliding_window — under a
        # plain p -> p % sliding_window write mapping that slot would hold position past_length - sliding_window, not
        # the start of the last (sliding_window - 1) positions. Confirm the intended position convention against
        # get_write_indices (its front-padding shifts the mapping at prefill) and the mask-building code.
        start_index = 0 if past_length < self.sliding_window else past_length % self.sliding_window
        cache_length = min(past_length, self.sliding_window - 1)
        # Compute the physical indices, wrapping around the ring buffer
        physical_indices = []
        for i in range(start_index, start_index + cache_length):
            i %= self.sliding_window
            block_idx = i // self.block_size
            block_offset = i % self.block_size
            physical_index = block_table[block_idx] * self.block_size + block_offset
            physical_indices.append(physical_index)
        # Trailing -1 placeholders are filled with the new key/value states by the caller
        return physical_indices + [-1] * query_length

    def get_write_indices(self, request_id: str, past_length: int, query_length: int) -> list[int]:
        """Returns the physical indices of where to write request_id's cache in the cache tensor. For a group of
        sliding window attention layers, we write the new cache in rolling-buffer kind of way: if we reach the end of
        the allocated physical cache, we start writing from the beginning of the physical cache again."""
        # Retrieve the block table for the request and raise an error if it doesn't exist
        block_table = self.block_table.get(request_id)
        if block_table is None:
            raise ValueError(f"No block table found for request {request_id}")
        # Apply sliding window: only the last min(query_length, sliding_window) tokens are actually kept; earlier
        # query tokens are written to index -1 (i.e. discarded)
        start_index = past_length % self.sliding_window
        cache_length = min(query_length, self.sliding_window)
        padding_length = query_length - cache_length
        # Compute the physical indices, wrapping around the ring buffer
        physical_indices = []
        for i in range(start_index, start_index + cache_length):
            i %= self.sliding_window
            block_idx = i // self.block_size
            block_offset = i % self.block_size
            physical_index = block_table[block_idx] * self.block_size + block_offset
            physical_indices.append(physical_index)
        if padding_length > 0:
            physical_indices = [-1] * padding_length + physical_indices
        return physical_indices
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/generation/continuous_batching/cache_manager.py", "license": "Apache License 2.0", "lines": 439, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:tests/generation/test_continuous_batching.py
# Copyright 2025 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import itertools import unittest from unittest.mock import patch import torch from parameterized import parameterized from transformers import ( AutoConfig, AutoModelForCausalLM, AutoTokenizer, CompileConfig, GenerationConfig, LogitsProcessorList, ) from transformers.generation.continuous_batching.cache import ( PagedAttentionCache, SlidingAttentionCacheAllocator, group_layers_by_attn_type, ) from transformers.generation.continuous_batching.cache_manager import FullAttentionCacheAllocator from transformers.generation.continuous_batching.continuous_api import ContinuousBatchProcessor from transformers.generation.continuous_batching.input_outputs import build_attention_mask from transformers.testing_utils import ( Expectations, require_deterministic_for_xpu, require_flash_attn, require_torch_accelerator, slow, torch_device, ) from transformers.utils import is_flash_attn_2_available, is_kernels_available def flush_memory(flush_compile: bool = True) -> None: gc.collect() # If needed, flush everything related to torch.compile if flush_compile: # Dynamo resets torch._dynamo.reset() torch._dynamo.reset_code_caches() if hasattr(torch._inductor, "codecache"): # Clear FX graph cache if hasattr(torch._inductor.codecache, "FxGraphCache"): torch._inductor.codecache.FxGraphCache.clear() # Clear PyCodeCache if hasattr(torch._inductor.codecache, "PyCodeCache"): 
torch._inductor.codecache.PyCodeCache.cache_clear() # Clear TritonFuture cache (for async compilation) if hasattr(torch._inductor.codecache, "TritonFuture"): if hasattr(torch._inductor.codecache.TritonFuture, "_compile_cache"): torch._inductor.codecache.TritonFuture._compile_cache.clear() # Clear CUDA cache if torch.cuda.is_available(): torch.cuda.empty_cache() torch.cuda.synchronize() elif torch.xpu.is_available(): torch.xpu.empty_cache() torch.xpu.synchronize() gc.collect() class ContinuousBatchingNonGenerationTest(unittest.TestCase): @parameterized.expand( [ (None, None, "0"), (None, 4096, "0"), ("f", None, "0"), ("ffff", None, "0000"), ("sssss", 4096, "00000"), ("fs", 4096, "01"), ("ssfssf", 4096, "001221"), ("ssssf", 4096, "01234"), ("fffsffs", 4096, "0123456"), ] ) def test_group_layers( self, layer_types_str: str | None, sliding_window: int | None, expected_groups: str, ) -> None: """Test the layer grouping algorithm of the hybrid allocator.""" # Take a config and change the layer_types attribute to the mix we want config = AutoConfig.from_pretrained("HuggingFaceTB/SmolLM-1.7B") if layer_types_str is not None: layer_types = [{"f": "full_attention", "s": "sliding_window"}[char] for char in layer_types_str] else: layer_types = None config.num_hidden_layers = len(expected_groups) config.layer_types = layer_types config.sliding_window = sliding_window expected_lg = {} for i, group in enumerate(expected_groups): group = int(group) expected_lg[group] = expected_lg.get(group, []) + [i] expected_layer_groups = [expected_lg[i] for i in sorted(expected_lg.keys())] # Test layer groups formation layer_groups, group_types = group_layers_by_attn_type(config) self.assertEqual( sorted(expected_layer_groups), sorted(layer_groups), f"Test failed for: {layer_types_str = }, {sliding_window = }, {expected_layer_groups = }, {layer_groups = }", ) # If layer_types is provided, check that group_types matches the type of the all layers in each group if layer_types is not None: for 
layer_group, group_type in zip(layer_groups, group_types): layer_types = [config.layer_types[i] for i in layer_group] self.assertEqual(layer_types, [group_type] * len(layer_types)) # If layer_types is None, all groups should be of the same type else: for group_type in group_types: sliding_window = getattr(config, "sliding_window", None) expected_group_type = "sliding_attention" if sliding_window is not None else "full_attention" self.assertEqual( group_type, expected_group_type, f"Test failed for: {layer_types_str = }, {sliding_window = }, {group_types = }", ) @parameterized.expand( [ ([0, 4], [0, 4], 1, ["1000", "1100", "1110", "1111"]), ([0, 4], [0, 4], 2, ["1000", "1100", "0110", "0011"]), ([0, 3], [0, 5], 1, ["11100", "11110", "11111"]), ([0, 3], [0, 5], 3, ["11100", "01110", "00111"]), ([0, 3, 6], [0, 3, 6], 1, ["100000", "110000", "111000", "000100", "000110", "000111"]), ([0, 3, 6], [0, 3, 6], 2, ["100000", "110000", "011000", "000100", "000110", "000011"]), ] ) def test_attention_mask( self, cumulative_seqlens_q: list[int], cumulative_seqlens_k: list[int], sliding_window: int, # the sliding window size, 1 means no sliding window str_expected_mask_lines: list[str], # the attention mask, broken down by line as a string of 0s and 1s ) -> None: """Tests the correctness of the attention mask used in the continuous batching API.""" # Build expected mask minus_inf = torch.finfo(torch.float32).min expected_mask = torch.empty((cumulative_seqlens_q[-1], cumulative_seqlens_k[-1]), dtype=torch.float32) for i, line in enumerate(str_expected_mask_lines): expected_mask[i, :] = torch.tensor([minus_inf if c == "0" else 0 for c in line]) # Build actual mask actual_mask = torch.full_like(expected_mask, minus_inf) # function modifies in place build_attention_mask(actual_mask, cumulative_seqlens_q, cumulative_seqlens_k, sliding_window) # Check that the actual mask matches the expected mask matches = (expected_mask == actual_mask).all() # If it doesn't match, print the masks in 
a readable form and fail the test if not matches: str_mask = [ "".join("1" if x == 0 else "0" for x in token_attn_vector) for token_attn_vector in actual_mask ] str_mask = "\n".join(str_mask) str_expected_mask = "\n".join(str_expected_mask_lines) self.fail( f"Test failed for: {cumulative_seqlens_q = }, {cumulative_seqlens_k = }, {sliding_window = }\n" f"Expected mask:\n{str_expected_mask}\n" f"Actual mask:\n{str_mask}" ) @parameterized.expand( [ # Case 1: Only full attention groups, allocation succeeds # needed_blocks = 2 * 1 = 2, free_blocks = 10 -> 2 <= 10 = True (2, 0, 1, 0, 0, 10, True), # Case 2: Only full attention groups, allocation fails # needed_blocks = 5 * 2 = 10, free_blocks = 5 -> 10 <= 5 = False (5, 0, 2, 0, 0, 5, False), # Case 3: Mixed attention, sliding window not yet full # needed_blocks = 2 * 1 + min(4 - 0, 2) * 1 = 2 + 2 = 4, free_blocks = 10 -> 4 <= 10 = True (2, 0, 1, 1, 4, 10, True), # Case 4: Mixed attention, sliding window partially filled # needed_blocks = 3 * 1 + min(4 - 2, 3) * 1 = 3 + 2 = 5, free_blocks = 5 -> 5 <= 5 = True (3, 2, 1, 1, 4, 5, True), # Case 5: Mixed attention, sliding window already full (allocated_blocks >= max_sliding) # blocks_left = max(4 - 5, 0) = 0, needed_blocks = 3 * 1 + 0 = 3, free_blocks = 5 -> 3 <= 5 = True (3, 5, 1, 1, 4, 5, True), # Case 6: Mixed attention, sliding window full, allocation fails due to full attention # blocks_left = max(4 - 4, 0) = 0, needed_blocks = 6 * 1 + 0 = 6, free_blocks = 5 -> 6 <= 5 = False (6, 4, 1, 1, 4, 5, False), # Case 7: Multiple full attention groups # needed_blocks = 3 * 2 = 6, free_blocks = 6 -> 6 <= 6 = True (3, 0, 2, 0, 0, 6, True), # Case 8: Multiple sliding attention groups, not full # needed_blocks = 2 * 1 + min(4 - 1, 2) * 2 = 2 + 4 = 6, free_blocks = 6 -> 6 <= 6 = True (2, 1, 1, 2, 4, 6, True), # Case 9: Edge case - requesting 0 blocks always succeeds # needed_blocks = 0, free_blocks = 0 -> 0 <= 0 = True (0, 0, 1, 1, 4, 0, True), # Case 10: Edge case - exactly enough 
blocks # needed_blocks = 2 * 1 + min(3 - 0, 2) * 1 = 2 + 2 = 4, free_blocks = 4 -> 4 <= 4 = True (2, 0, 1, 1, 3, 4, True), ] ) def test_continuous_batching_will_allocation_be_successful( self, num_requested_blocks: int, allocated_blocks: int, num_full_attention_groups: int, num_sliding_attention_groups: int, max_sliding_window_blocks_per_request: int, num_free_blocks: int, expected_result: bool, ) -> None: """Test the will_allocation_be_successful method of PagedAttentionCache, overloading the elevant attributes of a dummy cache.""" if torch_device is None: # this check which should always pass and helps with type checking raise ValueError(f"This requires a torch accelerator, yet {torch_device = } and the test was not skipped.") # Create the cache cache = PagedAttentionCache( config=AutoConfig.from_pretrained("HuggingFaceTB/SmolLM-1.7B", attn_implementation="sdpa"), generation_config=GenerationConfig(num_blocks=8, block_size=16, max_batch_tokens=8), device=torch_device, ) # Overload cache parameters to match test scenario cache.num_full_attention_groups = num_full_attention_groups cache.num_sliding_attention_groups = num_sliding_attention_groups cache.max_sliding_window_blocks_per_request = max_sliding_window_blocks_per_request # Overload the cache get_num_free_blocks method cache.get_num_free_blocks = lambda: num_free_blocks # type: ignore[assignment] # Test the method result = cache.will_allocation_be_successful(num_requested_blocks, allocated_blocks) self.assertEqual( result, expected_result, f"Failed for: {num_requested_blocks=}, {allocated_blocks=}, {num_full_attention_groups=}, " f"{num_sliding_attention_groups=}, {max_sliding_window_blocks_per_request=}, {num_free_blocks=}. 
" f"Expected {expected_result}, got {result}", ) @parameterized.expand( [ # (block_size, block_table, past_length, query_length, expected_indices) # Basic cases (32, [0, 1, 2], 0, 16, list(range(16))), (32, [0, 1, 2], 0, 32, list(range(32))), (32, [0, 1, 2], 0, 64, list(range(64))), # Non-contiguous blocks (32, [0, 3, 6], 0, 64, list(range(32)) + list(range(96, 128))), (32, [2, 5, 8], 0, 32, list(range(64, 96))), # With past_length (read still starts from 0) (32, [0, 1, 2], 16, 16, list(range(32))), (32, [0, 1, 2], 31, 2, list(range(33))), # Partial last block (32, [0, 1, 2], 0, 50, list(range(32)) + list(range(32, 50))), # Different block sizes (16, [0, 1, 2, 3], 0, 48, list(range(48))), (64, [0, 1], 0, 100, list(range(100))), ] ) def test_full_attention_get_read_indices( self, block_size: int, block_table: list[int], past_length: int, query_length: int, expected_indices: list[int], ) -> None: """Test FullAttentionCacheAllocator.get_read_indices returns correct physical indices.""" allocator = FullAttentionCacheAllocator(index=0, block_size=block_size, allow_block_sharing=False) request_id = "test_request" allocator.block_table[request_id] = block_table result = allocator.get_read_indices(request_id, past_length, query_length) self.assertEqual( result, expected_indices, f"Failed for {block_size=}, {block_table=}, {past_length=}, {query_length=}", ) @parameterized.expand( [ # (block_size, block_table, past_length, query_length, expected_indices) # Start of sequence (32, [0, 1, 2], 0, 16, list(range(16))), (32, [0, 1, 2], 0, 32, list(range(32))), # Continue in same block (32, [0, 1, 2], 16, 16, list(range(16, 32))), # Cross block boundary (32, [0, 1, 2], 30, 4, list(range(30, 34))), (32, [0, 1, 2], 31, 2, [31, 32]), # Non-contiguous blocks (32, [0, 3, 6], 30, 4, [30, 31, 96, 97]), (32, [2, 5, 8], 60, 10, list(range(188, 192)) + list(range(256, 262))), # Decode step (single token) (32, [0, 1, 2], 0, 1, [0]), (32, [0, 1, 2], 31, 1, [31]), (32, [0, 1, 2], 32, 1, [32]), 
(32, [0, 1, 2], 63, 1, [63]), # Different block sizes (16, [0, 1, 2, 3], 14, 4, [14, 15, 16, 17]), (64, [0, 1], 60, 10, list(range(60, 70))), ] ) def test_full_attention_get_write_indices( self, block_size: int, block_table: list[int], past_length: int, query_length: int, expected_indices: list[int], ) -> None: """Test FullAttentionCacheAllocator.get_write_indices returns correct physical indices.""" allocator = FullAttentionCacheAllocator(index=0, block_size=block_size, allow_block_sharing=False) request_id = "test_request" allocator.block_table[request_id] = block_table result = allocator.get_write_indices(request_id, past_length, query_length) self.assertEqual( result, expected_indices, f"Failed for {block_size=}, {block_table=}, {past_length=}, {query_length=}", ) @require_torch_accelerator class ContinuousBatchingGenerationTest(unittest.TestCase): # -----------------------------------------------Parity tests----------------------------------------------- # # Ensure continuous batching and non-continuous batching generation produce the same outputs # # ---------------------------------------------------------------------------------------------------------- # @require_deterministic_for_xpu def _test_continuous_batching_parity( self, model_id: str, allow_block_sharing: bool, attn_implementation: str, use_cuda_graph: bool, use_compile: bool, use_async: bool, max_new_tokens: int = 20, num_blocks: int | None = None, num_repeat_prompts: int = 1, ) -> None: """Tests the parity between continuous batching and non-continuous batching generation.""" # Skip the test if Flash Attention 2 is required but not available if attn_implementation == "flash_attention_2" and not (is_flash_attn_2_available() or is_kernels_available()): self.skipTest("Flash Attention 2 is not available and neither is the kernels library. 
Skipping test.") # Skip the test if cuda graph is on but the device is not CUDA if use_cuda_graph and torch_device != "cuda": self.skipTest("CUDA graph is only supported on CUDA devices. Skipping test.") # Prepare continuous batching inputs tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left") if hasattr(tokenizer, "eos_token"): tokenizer.pad_token = tokenizer.eos_token user_messages = [ "Josh decides to try flipping a house. He buys a house for $80,000 and then puts in $50,000 in repairs. This increased the value of the house by 150%. How much profit did he make?", "A robe takes 2 bolts of blue fiber and half that much white fiber. How many bolts in total does it take?", "A basket contains 25 oranges among which 1 is bad, 20% are unripe, 2 are sour and the rest are good. How many oranges are good?", ] # fmt: skip if num_repeat_prompts > 1: user_messages = user_messages * num_repeat_prompts chats = [[{"role": "user", "content": user_message}] for user_message in user_messages] tokenized = [tokenizer.apply_chat_template(chat, add_generation_prompt=True) for chat in chats] input_ids = [(x if isinstance(x, list) else x["input_ids"]) for x in tokenized] # Eager and SDPA implementations get a precision boost to account for the fact that an attention mask is used in # continuous batching but not in generate dtype = "auto" if attn_implementation == "flash_attention_2" else torch.float32 # Generation with continuous batching model = AutoModelForCausalLM.from_pretrained(model_id, attn_implementation=attn_implementation, dtype=dtype) if ( attn_implementation == "flash_attention_2" and torch_device == "cpu" and getattr(model.config, "sliding_window", None) is not None and model.config.sliding_window > 0 ): self.skipTest("Flash Attention 2 with sliding window attention is not supported on CPU. 
Skipping test.") model = model.to(torch_device).eval() # type: ignore[assignment] <- torch_device is always w/ the decorator model.generation_config.max_new_tokens = max_new_tokens model.generation_config.do_sample = False model.generation_config.use_cuda_graph = use_cuda_graph model.generation_config.num_blocks = num_blocks if use_compile: model.generation_config.compile_config = CompileConfig(fullgraph=True, mode="default") # Generation with continuous batching continuous_batching_outputs = model.generate_batch( inputs=input_ids, generation_config=model.generation_config, allow_block_sharing=allow_block_sharing, use_async=use_async, ) # Prepare non-continuous batching inputs inputs = tokenizer.apply_chat_template( chats, add_generation_prompt=True, return_tensors="pt", padding=True, return_dict=True, return_attention_mask=True, ) num_input_tokens = inputs.input_ids.shape[1] # Generation without continuous batching model = AutoModelForCausalLM.from_pretrained(model_id, attn_implementation=attn_implementation, dtype=dtype) model = model.to(torch_device).eval() # type: ignore[assignment] <- torch_device is always w/ the decorator model.generation_config.max_new_tokens = max_new_tokens model.generation_config.do_sample = False model.generation_config.use_cuda_graph = use_cuda_graph if use_compile: model.generation_config.compile_config = CompileConfig(fullgraph=True, mode="default") generate_outputs = model.generate(**inputs.to(torch_device), generation_config=model.generation_config) for i, user_message in enumerate(user_messages): # Find the corresponding request in the continuous batching outputs input_tokens = inputs.input_ids[i][inputs.attention_mask[i] == 1].tolist() key_to_pop = None for key, state in continuous_batching_outputs.items(): if state.prompt_ids == input_tokens: key_to_pop = key break if key_to_pop is None: self.fail(f"Request {i} not found in continuous batching outputs") continuous_batching_output = 
continuous_batching_outputs.pop(key_to_pop).generated_tokens generate_output = generate_outputs[i][num_input_tokens:].tolist() while generate_output[-1] == model.generation_config.pad_token_id: generate_output.pop() if continuous_batching_output != generate_output: decoded_continuous_batching_output = tokenizer.decode(continuous_batching_output) decoded_generate_output = tokenizer.decode(generate_output) msg = f"Test failed for {model_id = } {allow_block_sharing = }, {attn_implementation = }, {use_cuda_graph = }, {use_compile = }\n" msg += f"User message : {repr(user_message)}\n" msg += f"Continuous batching output: {repr(decoded_continuous_batching_output)}\n" msg += f"Generate output : {repr(decoded_generate_output)}" self.fail(msg) del model flush_memory(flush_compile=use_compile) @parameterized.expand( list( itertools.product( [False, True], ["eager", "sdpa", "flash_attention_2"], [False, True], [False, True], ) ) ) @slow def test_continuous_batching_config_combinations( self, allow_block_sharing: bool, attn_implementation: str, use_cuda_graph: bool, use_compile: bool, ) -> None: model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" self._test_continuous_batching_parity( model_id, allow_block_sharing, attn_implementation, use_cuda_graph, use_compile, use_async=False ) # FIXME: Qwen2.5-0.5B-Instruct is not here because it's broken (it uses a repetition penalty logits processor) # TODO: replace gemma2 with a tiny version of GPT-OSS? 
That way we can test sliding window AND attention sink @parameterized.expand( list( itertools.product( ["TinyLlama/TinyLlama-1.1B-Chat-v1.0", "google/gemma-2-2b-it"], [False, True], [False, True], ) ) ) @slow def test_continuous_batching_diverse_models(self, model_id: str, use_cuda_graph: bool, use_compile: bool) -> None: try: self._test_continuous_batching_parity( model_id, True, "flash_attention_2", use_cuda_graph, use_compile, use_async=False ) finally: flush_memory(flush_compile=use_compile) def test_continuous_batching_fast(self) -> None: model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" self._test_continuous_batching_parity( model_id, allow_block_sharing=False, attn_implementation="sdpa", use_cuda_graph=False, use_compile=False, use_async=False, ) def test_continuous_batching_long_generate(self) -> None: model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" self._test_continuous_batching_parity( model_id, allow_block_sharing=True, attn_implementation="flash_attention_2", use_cuda_graph=True, use_compile=True, use_async=False, max_new_tokens=80, ) def test_continuous_batching_few_blocks(self) -> None: """This test verifies that generation works with a very small number of blocks, ie. small enough that we need to offload a request at some point. 
To add more complexity, we repeat the same prompt 4 times and enable prefix sharing.""" model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" # Patch soft_reset_one_request to verify it's called at least once original_soft_reset = ContinuousBatchProcessor.soft_reset_one_request with patch.object( ContinuousBatchProcessor, "soft_reset_one_request", autospec=True, side_effect=original_soft_reset ) as mock_soft_reset: self._test_continuous_batching_parity( model_id=model_id, allow_block_sharing=True, attn_implementation="sdpa", use_cuda_graph=True, use_compile=False, use_async=False, max_new_tokens=30, num_blocks=4, num_repeat_prompts=4, ) self.assertTrue(mock_soft_reset.called, "Soft reset method was not called.") @parameterized.expand( list( itertools.product( ["sdpa", "flash_attention_2"], [False, True], [False, True], ) ) ) @slow def test_continuous_batching_async( self, attn_implementation: str, use_cuda_graph: bool, use_compile: bool ) -> None: model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" self._test_continuous_batching_parity( model_id, allow_block_sharing=True, attn_implementation=attn_implementation, use_cuda_graph=use_cuda_graph, use_compile=use_compile, use_async=True, ) # ---------------------------------------Streaming tests--------------------------------------- # # Ensures the requests have the right behavior with and without streaming # # --------------------------------------------------------------------------------------------- # def _test_streaming_or_not_request(self, with_streaming: bool, with_non_streaming: bool) -> None: model_id = "Qwen/Qwen2.5-0.5B-Instruct" max_new_tokens = 3 model = AutoModelForCausalLM.from_pretrained(model_id) manager = model.init_continuous_batching() manager.logit_processor = LogitsProcessorList() manager.start() tokenizer = AutoTokenizer.from_pretrained(model_id) messages = [{"content": "What is the Transformers library known for?", "role": "user"}] inputs = tokenizer.apply_chat_template( messages, return_tensors="pt", 
add_generation_prompt=True, return_dict=False ).to(model.device)[0] # Test with non-streaming if with_non_streaming: request_id = manager.add_request(inputs, max_new_tokens=max_new_tokens, streaming=False) # In non-streaming mode, the total number of generated tokens is equal to the max new tokens chunk = next(manager.request_id_iter(request_id)) self.assertEqual(len(chunk.generated_tokens), max_new_tokens) # Test with streaming if with_streaming: request_id = manager.add_request(inputs, max_new_tokens=max_new_tokens, streaming=True) # In streaming mode, the total number of generated tokens is incremented by 1 on each iteration chunk_1 = next(manager.request_id_iter(request_id)) self.assertEqual(len(chunk_1.generated_tokens), 1) chunk_2 = next(manager.request_id_iter(request_id)) self.assertEqual(len(chunk_2.generated_tokens), 2) chunk_3 = next(manager.request_id_iter(request_id)) self.assertEqual(len(chunk_3.generated_tokens), 3) manager.stop(block=True) def test_streaming_request(self) -> None: self._test_streaming_or_not_request(with_streaming=True, with_non_streaming=False) def test_non_streaming_request(self) -> None: self._test_streaming_or_not_request(with_streaming=False, with_non_streaming=True) def test_streaming_and_non_streaming_requests_can_alternate(self) -> None: self._test_streaming_or_not_request(with_streaming=True, with_non_streaming=True) # -----------------------------------------Misc. 
tests----------------------------------------- # # Various tests that don't fit into the other categories # # --------------------------------------------------------------------------------------------- # def _test_block_sharing( self, model_id: str, expected_layer_types: dict[str, int], input_msg: str, expected_output_tokens: list[int] ) -> None: tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) generation_config = GenerationConfig(do_sample=False, block_size=32) with model.continuous_batching_context_manager(generation_config=generation_config) as manager: manager.logit_processor = LogitsProcessorList() # Create a request with at least 32 tokens but less than 64 so prefill only generates one complete block messages = [{"content": input_msg, "role": "user"}] inputs = tokenizer.apply_chat_template( messages, return_tensors="pt", add_generation_prompt=True, return_dict=False ) inputs = inputs.to(model.device)[0].tolist() self.assertGreaterEqual(len(inputs), 32, f"Input length is {len(inputs)} instead of at least 32") self.assertLess(len(inputs), 64, f"Input length is {len(inputs)} instead of less than 64") # First request, which populates the cache w/ 2 complete blocks for each full attention layer group request_id = manager.add_request(inputs, max_new_tokens=32) chunk_no_reuse = next(manager.request_id_iter(request_id)) num_fa = expected_layer_types["full_attention"] num_sw = expected_layer_types["sliding_window"] if manager.batch_processor is None: raise RuntimeError("Batch processor is None even after a request was added.") hash_table = manager.batch_processor.cache._block_manager._hash_to_id self.assertEqual( len(hash_table), 2 * num_fa, # 2 = 1 for prefill + 1 for decode f"There should be {2 * num_fa} blocks, 2 for each full attention layer group, but {len(hash_table) = }", ) total_prefix_length = manager.batch_processor.cache._total_prefix_length self.assertEqual( total_prefix_length, 0, f"Expected 
total prefix length to be 0, got {total_prefix_length}" ) # Assert the number of layer groups and their types are the expected ones layer_groups = manager.batch_processor.cache.group_cache_managers self.assertEqual( len(layer_groups), num_fa + num_sw, f"There should be {num_fa + num_sw} layer groups, but {len(layer_groups) = }", ) layer_group_types = {"full_attention": 0, "sliding_window": 0} for cm in layer_groups: if isinstance(cm, FullAttentionCacheAllocator): layer_group_types["full_attention"] += 1 elif isinstance(cm, SlidingAttentionCacheAllocator): layer_group_types["sliding_window"] += 1 else: raise ValueError(f"Invalid layer group type: {type(cm)}") self.assertEqual( layer_group_types, expected_layer_types, f"The expected layer group types are\n{expected_layer_types}\nbut got\n{layer_group_types}", ) # Second request, which should reuse the same blocks for the full attention layer groups request_id = manager.add_request(inputs, max_new_tokens=32) chunk_with_reuse = next(manager.request_id_iter(request_id)) # There should only still be two blocks in the hash table because of block reuse self.assertEqual( len(hash_table), 2 * num_fa, f"Because of block reuse, there should still be two blocks in the hash table, but {len(hash_table) = }", ) # Check that the whole prefill was matched if there are only full attention layers if expected_layer_types["sliding_window"] == 0: expected_total_prefix_length = 32 else: expected_total_prefix_length = 0 total_prefix_length = manager.batch_processor.cache._total_prefix_length self.assertEqual( total_prefix_length, expected_total_prefix_length, f"Expected total prefix length to be {expected_total_prefix_length}, but got {total_prefix_length = }", ) # Check the outputs were the same self.assertEqual(chunk_no_reuse.generated_tokens, chunk_with_reuse.generated_tokens) # As an additional sanity check, we also compare to the generated tokens when prefix sharing is disabled print(f"{chunk_no_reuse.generated_tokens = } 
{expected_output_tokens = }") self.assertEqual(chunk_no_reuse.generated_tokens, expected_output_tokens) def test_prefix_sharing(self) -> None: model_id = "Qwen/Qwen2.5-0.5B-Instruct" num_layer_groups = {"full_attention": 1, "sliding_window": 0} input_msg = "What is the Transformers library known for?" expected_generated_tokens = Expectations({ (None, None): [785, 80532, 6733, 374, 3881, 369, 1181, 5726, 311, 1855, 323, 36635, 3460, 12934, 4128, 4119, 11, 2670, 1846, 429, 646, 6923, 1467, 11, 14683, 1467, 11, 323, 2736, 1008, 4128, 13904] }).get_expectation() # fmt: skip return self._test_block_sharing(model_id, num_layer_groups, input_msg, expected_generated_tokens) def test_block_sharing_with_hybrid_model(self) -> None: model_id = "google/gemma-3-1b-it" num_layer_groups = {"full_attention": 2, "sliding_window": 11} input_msg = "I am a software engineer looking to use open source software to build a new AI agent. What is the Transformers library known for?" expected_generated_tokens = Expectations({ (None, None): [19058, 236764, 1531, 236789, 236751, 2541, 1679, 1144, 506, 128282, 9427, 563, 3224, 573, 236764, 10916, 528, 506, 4403, 529, 3788, 12498, 11362, 236761, 1030, 236789, 236751, 496, 808, 120749, 236829, 532] }).get_expectation() # fmt: skip return self._test_block_sharing(model_id, num_layer_groups, input_msg, expected_generated_tokens) @parameterized.expand([True, False]) @require_flash_attn # otherwise the test can fail because attention bias has a very slight impact on SDPA and eager def test_num_return_sequences(self, allow_block_sharing: bool) -> None: model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left") user_messages = [ "A robe takes 2 bolts of blue fiber and half that much white fiber. How many bolts in total does it take?" 
] chats = [[{"role": "user", "content": user_message}] for user_message in user_messages] tokenized = [tokenizer.apply_chat_template(chat, add_generation_prompt=True) for chat in chats] input_ids = [(x if isinstance(x, list) else x["input_ids"]) for x in tokenized] # Generation with continuous batching model = AutoModelForCausalLM.from_pretrained(model_id, attn_implementation="flash_attention_2") model = model.to(torch_device).eval() # type: ignore[assignment] <- torch_device is always w/ the decorator model.generation_config.max_new_tokens = 30 model.generation_config.do_sample = False # Generation with continuous batching manager_cm = model.continuous_batching_context_manager( allow_block_sharing=allow_block_sharing, block=True, timeout=5 ) # Main loop results = [] with manager_cm as manager: manager.num_return_sequences = 2 manager.add_requests(inputs=input_ids, max_new_tokens=30) requests_left = 2 while requests_left: result = manager.get_result(timeout=1) if result and result.is_finished(): results.append(result) requests_left -= 1 else: if not manager.is_running(): break self.assertEqual(len(results), 2, f"Expected 2 results, but got {len(results) = }") self.assertEqual(results[0].generated_tokens, results[1].generated_tokens)
{ "repo_id": "huggingface/transformers", "file_path": "tests/generation/test_continuous_batching.py", "license": "Apache License 2.0", "lines": 701, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:src/transformers/models/imagegpt/image_processing_imagegpt_fast.py
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Image processor class for ImageGPT.""" from typing import Optional import numpy as np import torch import torchvision.transforms.v2.functional as tvF from ...image_processing_utils import BatchFeature from ...image_processing_utils_fast import ( BaseImageProcessorFast, ) from ...image_transforms import group_images_by_shape, reorder_images from ...image_utils import PILImageResampling from ...processing_utils import Unpack from ...utils import ( TensorType, auto_docstring, ) from .image_processing_imagegpt import ImageGPTImageProcessorKwargs def squared_euclidean_distance_torch(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor: """ Compute squared Euclidean distances between all pixels and clusters. Args: a: (N, 3) tensor of pixel RGB values b: (M, 3) tensor of cluster RGB values Returns: (N, M) tensor of squared distances """ b = b.t() # (3, M) a2 = torch.sum(a**2, dim=1) # (N,) b2 = torch.sum(b**2, dim=0) # (M,) ab = torch.matmul(a, b) # (N, M) d = a2[:, None] - 2 * ab + b2[None, :] # Squared Euclidean Distance: a^2 - 2ab + b^2 return d # (N, M) tensor of squared distances def color_quantize_torch(x: torch.Tensor, clusters: torch.Tensor) -> torch.Tensor: """ Assign each pixel to its nearest color cluster. 
Args: x: (H*W, 3) tensor of flattened pixel RGB values clusters: (n_clusters, 3) tensor of cluster RGB values Returns: (H*W,) tensor of cluster indices """ d = squared_euclidean_distance_torch(x, clusters) return torch.argmin(d, dim=1) @auto_docstring class ImageGPTImageProcessorFast(BaseImageProcessorFast): model_input_names = ["input_ids"] resample = PILImageResampling.BILINEAR do_color_quantize = True clusters = None image_mean = [0.5, 0.5, 0.5] image_std = [0.5, 0.5, 0.5] do_rescale = True do_normalize = True valid_kwargs = ImageGPTImageProcessorKwargs def __init__( self, clusters: list | np.ndarray | torch.Tensor | None = None, # keep as arg for backwards compatibility **kwargs: Unpack[ImageGPTImageProcessorKwargs], ): r""" clusters (`np.ndarray` or `list[list[int]]` or `torch.Tensor`, *optional*): The color clusters to use, of shape `(n_clusters, 3)` when color quantizing. Can be overridden by `clusters` in `preprocess`. """ clusters = torch.as_tensor(clusters, dtype=torch.float32) if clusters is not None else None super().__init__(clusters=clusters, **kwargs) def _preprocess( self, images: list["torch.Tensor"], do_resize: bool, size: dict[str, int], interpolation: Optional["tvF.InterpolationMode"], do_center_crop: bool, crop_size: dict[str, int], do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: float | list[float] | None, image_std: float | list[float] | None, do_color_quantize: bool | None = None, clusters: list | np.ndarray | torch.Tensor | None = None, disable_grouping: bool | None = None, return_tensors: str | TensorType | None = None, **kwargs, ): # Group images by size for batched resizing grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping) resized_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_resize: stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation) resized_images_grouped[shape] = stacked_images 
resized_images = reorder_images(resized_images_grouped, grouped_images_index) # Group images by size for further processing # Needed in case do_resize is False, or resize returns images with different sizes grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping) processed_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_center_crop: stacked_images = self.center_crop(stacked_images, crop_size) # Fused rescale and normalize stacked_images = self.rescale_and_normalize( stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) processed_images_grouped[shape] = stacked_images pixel_values = reorder_images(processed_images_grouped, grouped_images_index) # If color quantization is requested, perform it; otherwise return pixel values if do_color_quantize: # Prepare clusters if clusters is None: raise ValueError("Clusters must be provided for color quantization.") # Convert to torch tensor if needed (clusters might be passed as list/numpy) clusters_torch = ( torch.as_tensor(clusters, dtype=torch.float32) if not isinstance(clusters, torch.Tensor) else clusters ).to(pixel_values[0].device, dtype=pixel_values[0].dtype) # Group images by shape for batch processing # We need to check if the pixel values are a tensor or a list of tensors grouped_images, grouped_images_index = group_images_by_shape( pixel_values, disable_grouping=disable_grouping ) # Process each group input_ids_grouped = {} for shape, stacked_images in grouped_images.items(): input_ids = color_quantize_torch( stacked_images.permute(0, 2, 3, 1).reshape(-1, 3), clusters_torch ) # (B*H*W, C) input_ids_grouped[shape] = input_ids.reshape(stacked_images.shape[0], -1).reshape( stacked_images.shape[0], -1 ) # (B, H, W) input_ids = reorder_images(input_ids_grouped, grouped_images_index) return BatchFeature(data={"input_ids": input_ids}, tensor_type=return_tensors) return BatchFeature(data={"pixel_values": 
pixel_values}, tensor_type=return_tensors) def to_dict(self): # Convert torch tensors to lists for JSON serialization output = super().to_dict() if output.get("clusters") is not None and isinstance(output["clusters"], torch.Tensor): output["clusters"] = output["clusters"].tolist() return output __all__ = ["ImageGPTImageProcessorFast"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/imagegpt/image_processing_imagegpt_fast.py", "license": "Apache License 2.0", "lines": 152, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:benchmark_v2/run_benchmarks.py
#!/usr/bin/env python3 # Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Top-level benchmarking script that automatically discovers and runs all benchmarks in the ./benches directory, organizing outputs into model-specific subfolders. """ import argparse import json import logging import sys import uuid from framework.benchmark_config import BenchmarkConfig, adapt_configs, get_config_by_level from framework.benchmark_runner import BenchmarkRunner if __name__ == "__main__": # Parse arguments parser = argparse.ArgumentParser() parser.add_argument("--output-dir", type=str, default=None, help="Output dir for benchmark results") parser.add_argument("--log-level", type=str, choices=["DEBUG", "INFO", "WARNING", "ERROR"], default="WARNING") parser.add_argument("--model-id", type=str, help="Specific model ID to benchmark (if supported by benchmarks)") parser.add_argument("--warmup", "-w", type=int, default=3, help="Number of warmup iterations") parser.add_argument("--iterations", "-i", type=int, default=10, help="Number of measurement iterations") parser.add_argument("--batch-size", "-b", type=int, nargs="+", help="Batch size") parser.add_argument("--sequence-length", "-s", type=int, nargs="+", help="Sequence length") parser.add_argument("--num-tokens-to-generate", "-n", type=int, nargs="+", help="Number of tokens to generate") parser.add_argument( "--level", type=int, default=1, help="Level of coverage for the benchmark. 
0: only the main config, 1: a few important configs, 2: a config for" " each attn implementation an option, 3: cross-generate all combinations of configs, 4: cross-generate all" " combinations of configs w/ all compile modes", ) parser.add_argument("--config-file", type=str, help="Path to a config file stored as a json or jsonl format") parser.add_argument("--num-tokens-to-profile", "-p", type=int, default=0, help="Number of tokens to profile") parser.add_argument("--branch-name", type=str, help="Git branch name") parser.add_argument("--commit-id", type=str, help="Git commit ID (if not provided, will auto-detect from git)") parser.add_argument("--commit-message", type=str, help="Git commit message") parser.add_argument( "--no-gpu-monitoring", action="store_true", help="Disables GPU monitoring during benchmark runs" ) parser.add_argument( "--push-result-to-dataset", type=str, default=None, help="Name of the dataset to push results to. If not provided, results are not pushed to the Hub.", ) args = parser.parse_args() # Setup logging benchmark_run_uuid = str(uuid.uuid4())[:8] numeric_level = getattr(logging, args.log_level.upper()) handlers = [logging.StreamHandler(sys.stdout)] logging.basicConfig( level=numeric_level, format="[%(levelname)s - %(asctime)s] %(name)s: %(message)s", handlers=handlers ) logger = logging.getLogger("benchmark_v2") logger.info("Starting benchmark discovery and execution") logger.info(f"Benchmark run UUID: {benchmark_run_uuid}") logger.info(f"Output directory: {args.output_dir}") # Error out if one of the arguments is not provided if any(arg is None for arg in [args.batch_size, args.sequence_length, args.num_tokens_to_generate]): raise ValueError( "All of the arguments --batch-size, --sequence-length, and --num-tokens-to-generate are required" ) # We cannot compute ITL if we don't have at least two measurements if any(n <= 1 for n in args.num_tokens_to_generate): raise ValueError("--num_tokens_to_generate arguments should be larger than 1") # 
If a config file is provided, read it and use the configs therein. They will still be adapted to the given arguments. if args.config_file is not None: if args.config_file.endswith(".json"): with open(args.config_file, "r") as f: config_as_dicts = [json.load(f)] elif args.config_file.endswith(".jsonl"): with open(args.config_file, "r") as f: config_as_dicts = [json.loads(line) for line in f if line.startswith("{")] else: raise ValueError(f"Unsupported config file format: {args.config_file}") configs = [BenchmarkConfig.from_dict(config) for config in config_as_dicts] else: # Otherwise, get the configs for the given coverage level configs = get_config_by_level(args.level) # Adapt the configs to the given arguments configs = adapt_configs( configs, args.warmup, args.iterations, args.batch_size, args.sequence_length, args.num_tokens_to_generate, not args.no_gpu_monitoring, ) runner = BenchmarkRunner(logger, args.output_dir, args.branch_name, args.commit_id, args.commit_message) timestamp, results = runner.run_benchmarks( args.model_id, configs, args.num_tokens_to_profile, pretty_print_summary=True ) dataset_id = args.push_result_to_dataset if dataset_id is not None and len(results) > 0: runner.push_results_to_hub(dataset_id, results, timestamp)
{ "repo_id": "huggingface/transformers", "file_path": "benchmark_v2/run_benchmarks.py", "license": "Apache License 2.0", "lines": 109, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/apertus/modular_apertus.py
# Copyright 2025 the HuggingFace Inc. team and the Swiss AI Initiative. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections.abc import Callable import torch from torch import nn from ...activations import ACT2CLS from ...cache_utils import Cache from ...configuration_utils import PreTrainedConfig from ...modeling_rope_utils import RopeParameters from ...modeling_utils import ALL_ATTENTION_FUNCTIONS from ...processing_utils import Unpack from ...utils import TransformersKwargs, logging from ..llama.modeling_llama import ( LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaForTokenClassification, LlamaModel, LlamaPreTrainedModel, LlamaRMSNorm, LlamaRotaryEmbedding, apply_rotary_pos_emb, eager_attention_forward, ) from ..nemotron.modeling_nemotron import NemotronMLP logger = logging.get_logger(__name__) class ApertusConfig(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`ApertusModel`]. It is used to instantiate a Apertus model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Apertus-8B. e.g. [swiss-ai/Apertus-8B](https://huggingface.co/swiss-ai/Apertus-8B) Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. 
Args: vocab_size (`int`, *optional*, defaults to 131072): Vocabulary size of the Apertus model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`ApertusModel`] hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 14336): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. hidden_act (`str` or `function`, *optional*, defaults to `"xielu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 65536): The maximum sequence length that this model might ever be used with. Apertus supports up to 65536 tokens. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the rms normalization layers. 
use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*, defaults to 3): Padding token id. bos_token_id (`int`, *optional*, defaults to 1): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 2): End of stream token id. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings rope_parameters (`RopeParameters`, *optional*): Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with longer `max_position_embeddings`. attention_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. 
```python >>> from transformers import ApertusModel, ApertusConfig >>> # Initializing a Apertus-8B style configuration >>> configuration = ApertusConfig() >>> # Initializing a model from the Apertus-8B style configuration >>> model = ApertusModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "apertus" keys_to_ignore_at_inference = ["past_key_values"] default_theta = 12000000.0 base_model_tp_plan = { "layers.*.self_attn.q_proj": "colwise", "layers.*.self_attn.k_proj": "colwise", "layers.*.self_attn.v_proj": "colwise", "layers.*.self_attn.o_proj": "rowwise", "layers.*.mlp.up_proj": "colwise", "layers.*.mlp.down_proj": "rowwise", } base_model_pp_plan = { "embed_tokens": (["input_ids"], ["inputs_embeds"]), "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), "norm": (["hidden_states"], ["hidden_states"]), } def __init__( self, vocab_size: int | None = 131072, hidden_size: int | None = 4096, intermediate_size: int | None = 14336, num_hidden_layers: int | None = 32, num_attention_heads: int | None = 32, num_key_value_heads: int | None = None, hidden_act: str | None = "xielu", max_position_embeddings: int | None = 65536, initializer_range: float | None = 0.02, rms_norm_eps: float | None = 1e-5, use_cache: bool | None = True, pad_token_id: int | None = 3, bos_token_id: int | None = 1, eos_token_id: int | None = 2, tie_word_embeddings: bool | None = False, rope_parameters: RopeParameters | None = { "rope_type": "llama3", "rope_theta": 12000000.0, "factor": 8.0, "original_max_position_embeddings": 8192, "low_freq_factor": 1.0, "high_freq_factor": 4.0, }, attention_bias: bool | None = False, attention_dropout: float | None = 0.0, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads # for backward 
compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.rope_parameters = rope_parameters self.tie_word_embeddings = tie_word_embeddings self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id super().__init__(**kwargs) class ApertusMLP(NemotronMLP): def __init__(self, config): super().__init__(config) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) if config.hidden_act == "xielu": self.act_fn = ACT2CLS["xielu"](dtype=config.dtype) class ApertusRMSNorm(LlamaRMSNorm): pass class ApertusRotaryEmbedding(LlamaRotaryEmbedding): pass class ApertusAttention(LlamaAttention): def __init__(self, config: ApertusConfig, layer_idx: int | None = None): super().__init__(config, layer_idx) self.q_norm = ApertusRMSNorm(self.head_dim, config.rms_norm_eps) self.k_norm = ApertusRMSNorm(self.head_dim, config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: torch.Tensor | None, past_key_values: Cache | None = None, cache_position: torch.LongTensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, torch.Tensor]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) query_states = self.q_norm(query_states) key_states = self.k_norm(key_states) cos, sin = position_embeddings 
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface( self.config._attn_implementation, eager_attention_forward ) attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class ApertusDecoderLayer(LlamaDecoderLayer): def __init__(self, config: ApertusConfig, layer_idx: int): super().__init__(config, layer_idx) self.attention_layernorm = ApertusRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.feedforward_layernorm = ApertusRMSNorm(config.hidden_size, eps=config.rms_norm_eps) del self.input_layernorm del self.post_attention_layernorm def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: Cache | None = None, use_cache: bool | None = False, cache_position: torch.LongTensor | None = None, position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor]: residual = hidden_states hidden_states = self.attention_layernorm(hidden_states) hidden_states, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = 
self.feedforward_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return hidden_states class ApertusPreTrainedModel(LlamaPreTrainedModel): pass class ApertusModel(LlamaModel): pass class ApertusForCausalLM(LlamaForCausalLM): def forward(self, **super_kwargs): r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, ApertusForCausalLM >>> model = ApertusForCausalLM.from_pretrained("swiss-ai/Apertus-8B") >>> tokenizer = AutoTokenizer.from_pretrained("swiss-ai/Apertus-8B") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```""" return super().forward(**super_kwargs) class ApertusForTokenClassification(LlamaForTokenClassification): pass __all__ = [ "ApertusConfig", "ApertusModel", "ApertusForCausalLM", "ApertusForTokenClassification", "ApertusPreTrainedModel", ]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/apertus/modular_apertus.py", "license": "Apache License 2.0", "lines": 292, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:tests/models/apertus/test_modeling_apertus.py
# Copyright 2025 The HuggingFace Inc. team and the Swiss AI Initiative. All rights reserved. # # This code is based on HuggingFace's LLaMA implementation in this library. # It has been modified from its original forms to accommodate minor architectural # differences compared to LLaMA used by the Swiss AI Initiative that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Apertus model.""" import unittest from transformers import is_torch_available from transformers.testing_utils import ( require_torch, require_torch_accelerator, slow, ) from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester if is_torch_available(): from transformers import ( ApertusForCausalLM, ApertusModel, ) class ApertusModelTester(CausalLMModelTester): if is_torch_available(): base_model_class = ApertusModel @require_torch class ApertusModelTest(CausalLMModelTest, unittest.TestCase): model_tester_class = ApertusModelTester # Need to use `0.8` instead of `0.9` for `test_cpu_offload` # This is because we are hitting edge cases with the causal_mask buffer model_split_percents = [0.5, 0.7, 0.8] # used in `test_torch_compile_for_training` _torch_compile_train_cls = ApertusForCausalLM if is_torch_available() else None @require_torch_accelerator @slow class ApertusIntegrationTest(unittest.TestCase): pass
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/apertus/test_modeling_apertus.py", "license": "Apache License 2.0", "lines": 46, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:utils/important_files.py
# List here the models to always test. IMPORTANT_MODELS = [ "auto", "bert", "gpt2", "t5", "modernbert", "vit", "clip", "detr", "table_transformer", "got_ocr2", "whisper", "wav2vec2", "qwen2_audio", "speech_t5", "csm", "llama", "gemma3", "qwen2", "mistral3", "qwen2_5_vl", "llava", "smolvlm", "internvl", "gemma3n", "gpt_oss", "qwen2_5_omni", ]
{ "repo_id": "huggingface/transformers", "file_path": "utils/important_files.py", "license": "Apache License 2.0", "lines": 29, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
huggingface/transformers:src/transformers/pipelines/keypoint_matching.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections.abc import Sequence from typing import Any, TypeAlias, TypedDict, Union from typing_extensions import overload from ..image_utils import is_pil_image from ..utils import is_vision_available, requires_backends from .base import Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image ImagePair: TypeAlias = Sequence[Union["Image.Image", str]] class Keypoint(TypedDict): x: float y: float class Match(TypedDict): keypoint_image_0: Keypoint keypoint_image_1: Keypoint score: float def validate_image_pairs(images: Any) -> Sequence[Sequence[ImagePair]]: error_message = ( "Input images must be a one of the following :", " - A pair of images.", " - A list of pairs of images.", ) def _is_valid_image(image): """images is a PIL Image or a string.""" return is_pil_image(image) or isinstance(image, str) if isinstance(images, Sequence): if len(images) == 2 and all((_is_valid_image(image)) for image in images): return [images] if all( isinstance(image_pair, Sequence) and len(image_pair) == 2 and all(_is_valid_image(image) for image in image_pair) for image_pair in images ): return images raise ValueError(error_message) class KeypointMatchingPipeline(Pipeline): """ Keypoint matching pipeline using any `AutoModelForKeypointMatching`. This pipeline matches keypoints between two images. 
""" _load_processor = False _load_image_processor = True _load_feature_extractor = False _load_tokenizer = False def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) requires_backends(self, "vision") def _sanitize_parameters(self, threshold=None, timeout=None): preprocess_params = {} if timeout is not None: preprocess_params["timeout"] = timeout postprocess_params = {} if threshold is not None: postprocess_params["threshold"] = threshold return preprocess_params, {}, postprocess_params @overload def __call__(self, inputs: ImagePair, threshold: float = 0.0, **kwargs: Any) -> list[Match]: ... @overload def __call__(self, inputs: list[ImagePair], threshold: float = 0.0, **kwargs: Any) -> list[list[Match]]: ... def __call__( self, inputs: list[ImagePair] | ImagePair, threshold: float = 0.0, **kwargs: Any, ) -> list[Match] | list[list[Match]]: """ Find matches between keypoints in two images. Args: inputs (`str`, `list[str]`, `PIL.Image` or `list[PIL.Image]`): The pipeline handles three types of images: - A string containing a http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single pair of images or a batch of image pairs, which must then be passed as a string. Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL images. threshold (`float`, *optional*, defaults to 0.0): The threshold to use for keypoint matching. Keypoints matched with a lower matching score will be filtered out. A value of 0 means that all matched keypoints will be returned. kwargs: `timeout (`float`, *optional*, defaults to None)` The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever. Return: Union[list[Match], list[list[Match]]]: A list of matches or a list if a single image pair is provided, or of lists of matches if a batch of image pairs is provided. 
Each match is a dictionary containing the following keys: - **keypoint_image_0** (`Keypoint`): The keypoint in the first image (x, y coordinates). - **keypoint_image_1** (`Keypoint`): The keypoint in the second image (x, y coordinates). - **score** (`float`): The matching score between the two keypoints. """ if inputs is None: raise ValueError("Cannot call the keypoint-matching pipeline without an inputs argument!") formatted_inputs = validate_image_pairs(inputs) outputs = super().__call__(formatted_inputs, threshold=threshold, **kwargs) if len(formatted_inputs) == 1: return outputs[0] return outputs def preprocess(self, images, timeout=None): images = [load_image(image, timeout=timeout) for image in images] model_inputs = self.image_processor(images=images, return_tensors="pt") model_inputs = model_inputs.to(self.dtype) target_sizes = [image.size for image in images] preprocess_outputs = {"model_inputs": model_inputs, "target_sizes": target_sizes} return preprocess_outputs def _forward(self, preprocess_outputs): model_inputs = preprocess_outputs["model_inputs"] model_outputs = self.model(**model_inputs) forward_outputs = {"model_outputs": model_outputs, "target_sizes": [preprocess_outputs["target_sizes"]]} return forward_outputs def postprocess(self, forward_outputs, threshold=0.0) -> list[Match]: model_outputs = forward_outputs["model_outputs"] target_sizes = forward_outputs["target_sizes"] postprocess_outputs = self.image_processor.post_process_keypoint_matching( model_outputs, target_sizes=target_sizes, threshold=threshold ) postprocess_outputs = postprocess_outputs[0] pair_result = [] for kp_0, kp_1, score in zip( postprocess_outputs["keypoints0"], postprocess_outputs["keypoints1"], postprocess_outputs["matching_scores"], ): kp_0 = Keypoint(x=kp_0[0].item(), y=kp_0[1].item()) kp_1 = Keypoint(x=kp_1[0].item(), y=kp_1[1].item()) pair_result.append(Match(keypoint_image_0=kp_0, keypoint_image_1=kp_1, score=score.item())) pair_result = sorted(pair_result, 
key=lambda x: x["score"], reverse=True) return pair_result
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/pipelines/keypoint_matching.py", "license": "Apache License 2.0", "lines": 142, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:tests/pipelines/test_pipelines_keypoint_matching.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import datasets from transformers.models.auto.modeling_auto import MODEL_FOR_KEYPOINT_MATCHING_MAPPING from transformers.pipelines import KeypointMatchingPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, is_vision_available, require_torch, require_vision, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image @is_pipeline_test @require_torch @require_vision class KeypointMatchingPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_KEYPOINT_MATCHING_MAPPING _dataset = None @classmethod def _load_dataset(cls): # Lazy loading of the dataset. Because it is a class method, it will only be loaded once per pytest process. 
if cls._dataset is None: cls._dataset = datasets.load_dataset("hf-internal-testing/image-matching-dataset", split="train") def get_test_pipeline( self, model, tokenizer=None, image_processor=None, feature_extractor=None, processor=None, torch_dtype="float32", ): image_matcher = KeypointMatchingPipeline( model=model, tokenizer=tokenizer, feature_extractor=feature_extractor, image_processor=image_processor, processor=processor, torch_dtype=torch_dtype, ) examples = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", ] return image_matcher, examples def run_pipeline_test(self, image_matcher, examples): self._load_dataset() outputs = image_matcher( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( outputs, [ { "keypoint_image_0": {"x": ANY(float), "y": ANY(float)}, "keypoint_image_1": {"x": ANY(float), "y": ANY(float)}, "score": ANY(float), } ] * 2, # 2 matches per image pair ) # Accepts URL + PIL.Image + lists outputs = image_matcher( [ [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", ], [self._dataset[0]["image"], self._dataset[1]["image"]], [self._dataset[1]["image"], self._dataset[2]["image"]], [self._dataset[2]["image"], self._dataset[0]["image"]], ] ) self.assertEqual( outputs, [ [ { "keypoint_image_0": {"x": ANY(float), "y": ANY(float)}, "keypoint_image_1": {"x": ANY(float), "y": ANY(float)}, "score": ANY(float), } ] * 2 # 2 matches per image pair ] * 4, # 4 image pairs ) @require_torch def test_single_image(self): self._load_dataset() small_model = "magic-leap-community/superglue_outdoor" image_matcher = pipeline("keypoint-matching", model=small_model) with self.assertRaises(ValueError): image_matcher( self._dataset[0]["image"], threshold=0.0, ) with self.assertRaises(ValueError): image_matcher( 
[self._dataset[0]["image"]], threshold=0.0, ) @require_torch def test_single_pair(self): self._load_dataset() small_model = "magic-leap-community/superglue_outdoor" image_matcher = pipeline("keypoint-matching", model=small_model) image_0: Image.Image = self._dataset[0]["image"] image_1: Image.Image = self._dataset[1]["image"] outputs = image_matcher((image_0, image_1), threshold=0.0) output = outputs[0] # first match from image pair self.assertAlmostEqual(output["keypoint_image_0"]["x"], 698, places=1) self.assertAlmostEqual(output["keypoint_image_0"]["y"], 469, places=1) self.assertAlmostEqual(output["keypoint_image_1"]["x"], 434, places=1) self.assertAlmostEqual(output["keypoint_image_1"]["y"], 440, places=1) self.assertAlmostEqual(output["score"], 0.9905, places=3) @require_torch def test_multiple_pairs(self): self._load_dataset() small_model = "magic-leap-community/superglue_outdoor" image_matcher = pipeline("keypoint-matching", model=small_model) image_0: Image.Image = self._dataset[0]["image"] image_1: Image.Image = self._dataset[1]["image"] image_2: Image.Image = self._dataset[2]["image"] outputs = image_matcher( [ (image_0, image_1), (image_1, image_2), (image_2, image_0), ], threshold=1e-4, ) # Test first pair (image_0, image_1) output_0 = outputs[0][0] # First match from first pair self.assertAlmostEqual(output_0["keypoint_image_0"]["x"], 698, places=1) self.assertAlmostEqual(output_0["keypoint_image_0"]["y"], 469, places=1) self.assertAlmostEqual(output_0["keypoint_image_1"]["x"], 434, places=1) self.assertAlmostEqual(output_0["keypoint_image_1"]["y"], 440, places=1) self.assertAlmostEqual(output_0["score"], 0.9905, places=3) # Test second pair (image_1, image_2) output_1 = outputs[1][0] # First match from second pair self.assertAlmostEqual(output_1["keypoint_image_0"]["x"], 272, places=1) self.assertAlmostEqual(output_1["keypoint_image_0"]["y"], 310, places=1) self.assertAlmostEqual(output_1["keypoint_image_1"]["x"], 228, places=1) 
self.assertAlmostEqual(output_1["keypoint_image_1"]["y"], 568, places=1) self.assertAlmostEqual(output_1["score"], 0.9890, places=3) # Test third pair (image_2, image_0) output_2 = outputs[2][0] # First match from third pair self.assertAlmostEqual(output_2["keypoint_image_0"]["x"], 385, places=1) self.assertAlmostEqual(output_2["keypoint_image_0"]["y"], 677, places=1) self.assertAlmostEqual(output_2["keypoint_image_1"]["x"], 689, places=1) self.assertAlmostEqual(output_2["keypoint_image_1"]["y"], 351, places=1) self.assertAlmostEqual(output_2["score"], 0.9900, places=3)
{ "repo_id": "huggingface/transformers", "file_path": "tests/pipelines/test_pipelines_keypoint_matching.py", "license": "Apache License 2.0", "lines": 170, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/transformers:examples/pytorch/continuous_batching_simple.py
# Copyright 2025 The HuggingFace Inc. team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import time import datasets import torch from transformers import AutoModelForCausalLM, AutoTokenizer from transformers.generation import GenerationConfig from transformers.utils import is_torch_accelerator_available MODEL_ID = "Qwen/Qwen3-4B-Instruct-2507" DISPLAYED_SAMPLES = 3 if __name__ == "__main__": # Parse args parser = argparse.ArgumentParser() parser.add_argument("--num-blocks", "-n", type=int, default=None) parser.add_argument("--max-batch-tokens", "-b", type=int, default=None) parser.add_argument("--attn", type=str, default="kernels-community/flash-attn2", help="Attention implementation") parser.add_argument("--samples", type=int, default=500) parser.add_argument("--max-new-tokens", type=int, default=32) args = parser.parse_args() device = torch.accelerator.current_accelerator() if is_torch_accelerator_available() else "cuda" device_map = "cpu" if device is None else device.type # Prepare model model = AutoModelForCausalLM.from_pretrained( MODEL_ID, attn_implementation=args.attn, device_map=device_map, dtype=torch.bfloat16, ) model = model.eval() # Prepare tokenizer and dataset tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, padding_side="left") dataset = datasets.load_dataset("openai/gsm8k", "socratic", split="test") dataset = dataset.select(range(args.samples)) tokenized_datasets = dataset.map(lambda x: tokenizer(x["question"]), batched=True) 
simple_batch_inputs = [item["input_ids"] for item in tokenized_datasets] # Prepare generation config generation_config = GenerationConfig( max_new_tokens=args.max_new_tokens, use_cuda_graph=False, # Not supported for simple version eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id, do_sample=False, num_blocks=args.num_blocks, max_batch_tokens=args.max_batch_tokens, ) # Warmup iterations _ = model.generate_batch( inputs=simple_batch_inputs[: min(5, args.samples)], generation_config=generation_config, ) # Actual batch generation print("--- Running CB Generation Example ---") start_time = time.time() batch_outputs = model.generate_batch( inputs=simple_batch_inputs, generation_config=generation_config, ) end_time = time.time() print("Done with batch generation.") # Decode outputs token_count = 0 for i, request in enumerate(batch_outputs): input_text = tokenizer.decode(batch_outputs[request].prompt_ids, skip_special_tokens=True) # Try to decode the output try: output_text = tokenizer.decode(batch_outputs[request].generated_tokens, skip_special_tokens=True) token_count += len(batch_outputs[request].generated_tokens[1:]) except Exception as e: print(f"Decoding failed for request {request}: {e}") continue # Display sample if asked if i < DISPLAYED_SAMPLES: print("-" * 20) print(f"{request} Input: {input_text}") if len(output_text) > 0: print(f"{request} Output: {output_text}") else: print(f"[WARN] {request} Output was empty!") # Compute stats and maybe print them gen_time = end_time - start_time tok_per_sec = token_count / gen_time print("-" * 20) print("--- Finished CB Generation Example ---\n") print(f"CB generation took: {gen_time:.2f} seconds for {token_count} tokens. {tok_per_sec:.2f}tok/s")
{ "repo_id": "huggingface/transformers", "file_path": "examples/pytorch/continuous_batching_simple.py", "license": "Apache License 2.0", "lines": 96, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/generation/continuous_batching/cache.py
# Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from math import floor, gcd, sqrt import torch from ...configuration_utils import PreTrainedConfig from ...generation.configuration_utils import GenerationConfig from ...utils.generic import is_flash_attention_requested from ...utils.metrics import attach_tracer, traced from .cache_manager import BlockManager, CacheAllocator, FullAttentionCacheAllocator, SlidingAttentionCacheAllocator from .requests import RequestState, RequestStatus, get_device_and_memory_breakdown, logger def group_layers_by_attn_type(config: PreTrainedConfig) -> tuple[list[list[int]], list[str]]: """ Group layers depending on the attention mix, according to VLLM's hybrid allocator rules: - Layers in each group need to have the same type of attention - All groups have the same number of layers For a model with the following layer types: ["sliding", "full", "full", "sliding", "full", "full", "full", "full"] We would get four groups: [0, 3], [1, 2], [4,5] and [6,7]. 
""" # If the config has no layer_type attribute, it means all layers are the same attention type layer_types = getattr(config, "layer_types", None) if layer_types is None: attn_type = "sliding_attention" if getattr(config, "sliding_window", None) is not None else "full_attention" layer_types = [attn_type for _ in range(config.num_hidden_layers)] # We then count the number of layers of each type layer_counts = {} for i, layer_type in enumerate(layer_types): layer_counts[layer_type] = layer_counts.get(layer_type, []) + [i] # The size of all groups is the greatest common divisor of the number of layers of each type group_size = gcd(*[len(indices) for indices in layer_counts.values()]) # We then group the layers by type layer_groups = [] for layer_type, indices in layer_counts.items(): for i in range(0, len(indices), group_size): layer_groups.append(indices[i : i + group_size]) # And note the layer types group_types = [layer_types[lg[0]] for lg in layer_groups] return layer_groups, group_types @attach_tracer() class PagedAttentionCache: """ Manages the cache for a paged attention mechanism, inspired by VLLM's hybrid allocator. The cache relies on making groups of layers to reduce the complexity of cache management and fragmentation. The cache uses a three-level hierarchy: - Pages: The smallest unit of cache, a page has a size of [num_heads, head_size], which is the space needed to store the key or value states for one token and one layer. For a model with only full-attention layers, to store the KV cache of one token, we need `2 * num_layers` pages: key and values each take `num_layers` pages. Pages are grouped into blocks: - Blocks: A block is a collection of `block_size` pages, serving as the allocation unit to reduce management complexity and fragmentation. Cache is allocated and freed block by block, not page by page. One block is allocated to one layer group, which only has one attention type, like full-attention or sliding-attention. 
If all layers in the model have the same attention type, then all layers will be in the same group. There is more than one group if and only if the model has a mixed attention types, like layers with full-attention and layers with sliding-attention. - Cache tensors: The physical supports for the cache. There are as many cache tensors as there are layer in a layer group, and the shape of the cache tensor is `[num_blocks * block_size, num_heads, head_size]`. Grouping layers into groups is useful because when we allocate one block to a group N, the block allocated is the same for all layers in group N, equivalently it is allocated across all cache tensors. This allows us to efficiently allocate and free blocks, and to efficiently read and write key and value states. For instance, imagine we have 8 blocks of cache and a model with two layer groups: a full-attention group with 3 layers and a sliding-attention group with 3 layers. At creation time, the physical cache tensors look like this: cache_tensor_0: □ □ □ □ □ □ □ □ cache_tensor_1: □ □ □ □ □ □ □ □ cache_tensor_2: □ □ □ □ □ □ □ □ where □ means the blocks is not allocated to any layer group yet. We have 3 cache tensors because there are 3 layers per group. We allocate 1 block to each group, after allocation, the cache tensors look like this: cache_tensor_0: ✖ ◉ □ □ □ □ □ □ cache_tensor_1: ✖ ◉ □ □ □ □ □ □ cache_tensor_2: ✖ ◉ □ □ □ □ □ □ where ✖ means the block is allocated to the full-attention group, and ◉ means the block is allocated to the sliding-attention group. 
Now, if we continue to generate, and the sliding window has been reached, we only need to allocate a new block for the full-attention group, and the cache tensors look like this: cache_tensor_0: ✖ ◉ ✖ □ □ □ □ □ cache_tensor_1: ✖ ◉ ✖ □ □ □ □ □ cache_tensor_2: ✖ ◉ ✖ □ □ □ □ □ And after further generation, when we need a new block allocated: cache_tensor_0: ✖ ◉ ✖ ✖ □ □ □ □ cache_tensor_1: ✖ ◉ ✖ ✖ □ □ □ □ cache_tensor_2: ✖ ◉ ✖ ✖ □ □ □ □ This would not have been possible if all layers were in the same group: we would have had to allocate a new block for the sliding-attention group, although it is not needed. """ def __init__( self, config: PreTrainedConfig, generation_config: GenerationConfig, device: torch.device | str, dtype: torch.dtype = torch.float16, tp_size: int | None = None, allow_block_sharing: bool = True, ) -> None: """Initialize a paged attention cache for efficient memory usage. Also turns in prefix sharing if the model has only full attention layers. Args: config: Model configuration generation_config: Generation configuration containing cache parameters device: Device for the cache tensors dtype: Data type of the cache tp_size: Tensor parallelism size allow_block_sharing: A flag to allow block sharing. If the model has some full attention layers, then prefix sharing is enabled as well. 
""" self.config = config self.dtype = dtype self.device = device # Extract model dimensions kv_heads = getattr(config, "num_key_value_heads", None) self.num_key_value_heads: int = kv_heads if kv_heads is not None else config.num_attention_heads head_dim = getattr(config, "head_dim", None) self.head_dim: int = head_dim if head_dim is not None else config.hidden_size // config.num_attention_heads # Extract cache dimensions self.block_size = getattr(generation_config, "block_size", 32) # Group layers depending on the attention mix layer_groups, group_types = group_layers_by_attn_type(config) group_size = len(layer_groups[0]) self.num_groups = len(layer_groups) self.sliding_windows = {} self.layer_index_to_group_indices = {} for i, group in enumerate(layer_groups): sliding_window = config.sliding_window if group_types[i] == "sliding_attention" else 1 for j, layer in enumerate(group): self.layer_index_to_group_indices[layer] = (i, j) self.sliding_windows[layer] = sliding_window # Handle TP (or dont) if tp_size is not None and tp_size > 1: if self.num_key_value_heads % tp_size != 0: raise ValueError( f"Number of key value heads {self.num_key_value_heads} must be divisible by tensor parallel size {tp_size}." ) # If the model is using tensor parallelism, we need to adjust the number of heads accordingly. # self.num_key_value_heads //= tp_size # TODO: why is this commented out? 
# Infer number of blocks and max batch tokens page_size = self.head_dim * self.num_key_value_heads if is_flash_attention_requested(self.config): num_attention_masks = 0 # only used to compute the default memory footprint args elif "sliding_attention" in group_types: # TODO: when we generalize to allow for block-attn, we can use `num_attention_masks=sum(set(group_types))` num_attention_masks = 2 else: num_attention_masks = 1 memory_handler = PagedAttentionMemoryHandler( block_size=self.block_size, page_size=page_size, num_groups=self.num_groups, group_size=group_size, peak_activation_per_token=(config.hidden_size + config.vocab_size), num_attention_masks=num_attention_masks, ) num_blocks, max_batch_tokens = memory_handler.infer_num_blocks_and_max_batch_tokens( num_blocks=getattr(generation_config, "num_blocks", None), max_batch_tokens=getattr(generation_config, "max_batch_tokens", None), max_memory_percent=getattr( generation_config, "max_memory", 0.8 ), # FIXME: it seems we overcommit memory, was changed from 0.9 which caused OOMs in our benchmarking CI cache_dtype=self.dtype, ) # Add the inferred attributes to the class self.num_blocks = num_blocks self.max_batch_tokens = max_batch_tokens self.num_pages = self.num_blocks * self.block_size logger.info( f"PagedAttentionCache initialized with {self.num_blocks = }, {self.block_size = }, {page_size = }, " f"{self.max_batch_tokens = } {num_attention_masks = }" ) # Initialize the cache self.key_cache: list[torch.Tensor] = [] self.value_cache: list[torch.Tensor] = [] # We add two extra tokens to the cache to handle padding and generally discard unwanted tokens self.cache_shape = ((num_blocks + 2) * self.block_size, self.num_key_value_heads, self.head_dim) for _ in range(group_size): new_layer_key_cache = torch.empty(self.cache_shape, dtype=self.dtype, device=self.device) new_layer_value_cache = torch.empty(self.cache_shape, dtype=self.dtype, device=self.device) torch._dynamo.mark_static_address(new_layer_key_cache) 
torch._dynamo.mark_static_address(new_layer_value_cache) self.key_cache.append(new_layer_key_cache) self.value_cache.append(new_layer_value_cache) logger.info(f"{self.cache_shape = } {self.key_cache[0].shape = } {self.key_cache[0].numel() = }") # Block management data structures self.allow_block_sharing = allow_block_sharing self.group_cache_managers: list[CacheAllocator] = [] self.num_full_attention_groups = 0 self.num_sliding_attention_groups = 0 self.max_sliding_window_blocks_per_request = 0 for i, group_type in enumerate(group_types): if group_type == "full_attention": cm = FullAttentionCacheAllocator(i, self.block_size, allow_block_sharing=allow_block_sharing) self.num_full_attention_groups += 1 elif group_type == "sliding_attention": cm = SlidingAttentionCacheAllocator(i, self.block_size, config.sliding_window) self.num_sliding_attention_groups += 1 self.max_sliding_window_blocks_per_request = cm._max_blocks_per_request else: raise ValueError(f"Invalid group type: {group_type}") self.group_cache_managers.append(cm) # We only use prefix sharing if the whole model has only full attention layers and block sharing is allowed self.use_prefix_sharing = allow_block_sharing and group_types == ["full_attention"] self._block_manager = BlockManager(num_blocks, self.block_size) self._total_prefix_length: int = 0 # a counter to measure the impact of prefix sharing, also used in tests def will_allocation_be_successful(self, num_requested_blocks: int, allocated_blocks: int) -> bool: """Returns a boolean indicating if the allocation of (num_requested_blocks) blocks will be successful. The number of newly allocated blocks needed is predicted by the following rules: - for full attention groups: since there is no sliding window for full attention layers, one requested block is always equivalent to one newly allocated block for EACH full attention group - for sliding window groups: because of the sliding window, the number of blocks allocated to a request is capped. 
Using the number of already (allocated_blocks) we can compute the number of new blocks to actually allocate to the request, which can be lower than the number of requested blocks. That number is the same for all sliding window groups, as only one sliding window size is supported. """ # This is not in a branch, because it is very rare to have zero full attention layer needed_blocks = num_requested_blocks * self.num_full_attention_groups # Only take this branch if the model has sliding window attention layers if self.num_sliding_attention_groups: blocks_left = max(self.max_sliding_window_blocks_per_request - allocated_blocks, 0) needed_blocks += min(blocks_left, num_requested_blocks) * self.num_sliding_attention_groups return needed_blocks <= self.get_num_free_blocks() @traced def allocate_blocks(self, n_blocks: int, request_id: str, allocated_blocks: int) -> int | None: """Allocate cache blocks across all layer groups for a given request. Actual allocation is done by the cache managers, and this method only returns the maximum number of blocks actually allocated across all managers.""" # First check allocation will be successful before starting, to avoid partial allocations if not self.will_allocation_be_successful(n_blocks, allocated_blocks): return None # Allocate blocks across all cache managers max_allocated = 0 for cm in self.group_cache_managers: num_allocated_blocks = cm.allocate_blocks(n_blocks, request_id, self._block_manager) if num_allocated_blocks is None: raise ValueError(f"Failed to allocate {n_blocks} blocks for request {request_id}") max_allocated = max(max_allocated, num_allocated_blocks) return max_allocated @traced def free_blocks(self, request_id: str) -> None: """Free all allocated cache blocks for a given request across all layer groups. 
Actual deallocation is done by the cache managers.""" for cm in self.group_cache_managers: cm.free_blocks(request_id, self._block_manager) def get_num_free_blocks(self) -> int: """Get the current number of unallocated blocks available for new requests.""" return self._block_manager.num_free_blocks @traced def extend_read_and_write_indices( self, request_id: str, past_length: int, query_length: int, read_index: list[list[int]], write_index: list[list[int]], ) -> None: """Retrieve physical cache indices for reading KV states in the cache across all layer groups. This method coordinates with all cache managers to build the complete set of read indices needed for attention computation. """ for cm, read_indices, write_indices in zip(self.group_cache_managers, read_index, write_index): indices = cm.get_read_indices(request_id, past_length, query_length) read_indices.extend(indices) indices = cm.get_write_indices(request_id, past_length, query_length) write_indices.extend(indices) @traced def get_seqlens_k(self, past_length: int, query_length: int) -> dict[str, int]: """Retrieve the key sequence length for the given request_id across all layer types. 
Returns a dictionary of layer types to their corresponding key sequence lengths.""" seqlens_k = {} if self.num_full_attention_groups > 0: seqlens_k["full_attention"] = past_length + query_length if self.num_sliding_attention_groups > 0: seqlens_k["sliding_attention"] = query_length + min(past_length, self.config.sliding_window - 1) # NOTE: when we add more attention types / different sliding windows, we can go back to looping over CMs return seqlens_k @traced def update( self, key_states: torch.Tensor, # shape [1, num_kv_heads, seqlen_kv, head_dim] value_states: torch.Tensor, # shape [1, num_kv_heads, seqlen_kv, head_dim] layer_idx: int, read_index: list[torch.Tensor], # shape [num_layer_groups, seqlen_kv + past_length] write_index: list[torch.Tensor], # shape [num_layer_groups, seqlen_q] ) -> tuple[torch.Tensor, torch.Tensor]: # shape [seqlen_kv + past_length, num_kv_heads, head_dim] """Update the cache with new key-value states for a specific layer. This method writes new KV states to the appropriate cache locations. The behavior differs based on the layer's attention type: - Full attention: New KV states are written to cache, then complete sequence is read from cache - Sliding window: Old KV is read from cache along with extra spaces for the new KV, then new KV is written to cache. This is because new KV might overwrite the old KV, so we need to read the old KV first. Returns the complete KV states (cached + new) for attention computation. 
""" # Retrieve the layer read and write indices, and if there is a sliding window group_idx, layer_idx_in_group = self.layer_index_to_group_indices[layer_idx] layer_read_index = read_index[group_idx] layer_write_index = write_index[group_idx] # Select the correct cache k_cache = self.key_cache[layer_idx_in_group] v_cache = self.value_cache[layer_idx_in_group] # Transpose the key and value states to match the cache shape, after which shape is [seqlen_kv, num_kv_heads, head_dim] key_states = key_states.transpose(1, 2).squeeze(0) value_states = value_states.transpose(1, 2).squeeze(0) # Case: full attention sliding_window = self.sliding_windows[layer_idx] if sliding_window == 1: k_cache[layer_write_index, :, :] = key_states v_cache[layer_write_index, :, :] = value_states key_states_with_cache = k_cache[layer_read_index, :, :] value_states_with_cache = v_cache[layer_read_index, :, :] # Case: sliding window -- we need to be careful of read/write order because of chunked prefill, because it's # the only case where you may write over cache you need to use else: # Add the cache to the key and value states mask = (layer_read_index == -1).unsqueeze(-1).unsqueeze(-1) # TODO: should this be precomputed? key_states_with_cache = k_cache[layer_read_index, :, :] key_states_with_cache.masked_scatter_(mask, key_states) value_states_with_cache = v_cache[layer_read_index, :, :] value_states_with_cache.masked_scatter_(mask, value_states) # Write new KV values to the cache k_cache[layer_write_index, :, :] = key_states v_cache[layer_write_index, :, :] = value_states # Return the new KV values return key_states_with_cache, value_states_with_cache def search_prefix_match(self, request_id: str, prompt_ids: list[int]) -> int: """Searches for a prefix match in the cache for the given (prompts_ids). If one is found, we reference the matching blocks in the (request_id), increase the reference count of the blocks and return the number of blocks that match. 
If no prefix match is found, we return 0.""" current_hash = None allocated_blocks = [] for b in range(len(prompt_ids) // self.block_size): tokens = prompt_ids[b * self.block_size : (b + 1) * self.block_size] # Prefix sharing is only supported when there is only one full attention layer group, so group_id=0. current_hash = self._block_manager.compute_hash(current_hash, tokens, group_id=0) block_id = self._block_manager._hash_to_id.get(current_hash) if block_id is not None: allocated_blocks.append(block_id) self._block_manager.increase_ref_count(block_id) else: break # If we found a matching prefix, we reference the blocks in the request if allocated_blocks: logger.debug(f"Found prefix match for request {request_id} with {len(allocated_blocks)} blocks") cm = self.group_cache_managers[0] cm.block_table[request_id] = allocated_blocks prefix_length = len(allocated_blocks) * self.block_size self._total_prefix_length += prefix_length return prefix_length def mark_shareable_blocks_as_complete(self, state: RequestState, num_complete_blocks: int) -> None: """Marks the blocks allocated to a request (state) as complete if they are shareable and they have been computed in the forward pass. A complete block is a block where the KV cache has been fully computed: if the block has enough space to hold the cache for N tokens, the block is marked as complete when the cache data is present for the N tokens. If block sharing is off, this is a no-op.""" # The status can be FINISHED in async mode, because batch N+1 offloaded the request before batch N was over. So # we need to check for this case to avoid looking in the block table for blocks that no longer exist. 
if num_complete_blocks == 0 or state.status == RequestStatus.FINISHED: return None for cm in self.group_cache_managers: if cm.uses_block_sharing: self._block_manager.mark_shareable_blocks_as_complete( num_complete_blocks=num_complete_blocks, allocated_blocks=cm.block_table[state.request_id], prompt_ids=(state.initial_tokens + state.generated_tokens), ) def copy_cache(self, list_source_blocks: list[int], list_forked_blocks: list[int]) -> None: """Copy the cache from the source blocks to the forked blocks.""" source_blocks = torch.tensor(list_source_blocks, device=self.device, dtype=torch.int32) forked_blocks = torch.tensor(list_forked_blocks, device=self.device, dtype=torch.int32) for key_cache, value_cache in zip(self.key_cache, self.value_cache): key_cache = key_cache.view(-1, self.block_size, self.num_key_value_heads, self.head_dim) value_cache = value_cache.view(-1, self.block_size, self.num_key_value_heads, self.head_dim) key_cache[forked_blocks] = key_cache[source_blocks] value_cache[forked_blocks] = value_cache[source_blocks] # FIXME: consolidate the cache into a single tensor of shape (group_size, 2, *self.k_or_v_cache_shape) # This will allow for better .update and a single copy instead of one per cache tensor def fork_request(self, source_request_id: str, destination_request_ids: list[str]) -> tuple[list[int], list[int]]: """Fork the cache of a request (state) into the one of a list of requests with the given (dst_request_ids).""" # These lists will be the accumulators for the source and destination blocks for the cache copy source_blocks, destination_blocks = [], [] # Main fork loop for cm in self.group_cache_managers: src_blocks, dst_blocks = cm.fork_blocks(source_request_id, destination_request_ids, self._block_manager) source_blocks.extend(src_blocks) destination_blocks.extend(dst_blocks) return source_blocks, destination_blocks # TODO: rework computation with the groups and their sizes class PagedAttentionMemoryHandler: """A helper class to determine 
the best number of pages and maximum number of tokens per batch for the paged attention cache, providing automatic sizing based on available GPU memory. The helper works using the number of pages, which is tied to the number of blocks by: num_blocks = num_pages // block_size The memory footprint consists of three main components: - Cache memory: the space needed to store the cache tensors: 2 * layer_group_size * [num_pages, page_size] * cache_dtype - Activation memory: the space temporarily taken by the largest activation during the model forward pass: peak_activation_per_token * max_tokens_per_batch * activation_dtype_size - Static tensors: the space taken by the input/output buffers and metadata tensors for batch processing, sum of: - inputs_ids + outputs_ids + position_ids + logits_indices: 4 * max_tokens_per_batch * int32_size - attention_mask: num_attention_masks * num_pages * max_tokens_per_batch * activation_dtype_size - cumulative_seqlens_q + cumulative_seqlens_k: (1 + 2) * max_tokens_per_batch * int32_size - write_index_tensor: num_groups * max_tokens_per_batch * int32_size - read_index_tensor: num_groups * (num_pages + max_tokens_per_batch) * int32_size The handler can operate in three modes: 1. Auto-sizing: Determines both number of pages and maximum number of tokens per batch using quadratic optimization 2. Fixed cache: Calculates max batch tokens given a fixed number of pages 3. Fixed batch: Calculates number of pages given a fixed maximum batch size """ _activation_dtype = torch.bfloat16 _input_dtype = torch.int32 _upper_bound_max_batch_tokens = 256 _upper_bound_num_blocks = 4096 def __init__( self, block_size: int, page_size: int, num_groups: int, group_size: int, peak_activation_per_token: int, num_attention_masks: int, ) -> None: """Initialize the memory handler with the parameters that cannot be automatically inferred. 
Args:
            block_size: Size of the cache blocks
            page_size: Size of the cache pages
            num_groups: Number of layer groups
            group_size: Number of layers per layer group
            peak_activation_per_token: Maximum size of activation tensor per token, = hidden_size + vocab_size
            num_attention_masks: Number of attention masks, 0 if no attention mask is used, 2 if hybrid model, else 1
        """
        # Geometry of the paged cache: num_pages = num_blocks * block_size throughout this class
        self.block_size = block_size
        self.page_size = page_size
        self.num_groups = num_groups
        self.group_size = group_size
        # These two drive the non-cache (activation / attention-mask) memory terms in the sizing equations below
        self.peak_activation_per_token = peak_activation_per_token
        self.num_attention_masks = num_attention_masks

    @staticmethod
    def get_available_memory(max_memory_percent: float = 1.0) -> int:
        """Calculate available GPU memory for cache allocation, accounting for already allocated tensors.

        This method queries the current memory state and applies the specified percentage limit to determine how
        much memory can be safely used for the paged attention cache.

        Args:
            max_memory_percent: Fraction of available memory to use (0.0-1.0). 1.0 means use all available memory.

        Returns:
            int: Available memory in bytes for cache allocation
        """
        _, total, reserved, allocated = get_device_and_memory_breakdown()
        # Reserved-but-unallocated memory is treated as unavailable: we subtract the larger of the two counters
        available_memory = total - max(allocated, reserved)
        available_memory = int(available_memory * max_memory_percent)
        return available_memory

    def infer_num_blocks_and_max_batch_tokens(
        self,
        num_blocks: int | None = None,
        max_batch_tokens: int | None = None,
        max_memory_percent: float = 0.8,  # FIXME: it seems we overcommit memory, was changed from 0.9 which caused OOMs in our benchmarking CI
        cache_dtype: torch.dtype = torch.float16,
    ) -> tuple[int, int]:
        """Determine optimal number of blocks and maximum number of tokens per batch based on available memory and
        constraints. Check the class docstring for more details.

        Depending on which of `num_blocks` / `max_batch_tokens` are provided, the missing quantity (or both) is
        inferred from the memory equation below; a final feasibility check is always performed.

        Naming the number of pages as N and the maximum number of tokens per batch as M, the equation solved is:
            available_memory = sum([
                MN * num_attention_masks * activation_dtype_size,
                2N * (layer_group_size * page_size * cache_dtype + 2 * num_group),
                M * (peak_activation_per_token * activation_dtype + 28 + 4 * num_group),
            ])
        where we already simplified int32_size = 4.

        Args:
            num_blocks: Desired number of cache blocks, or None to infer it
            max_batch_tokens: Desired maximum tokens per batch, or None to infer it
            max_memory_percent: Fraction of available memory the cache may use
            cache_dtype: Data type of the KV cache tensors

        Returns:
            tuple[int, int]: The (num_blocks, max_batch_tokens) pair

        Raises:
            MemoryError: If the resulting memory footprint exceeds the available memory
        """
        if num_blocks is None:
            if max_batch_tokens is None:
                # If neither num_blocks nor max_batch_tokens are provided, we use a second-order polynomial
                num_blocks, max_batch_tokens = self.compute_num_blocks_and_max_batch_tokens(
                    max_memory_percent, cache_dtype
                )
            else:
                # If only max_batch_tokens is provided, we infer the num_blocks
                num_blocks = self.compute_num_blocks(max_batch_tokens, max_memory_percent, cache_dtype)
        elif max_batch_tokens is None:
            # If only num_blocks is provided, we infer the max_batch_tokens
            max_batch_tokens = self.compute_max_batch_tokens(num_blocks, max_memory_percent, cache_dtype)
        else:
            # If both num_blocks and max_batch_tokens are provided, we use them (useless, but helps with typing)
            max_batch_tokens = max_batch_tokens
        # We check if the memory footprint is too large in all cases
        available_memory = self.get_available_memory(max_memory_percent)
        memory_footprint = self.compute_memory_footprint(
            max_batch_tokens=max_batch_tokens, num_blocks=num_blocks, cache_dtype=cache_dtype
        )
        if memory_footprint > available_memory:
            raise MemoryError(f"Memory footprint {memory_footprint} is more than available memory {available_memory}")
        return num_blocks, max_batch_tokens

    def compute_num_blocks_and_max_batch_tokens(
        self,
        max_memory_percent: float,
        cache_dtype: torch.dtype = torch.float16,
        m: float = 0.01,
    ) -> tuple[int, int]:
        """Calculate optimal number of blocks and maximum number of tokens per batch using quadratic optimization
        when neither is fixed.

        This method assumes a relationship M = m * N where m is a small ratio below 1 and solves the resulting
        quadratic equation to find the optimal N that maximizes utilization within memory constraints. m is the
        amount of cache we can fill with one batch: m=0.01 means a batch fills at most 1% of the cache.

        The equation to solve is:
            available_memory = sum([
                m * N^2 * num_attention_masks * activation_dtype_size,
                2N * (layer_group_size * page_size * cache_dtype + 2 * num_group),
                m * N * (peak_activation_per_token * activation_dtype + 28 + 4 * num_group),
            ])

        If num_attention_masks is 0, the equation simplifies to a 1st degree polynomial.

        Args:
            max_memory_percent: Fraction of available memory the cache may use
            cache_dtype: Data type of the KV cache tensors
            m: Assumed ratio M / N (batch tokens per cache page)

        Returns:
            tuple[int, int]: The inferred (num_blocks, max_batch_tokens), both clamped to their upper bounds

        Raises:
            ValueError: If the quadratic has no real root or the solution is negative
        """
        cache_memory = self.get_available_memory(max_memory_percent)
        logger.info(f"Cache memory: {cache_memory}")
        # Compute second-degree polynomial coefficients (a*N^2 + b*N + c = 0)
        a = m * self.num_attention_masks * self._activation_dtype.itemsize
        b = 2 * (self.group_size * self.page_size * cache_dtype.itemsize + 2 * self.num_groups)
        b += m * (self.peak_activation_per_token * self._activation_dtype.itemsize + 28 + 4 * self.num_groups)
        c = -cache_memory
        logger.debug(f"Coefficients of 2nd degree polynomial: {a = }, {b = }, {c = }")
        # If num_attention_masks is 0, the equation simplifies to a 1st degree polynomial
        if self.num_attention_masks == 0:
            greatest_solution = -c / b
        # Otherwise, we solve the quadratic equation
        else:
            discriminant = b**2 - 4 * a * c
            if discriminant < 0:
                raise ValueError(f"Discriminant is negative: {discriminant = }")
            greatest_solution = (-b + sqrt(discriminant)) / (2 * a)
        if greatest_solution < 0:
            raise ValueError(f"Greatest solution is negative: {greatest_solution = }")
        # Infer number of blocks and max batch tokens, clamping each to its upper bound
        num_pages = floor(greatest_solution)
        num_blocks = num_pages // self.block_size
        if num_blocks > self._upper_bound_num_blocks:
            logger.info(f"{num_blocks = } is too large, setting to {self._upper_bound_num_blocks = }")
            num_blocks = self._upper_bound_num_blocks
        max_batch_tokens = int(greatest_solution * m)
        if max_batch_tokens > self._upper_bound_max_batch_tokens:
            logger.info(f"{max_batch_tokens = } is too large, setting to {self._upper_bound_max_batch_tokens = }")
            max_batch_tokens = self._upper_bound_max_batch_tokens
        return num_blocks, max_batch_tokens

    def compute_max_batch_tokens(
        self,
        num_blocks: int,
        max_memory_percent: float,
        cache_dtype: torch.dtype = torch.float16,
    ) -> int:
        """Calculate maximum batch tokens M given a fixed number of cache blocks.

        The formula for M is given by:
            M = (available_memory - 2N * (layer_group_size * page_size * cache_dtype + 2 * num_group))
                / (activation_dtype_size * (N * num_attention_masks + peak_activation_per_token) + 28 + 4 * num_group)

        Args:
            num_blocks: Fixed number of cache blocks
            max_memory_percent: Fraction of available memory the cache may use
            cache_dtype: Data type of the KV cache tensors

        Returns:
            int: The inferred max_batch_tokens, clamped to its upper bound
        """
        cache_memory = self.get_available_memory(max_memory_percent)
        num_pages = num_blocks * self.block_size
        # Compute numerator: memory left after the KV cache itself and its per-page index overhead
        num = cache_memory
        num -= 2 * num_pages * (self.group_size * self.page_size * cache_dtype.itemsize + 2 * self.num_groups)
        # Compute denominator: per-token cost (attention mask column + activations + fixed int32 overheads)
        denum = self._activation_dtype.itemsize * (
            num_pages * self.num_attention_masks + self.peak_activation_per_token
        )
        denum += 28 + 4 * self.num_groups
        # Compute max batch tokens and return
        max_batch_tokens = floor(num / denum)
        if max_batch_tokens > self._upper_bound_max_batch_tokens:
            logger.info(f"{max_batch_tokens = } is too large, setting to {self._upper_bound_max_batch_tokens = }")
            max_batch_tokens = self._upper_bound_max_batch_tokens
        return max_batch_tokens

    def compute_num_blocks(
        self,
        max_batch_tokens: int,
        max_memory_percent: float,
        cache_dtype: torch.dtype = torch.float16,
    ) -> int:
        """Calculate number of cache blocks N given a fixed maximum token per token M.

        The formula for N is given by:
            N = (available_memory - M * (peak_activation_per_token * activation_dtype + 28 + 4 * num_group))
                / (2 * (layer_group_size * page_size * cache_dtype + 2 * num_group)
                   + M * (num_attention_masks * activation_dtype_size))

        Args:
            max_batch_tokens: Fixed maximum number of tokens per batch
            max_memory_percent: Fraction of available memory the cache may use
            cache_dtype: Data type of the KV cache tensors

        Returns:
            int: The inferred num_blocks, clamped to its upper bound
        """
        cache_memory = self.get_available_memory(max_memory_percent)
        # Compute numerator: memory left after the per-token activation and fixed overheads
        num = cache_memory
        num -= max_batch_tokens * self.peak_activation_per_token * self._activation_dtype.itemsize
        num -= max_batch_tokens * (28 + 4 * self.num_groups)
        # Compute denominator: per-page cost of the cache plus the attention-mask row per batch token
        denum = 2 * (self.group_size * self.page_size * cache_dtype.itemsize + 2 * self.num_groups)
        denum += max_batch_tokens * (self.num_attention_masks * self._activation_dtype.itemsize)
        # NOTE(review): this extra M * activation_dtype_size term does not appear in the documented formula above
        # (compare with compute_max_batch_tokens, which matches the class equation exactly). It only makes the
        # estimate more conservative (smaller N), but confirm whether it is intentional.
        denum += max_batch_tokens * self._activation_dtype.itemsize
        # Compute cache size and return number of blocks
        num_pages = floor(num / denum)
        num_blocks = num_pages // self.block_size
        if num_blocks > self._upper_bound_num_blocks:
            logger.info(f"{num_blocks = } is too large, setting to {self._upper_bound_num_blocks = }")
            num_blocks = self._upper_bound_num_blocks
        return num_blocks

    def compute_memory_footprint(
        self,
        num_blocks: int,
        max_batch_tokens: int,
        cache_dtype: torch.dtype,
    ) -> int:
        """Calculate the memory footprint breakdown for a given number of blocks and maximum batch tokens.

        The memory footprint is given by:
            available_memory = sum([
                MN * num_attention_masks * activation_dtype_size,
                2N * (layer_group_size * page_size * cache_dtype + 2 * num_group),
                M * (peak_activation_per_token * activation_dtype + 28 + 4 * num_group),
            ])
        but is broken down below.

        Args:
            num_blocks: Number of cache blocks
            max_batch_tokens: Maximum number of tokens per batch
            cache_dtype: Data type of the KV cache tensors

        Returns:
            int: Total memory footprint in bytes
        """
        num_pages = num_blocks * self.block_size
        # KV cache itself: 2 (keys and values) * layers-per-group * pages * page size * dtype size
        cache_memory_footprint = 2 * self.group_size * num_pages * self.page_size * cache_dtype.itemsize
        # Peak activation tensor for a full batch
        activation_memory_footprint = self.peak_activation_per_token * self._activation_dtype.itemsize
        activation_memory_footprint *= max_batch_tokens
        inputs_outputs_positions_and_logits_memory_footprint = 4 * max_batch_tokens * 4  # second 4 is for int32 size
        # Attention mask(s): one row per batch token, one column per cache page
        attention_memory_footprint = self.num_attention_masks * self._activation_dtype.itemsize
        attention_memory_footprint *= num_pages * max_batch_tokens
        cumulative_seqlens_memory_footprint = 3 * max_batch_tokens * 4  # 4 is for int32 size
        write_index_memory_footprint = self.num_groups * max_batch_tokens * 4  # 4 is for int32 size
        read_index_memory_footprint = self.num_groups * (num_pages + max_batch_tokens) * 4  # 4 is for int32 size
        total_memory_footprint = sum(
            [
                cache_memory_footprint,
                activation_memory_footprint,
                inputs_outputs_positions_and_logits_memory_footprint,
                attention_memory_footprint,
                cumulative_seqlens_memory_footprint,
                write_index_memory_footprint,
                read_index_memory_footprint,
            ]
        )
        return total_memory_footprint
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/generation/continuous_batching/cache.py", "license": "Apache License 2.0", "lines": 628, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/generation/continuous_batching/continuous_api.py
# Copyright 2024 The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import queue
import threading
from abc import abstractmethod
from collections.abc import Generator
from contextlib import contextmanager
from math import ceil
from time import perf_counter

import torch
from torch import nn
from tqdm import tqdm
from tqdm.contrib.logging import logging_redirect_tqdm

from ...configuration_utils import PretrainedConfig
from ...generation.configuration_utils import CompileConfig, GenerationConfig
from ...generation.logits_process import LogitsProcessorList
from ...utils.logging import logging
from ...utils.metrics import ContinuousBatchProcessorMetrics, attach_tracer, traced
from .cache import PagedAttentionCache
from .input_outputs import ContinuousBatchingAsyncIOs, ContinuousBatchingIOs
from .requests import GenerationOutput, RequestState, RequestStatus, logger
from .scheduler import SCHEDULER_MAPPING, FIFOScheduler, Scheduler
from .utils import attn_mask_is_needed, pad_to_interval


"""
To enable cuda graphs, we need the dimensions of all tensors to be static, which is counter-intuitive for CB. In CB,
as generation goes on, there are two dimensions that change:
- the number of queries tokens (Q), which can vary from batch to batch
- the number of keys/values tokens (KV), which grows as the cache does
To solve this, we slice along those dimensions to fixed lengths.

The size of the slices is controlled by interval sizes:
- Q_PADDING_INTERVAL_SIZE: the padding granularity for queries (in tokens)
- KV_PADDING_INTERVAL_SIZE: the padding granularity for KV cache (in tokens)

For example, with Q_PADDING_INTERVAL_SIZE=64 and an actual query length of 100, we pad to 128 tokens. Smaller
intervals mean finer granularity and thus less padding, but more unique graph signatures. Since graphs take memory
and time to create, we use an LRU cache with a fixed size to limit memory usage.

Good defaults:
- Q: 64 tokens gives ~4 graphs for max_batch_tokens=256, which is a good balance
- KV: 16384 tokens (512 blocks at block_size=32) gives reasonable granularity for large caches

The maximum number of cached graphs is controlled by MAX_CACHED_GRAPHS (default 32), which uses LRU eviction.
"""

Q_PADDING_INTERVAL_SIZE = 64
KV_PADDING_INTERVAL_SIZE = 512 * 32  # 512 blocks of 32 tokens (interval size is in tokens for both Q and KV)
MAX_CACHED_GRAPHS = 32


# We cannot use `PreTrainedModel` for circular import reasons, so this helps keep track of the basic types
class ProtoPretrainedModel(nn.Module):
    config: PretrainedConfig
    dtype: torch.dtype
    device: torch.device

    @abstractmethod
    def set_attn_implementation(self, attn_implementation: str) -> None:
        pass

    @abstractmethod
    def _get_logits_processor(self, generation_config: GenerationConfig) -> LogitsProcessorList:
        pass


# Continuous Batch Processor (Internal Logic)
@attach_tracer()
class ContinuousBatchProcessor:
    inputs_and_outputs: ContinuousBatchingIOs | ContinuousBatchingAsyncIOs

    def __init__(
        self,
        cache: PagedAttentionCache,
        config: PretrainedConfig,
        generation_config: GenerationConfig,
        input_queue: queue.Queue,
        output_queue: queue.Queue,
        stop_event: threading.Event,
        model_device: torch.device,
        model_dtype: torch.dtype,
        scheduler: Scheduler,
        manual_eviction: bool,
        use_cuda_graph: bool,
        q_padding_interval_size: int,
        kv_padding_interval_size: int,
        max_cached_graphs: int,
        use_async_batching: bool,
    ) -> None:
        """Initialize the continuous batch processor.

        Args:
            cache: A [`PagedAttentionCache`] object
            config: The model configuration
            generation_config: The generation configuration
            input_queue: Queue for incoming requests
            output_queue: Queue for outgoing results
            stop_event: Event to signal processing should stop
            model_device: Device for model inputs/outputs
            model_dtype: Data type for model inputs/outputs
            scheduler: The [`Scheduler`] to use
            manual_eviction: Whether to manually evict blocks from the cache
            use_cuda_graph: Whether to use cuda graphs or not during CB. Check the docstring at the top of the file
                for more details.
            q_padding_interval_size: Padding granularity for queries in tokens.
            kv_padding_interval_size: Padding granularity for KV cache in tokens.
            max_cached_graphs: Maximum number of CUDA graphs to cache. Uses LRU eviction when full.
            use_async_batching: Whether to use the async IO path (double-buffered inputs/outputs).
        """
        self.cache = cache
        self.config = config
        self.generation_config = generation_config
        self.input_queue = input_queue
        self.output_queue = output_queue
        self.stop_event = stop_event
        self.model_device = model_device
        self.model_dtype = model_dtype
        self.scheduler = scheduler
        self.manual_eviction = manual_eviction

        # Retrieve the size of the sliding window if there is one
        self.sliding_window = 1 if getattr(config, "sliding_window", None) is None else config.sliding_window

        # Cuda graphs for the generation step
        self.q_padding_interval_size = q_padding_interval_size
        self.kv_padding_interval_size = kv_padding_interval_size
        self.max_cached_graphs = max_cached_graphs
        self.use_cuda_graph = use_cuda_graph

        # Compile-related arguments
        self.compile_config: CompileConfig | None = getattr(generation_config, "compile_config", None)
        self._forward_process_and_sample_is_compiled = False
        # Static input shapes are needed for cuda graphs, and for compile unless it is dynamic
        self._pad_inputs = use_cuda_graph or (self.compile_config is not None and not self.compile_config.dynamic)

        # Set up metrics collector
        self.max_batch_tokens = cache.max_batch_tokens
        self.metrics = ContinuousBatchProcessorMetrics(cache.max_batch_tokens)

        # Setup inputs and outputs
        self.use_async_batching = use_async_batching
        if self.use_async_batching:
            # Since in async there are 2 IO pairs, there are also 2 graph buffers: we divide the max_cached_graphs by 2
            max_cached_graphs = ceil(max_cached_graphs / 2)
            self.inputs_and_outputs = ContinuousBatchingAsyncIOs(
                cache, config, model_device, model_dtype, max_cached_graphs
            )
        else:
            self.inputs_and_outputs = ContinuousBatchingIOs(
                cache, config, model_device, model_dtype, max_cached_graphs
            )

    def __repr__(self) -> str:
        return (
            f"ContinuousBatchProcessor(input_queue={self.input_queue}, output_queue={self.output_queue}, "
            f"active_requests={self.scheduler.active_requests}, waiting_requests={self.scheduler.waiting_requests})"
            + self.inputs_and_outputs.get_model_kwargs().__repr__()
        )

    @traced
    def _get_new_requests(self) -> None:
        """Pull new requests from the input queue and add to waiting list."""
        while not self.input_queue.empty():
            try:
                state = self.input_queue.get_nowait()
                if state is None:  # Sentinel value
                    continue
                self.scheduler.add_waiting_request(state)
            except queue.Empty:
                break
            except Exception as e:
                logger.error(f"Error processing new request: {e}", exc_info=True)
                # `state` may not be bound if get_nowait itself raised; only report when we actually have one
                state: RequestState = locals().get("state")  # type:ignore
                if state is not None:
                    self._handle_request_error(e, state)

    @traced
    def _handle_request_error(self, error: Exception, state: RequestState) -> None:
        """Handle general request processing error."""
        state.status = RequestStatus.FAILED
        state.error = str(error)
        # Include any generated tokens if this is an active request
        if isinstance(state.request_id, str):
            state.generated_tokens = self.scheduler.get_active_request_static_outputs(state.request_id)
        else:
            state.generated_tokens = []
        self.metrics.record_request_completion(state.created_time, state.request_id)
        self.output_queue.put(state.to_generation_output())

    # TODO: there should be a way to choose the offloading policy: biggest request, oldest request, etc.
    # Including a policy to not allow offloading and crashing the generation
    def soft_reset_one_request(self) -> None:
        """Soft resets one active request by removing it from active requests and re-adding it to the waiting
        queue. The generated tokens are kept as part of the new request's initial prompt. When `block_new_requests`
        is False, the oldest request is offloaded; when True, the newest request is offloaded. This method also
        sets `block_new_requests` to True to prevent infinite loops of offloading and re-scheduling requests.
        """
        # The offloaded request is the newest (resp. oldest) if block_new_requests is True (resp. False)
        if self.scheduler.block_new_requests:
            request_id, state = self.scheduler.active_requests.popitem()
        else:
            request_id, state = next(iter(self.scheduler.active_requests.items()))
        logger.info(
            f"Soft resetting request {request_id} with {len(state.initial_tokens)} initial tokens and "
            f"{len(state.generated_tokens)} generated tokens"
        )
        # Create a copy of the offloaded request keeping the generated tokens as addition to the initial prompt
        new_state = state.create_equivalent_initial_request()
        # In async mode, this ensures the request is not updated in the other batch without triggering logging
        state._status = RequestStatus.FINISHED
        # Actual offloading of the request
        self.scheduler.finish_request(request_id, evict_from_cache=True)
        self.scheduler.add_waiting_request(new_state)
        # This flag blocks any new requests from being scheduled until one request is finished. This ensures that we
        # don't enter an offload / schedule loop
        self.scheduler.block_new_requests = True

    @traced
    def prepare_next_batch(self) -> bool:
        """Prepare tensors and metadata for the next model forward pass.

        Returns True if there are requests to process, False otherwise."""
        # Get new requests from the queue, stop if there are no pending requests
        self._get_new_requests()
        self.scheduler.clear_cancelled_requests()
        if not self.scheduler.has_pending_requests():
            return False
        self.metrics.record_queue_metrics(len(self.scheduler.active_requests), len(self.scheduler.waiting_requests))

        # Schedule the next batch of requests, stop if there are no requests in the batch
        requests_in_batch = self.scheduler.schedule_batch(self.max_batch_tokens, self.cache.num_pages)
        # If requests_in_batch is None, it means we need to offload some requests if possible
        if requests_in_batch is None:
            if len(self.scheduler.active_requests) > 1:
                self.soft_reset_one_request()
                return False
            else:
                raise RuntimeError("No requests can be scheduled and no request can be offloaded.")
        # If it's an empty list, it means we have no requests to process
        if not requests_in_batch:
            return False

        # Otherwise, we can continue with the non-empty batch
        self.metrics.record_batch_metrics(requests_in_batch)
        self.inputs_and_outputs.prepare_batch_tensors(requests_in_batch)

        # Record the memory metrics of the KV cache
        self.metrics.record_kv_cache_memory_metrics(self.cache)

        if logger.isEnabledFor(logging.DEBUG):
            actual_query_length, actual_key_length = self.inputs_and_outputs.get_actual_lengths()[:2]
            logger.debug(
                f"Scheduled: {len(requests_in_batch)}, Waiting: {len(self.scheduler.waiting_requests)}, "
                f"Active: {len(self.scheduler.active_requests)}. cum Q: {actual_query_length}. "
                f"cum KV: {actual_key_length}, free blocks: {self.cache.get_num_free_blocks()}"
            )
        return True

    @traced
    def _maybe_send_output(self, state: RequestState) -> None:
        """Send output to the queue based on streaming mode and request state."""
        if state.streaming or state.status == RequestStatus.FINISHED:
            self.output_queue.put(state.to_generation_output())

    @traced
    def update_batch(self) -> None:
        """Update request states based on generated tokens."""
        requests_in_batch, new_tokens = self.inputs_and_outputs.prepare_batch_update()
        current_logits_index = 0
        for future_state in requests_in_batch:
            state = future_state.state
            # Early return if the request is finished
            if state.status == RequestStatus.FINISHED:
                if self.use_async_batching:
                    # Skip this request, but still consume its token from new_tokens if it had one
                    if future_state.has_new_token:
                        current_logits_index += 1
                    continue
                raise RuntimeError(f"Tried to update FINISHED request {state.request_id} in sync mode.")
            # If the request has a new token, it means prefill has already ended or just finished
            if future_state.has_new_token:
                # If there is just one temporary token, it means prefill just ended
                if state.generated_len() == 0:
                    self.metrics.record_ttft_metric(state.created_time, state.request_id)
                    state.status = RequestStatus.DECODING
                token = new_tokens[current_logits_index]
                current_logits_index += 1
                # Update the request and stop if it is complete
                is_finished = state.update_and_check_completion(token)
                # We mark the completed blocks as such
                self.cache.mark_shareable_blocks_as_complete(state, future_state.complete_blocks)
                if is_finished:
                    self.metrics.record_request_completion(state.created_time, state.request_id)
                    self.scheduler.finish_request(state.request_id, evict_from_cache=(not self.manual_eviction))
                    self.scheduler.block_new_requests = False
                self._maybe_send_output(state)
            # Otherwise, the request is still prefilling, but the prefill has been split
            elif state.status == RequestStatus.PREFILLING:
                self.cache.mark_shareable_blocks_as_complete(state, future_state.complete_blocks)

        # If some requests need to be forked, we do it now
        copy_source, copy_destination = [], []
        while self.scheduler._requests_to_fork:
            # Get the number of children and reset it so it's not forked again
            state_to_fork = self.scheduler._requests_to_fork.pop()
            num_children = state_to_fork.num_children
            state_to_fork.num_children = 0
            # Create the new request and add them to the scheduler
            new_request_ids = [f"{state_to_fork.request_id}__child#{i}" for i in range(num_children)]
            for new_request_id in new_request_ids:
                self.scheduler.active_requests[new_request_id] = state_to_fork.fork(new_request_id)
            # Fork the cache
            copy_src, copy_dst = self.cache.fork_request(state_to_fork.request_id, new_request_ids)
            copy_source.extend(copy_src)
            copy_destination.extend(copy_dst)
            # FIXME: if fork cant be done, create a new pending request without forking instead of crashing everything
        # The copy induced by the fork is done in one go (if it's even needed)
        if copy_source:
            self.cache.copy_cache(copy_source, copy_destination)

    @traced
    def has_pending_requests(self) -> bool:
        """Check if there are any active or waiting requests."""
        return self.scheduler.has_pending_requests()

    @traced
    def handle_batch_error(self, error):
        """Handle errors during batch processing: fail every request of the current batch."""
        failed_future_states = self.inputs_and_outputs.prepare_batch_update()[0]
        for future_state in failed_future_states:
            self._handle_request_error(error, future_state.state)
            self.scheduler.finish_request(future_state.state.request_id)

    @traced
    def fail_all_requests(self, error: Exception) -> None:
        """Fail all active requests with the given error.

        Args:
            error: The error to report in the failure message
        """
        requests = list(self.scheduler.active_requests.values())
        for state in requests:
            self._handle_request_error(error, state)
            self.scheduler.finish_request(state.request_id)
        # Also fail any requests in the waiting queue
        for req_id in list(self.scheduler.waiting_requests.keys()):
            state = self.scheduler.waiting_requests.pop(req_id)
            self._handle_request_error(error, state)
        # Clear the ordering queue
        self.scheduler.waiting_requests_order.clear()

    @traced
    @torch.no_grad()
    def _generation_step(self, model: nn.Module, logit_processor: LogitsProcessorList, do_sample: bool) -> None:
        """Perform a single generation step."""
        # If a compile config is specified, we compile the forward pass once in a wrapper
        if self.compile_config is not None and not self._forward_process_and_sample_is_compiled:
            self._forward_process_and_sample = torch.compile(
                self._forward_process_and_sample,
                fullgraph=self.compile_config.fullgraph,
                mode=self.compile_config.mode,
                dynamic=self.compile_config.dynamic,
                backend=self.compile_config.backend,
                options=self.compile_config.options,
            )
            self._forward_process_and_sample_is_compiled = True

        # If inputs are static sized, we find the padded sizes of the queries and keys/values
        if self._pad_inputs:
            actual_query_length, _, _, actual_read_sizes, _ = self.inputs_and_outputs.get_actual_lengths()
            padded_q = pad_to_interval(actual_query_length, self.q_padding_interval_size, self.max_batch_tokens)
            max_read_index_size = max(actual_read_sizes)
            padded_read_index_size = pad_to_interval(
                max_read_index_size, self.kv_padding_interval_size, self.cache.num_pages
            )
        else:
            padded_q, padded_read_index_size = 0, 0

        # Retrieve the model kwargs with or without padding
        batch_data = self.inputs_and_outputs.get_model_kwargs(padded_q, padded_read_index_size)
        compute_stream = self.inputs_and_outputs.compute_stream

        # If we are not using cuda graphs, we perform the generation step and return
        if not self.use_cuda_graph:
            with torch.cuda.stream(compute_stream):
                self._forward_process_and_sample(model, batch_data, logit_processor, do_sample)
        # Otherwise, we use create or replay the graph
        else:
            graph = self.inputs_and_outputs.graphs.get_graph(padded_q, padded_read_index_size)
            # Case: the graph already exists, so we replay it
            if graph is not None:
                with torch.cuda.stream(compute_stream):
                    graph.replay()
            # Otherwise, the graph does not exist, so we create it
            else:
                logger.info(f"Creating graph for {(padded_q, padded_read_index_size) = }")
                # TODO: remove this once we are sure there are no race conditions
                # compute_stream.wait_stream(torch.cuda.current_stream())
                # Warmup
                with torch.cuda.stream(compute_stream):
                    self._forward_process_and_sample(model, batch_data, logit_processor, do_sample)
                # torch.cuda.current_stream().wait_stream(compute_stream)
                # Capture
                graph = torch.cuda.CUDAGraph()
                with torch.cuda.graph(graph, stream=compute_stream):
                    self._forward_process_and_sample(model, batch_data, logit_processor, do_sample)
                # Store
                self.inputs_and_outputs.graphs.set_graph(padded_q, padded_read_index_size, graph)

        # In any case, we transfer the outputs to the host
        self.inputs_and_outputs.retrieve_device_outputs()

    @traced
    def _forward_process_and_sample(
        self,
        model: nn.Module,
        batch_data: dict,
        logit_processor: LogitsProcessorList,
        do_sample: bool,
    ) -> None:
        """This function performs the forward pass, logits processing, and sampling; which are broken down into
        smaller function to be easier to trace with OpenTelemetry."""
        self.inputs_and_outputs.carry_over_tokens(batch_data["input_ids"])
        logits = self._model_forward(model, batch_data)
        # if self.log_prob_generation: batch_processor.output_probs.copy_(logits)  # TODO
        probs = self._process_logit(batch_data, logits, logit_processor)
        self._sample(probs, batch_data, do_sample)

    @traced(span_name="model_forward")
    def _model_forward(self, model: nn.Module, batch_data: dict) -> torch.Tensor:
        return model(**batch_data).logits

    @traced(span_name="logit_processing")
    def _process_logit(
        self, batch_data: dict, logits: torch.Tensor, logit_processor: LogitsProcessorList
    ) -> torch.Tensor:
        # Pass continuous batching context to logits processor if it supports it.
        if hasattr(logit_processor, "set_continuous_batching_context"):
            logit_processor.set_continuous_batching_context(
                batch_data["logits_indices"], batch_data["cu_seq_lens_q"]
            )
        # Handle shape compatibility: logit processors expect 2D tensors [batch_size, vocab_size]
        # but continuous batching always produces 3D tensors [batch_size, seq_len, vocab_size]
        batch_size, seq_len, vocab_size = logits.shape
        # NOTE: to be an exact match with generate, we should also convert logits2d to float32 here, but it's not needed in practice
        logits_2d = logits.view(batch_size * seq_len, vocab_size)
        input_ids_2d = batch_data["input_ids"].view(batch_size * seq_len)
        # Process with 2D tensors
        processed_logits_2d = logit_processor(input_ids_2d, logits_2d)  # type: ignore[arg-type]
        # Reshape back to 3D
        return processed_logits_2d.view(batch_size, seq_len, vocab_size)

    @traced(span_name="sampling")
    def _sample(self, probs: torch.Tensor, batch_data: dict, do_sample: bool) -> None:
        if do_sample:
            probs = nn.functional.softmax(probs, dim=-1)
            # probs[0] has shape [seq_len, vocab_size], multinomial returns [seq_len, 1]
            next_tokens = torch.multinomial(probs[0], num_samples=1).squeeze(-1)  # Now [seq_len]
        else:
            next_tokens = torch.argmax(probs, dim=-1)  # shape is [1, seq_len]
            next_tokens = next_tokens.squeeze(0)  # shape is [seq_len]
        tokens = next_tokens.size(0)  # Get seq_len dimension
        indices = batch_data["logits_indices"][:tokens]
        next_tokens = next_tokens[indices]
        self.inputs_and_outputs.output_ids[:tokens].copy_(next_tokens)


# Manager Class (User Interface)
@attach_tracer()
class ContinuousBatchingManager:
    """Manager for handling continuous batching of generation requests.

    This class provides the user interface for submitting generation requests, retrieving results, and managing
    the background generation thread.
    """

    def __init__(
        self,
        model: ProtoPretrainedModel,
        generation_config: GenerationConfig,
        manual_eviction: bool = False,
        max_queue_size: int = 0,
        q_padding_interval_size: int = 0,
        kv_padding_interval_size: int = 0,
        max_cached_graphs: int = 0,
        allow_block_sharing: bool = True,
        use_async_batching: bool | None = None,
    ) -> None:
        """Initialize the continuous batching manager.

        Args:
            model: The language model for generation
            generation_config: Configuration for generation parameters
            manual_eviction: Whether cache blocks are evicted manually by the caller instead of automatically
            max_queue_size: Maximum size of the request queue (0 = unlimited)
            q_padding_interval_size: (optional) Padding granularity for queries in tokens. 0 uses default.
            kv_padding_interval_size: (optional) Padding granularity for KV cache in tokens. 0 uses default.
            max_cached_graphs: (optional) Maximum number of cached CUDA graphs. 0 uses default.
            allow_block_sharing: (optional) Whether to allow block sharing if the model has some full attention
                layers
            use_async_batching: Whether to use async API or not. If None, will be automatically detected.
        """
        # Reload paged version of the attention implementation if necessary
        if "paged|" not in model.config._attn_implementation:
            model.set_attn_implementation(f"paged|{model.config._attn_implementation}")

        # Internal arguments
        self.model = model.eval()
        self.manual_eviction = manual_eviction
        self._allow_block_sharing = allow_block_sharing
        self._use_prefix_sharing = allow_block_sharing  # approximation until the cache is created
        self.input_queue = queue.Queue(maxsize=max_queue_size)
        self.output_queue = queue.Queue()
        self.stop_event = threading.Event()
        self.batch_processor: ContinuousBatchProcessor | None = None
        self._generation_thread = None
        self._request_counter = 0
        self._request_lock = threading.Lock()

        # Generation config related arguments
        generation_config = model.generation_config if generation_config is None else generation_config
        self.generation_config = generation_config
        self.log_prob_generation = getattr(generation_config, "log_prob_generation", False)
        self.do_sample = getattr(generation_config, "do_sample", True)
        self.logit_processor: LogitsProcessorList = self.model._get_logits_processor(generation_config)
        num_return_sequences = getattr(generation_config, "num_return_sequences", None)
        self.num_return_sequences = num_return_sequences if num_return_sequences is not None else 1
        # self.model.generation_config.top_p = None NOTE: figure out why this was here

        # Cuda graph behavior is determined below using either user-specified arguments or heuristics
        self.use_cuda_graph = self._decide_use_cuda_graphs(
            use_cuda_graph=getattr(generation_config, "use_cuda_graph", None),
            user_specified_param=bool(q_padding_interval_size or kv_padding_interval_size or max_cached_graphs),
            compile_config=getattr(generation_config, "compile_config", None),
        )

        # If the user specifies to use async or not, no need to decide ourselves
        if use_async_batching is not None:
            self.use_async_batching = use_async_batching
        # Otherwise, we enable async batching if there are no attn masks, because they add a lot of host-to-device
        # transfers, and if CUDA graphs are not turned off, because that would mean we are trying to save memory
        else:
            self.use_async_batching = self.use_cuda_graph and not attn_mask_is_needed(self.model.config)

        # Padding interval sizes for Q and KV (0 means use defaults)
        self.q_padding_interval_size = (
            q_padding_interval_size if q_padding_interval_size > 0 else Q_PADDING_INTERVAL_SIZE
        )
        self.kv_padding_interval_size = (
            kv_padding_interval_size if kv_padding_interval_size > 0 else KV_PADDING_INTERVAL_SIZE
        )
        self.max_cached_graphs = max_cached_graphs if max_cached_graphs > 0 else MAX_CACHED_GRAPHS

        # Log probability generation is not supported yet (TODO)
        if self.log_prob_generation:
            raise NotImplementedError("log_prob_generation is not supported yet")

    def _decide_use_cuda_graphs(
        self,
        use_cuda_graph: bool | None,
        user_specified_param: bool,
        compile_config: CompileConfig | None,
    ) -> bool:
        """Returns whether or not to use cuda graphs for continuous batching, depending on the following criteria:
        - (use_cuda_graph) which is the user choice
        - (user_specified_param): a boolean indicating if the user specified a parameter related to cuda graphs
        If none of the above criteria are met, we use a default heuristic based on the attention implementation:
        we turn on cuda graphs if and only if no attention mask is needed.
        """
        # If cuda is not available, we cannot use cuda graphs
        if not torch.cuda.is_available():
            if use_cuda_graph:
                logger.warning(f"use_cuda_graph is True but {torch.cuda.is_available() = }: turning off cuda graphs.")
            return False
        # If use_cuda_graph is specified, we follow the user's choice
        if use_cuda_graph is not None:
            return use_cuda_graph
        # If the user specified a parameter related to cuda graphs, we activate cuda graphs
        if user_specified_param:
            return True
        # If a compile config was found, turn off cuda graphs if the compile config already uses them
        if compile_config is not None:
            options = torch._inductor.list_mode_options().get(compile_config.mode, compile_config.options)
            compile_uses_cudagraphs = options.get("triton.cudagraphs", False)
            if compile_uses_cudagraphs:
                logger.warning(
                    f"Compile config {compile_config.mode = } uses cudagraphs, which usually does not work well with "
                    "continuous batching. We recommend using mode 'default' or 'max-autotune-no-cudagraphs' instead."
                )
            return not compile_uses_cudagraphs  # TODO: should this also match the dynamic shapes?
        # Otherwise we have a default heuristic based on the attention implementation:
        # attention implementations where an attention mask is needed suffer a lot more from the padding associated
        # with cuda graphs, so default is to turn cuda graphs off for those implementations
        use_cuda_graph = not attn_mask_is_needed(self.model.config)
        logger.warning(
            f"No behavior specified for use_cuda_graph, defaulting to {use_cuda_graph = } because "
            f"{self.model.config._attn_implementation = }. If you want to save memory, turn off cuda graphs, but "
            "they tend to improve performances by a lot."
        )
        return use_cuda_graph

    @traced
    def start(self) -> None:
        """Start the background generation thread."""
        if self._generation_thread is not None and self._generation_thread.is_alive():
            logger.warning("Manager thread is already running.")
            return
        self._generation_thread = threading.Thread(target=self._run_generation_loop)
        self._generation_thread.start()

    def is_running(self) -> bool:
        """Check if the background generation thread is running."""
        return self._generation_thread is not None and self._generation_thread.is_alive()

    # NOTE: don't forget to update `continuous_batching_context_manager` when changing this method's definition
    def stop(self, block: bool = True, timeout: float | None = None) -> None:
        """Signal the background thread to stop.

        Args:
            block: Whether to wait for the thread to stop
            timeout: Maximum time to wait for the thread to stop
        """
        if self.batch_processor is None:
            logger.warning("\nBatch processor was not initialized.")
        else:
            if self.batch_processor.cache.use_prefix_sharing:
                logger.info(
                    f"\nPrefix sharing was on. Total prefix length: {self.batch_processor.cache._total_prefix_length}"
                )
        if self._generation_thread is None:
            logger.warning("Manager not started.")
            return
        stop_trigger_time = perf_counter()
        if not self.stop_event.is_set():
            self.stop_event.set()
            logger.info("Stopping continuous batching manager...")
        if block:
            self.join(stop_trigger_time, timeout)
        self.batch_processor = None

    def join(self, stop_trigger_time: float, timeout: float | None = None) -> None:
        """Wait for the background thread to finish.

        Args:
            stop_trigger_time: `perf_counter` timestamp taken when the stop was triggered (used for logging)
            timeout: Maximum time to wait for the thread to stop
        """
        if self._generation_thread is not None:
            self._generation_thread.join(timeout=timeout)
            if self._generation_thread.is_alive():
                logger.warning(f"Generation thread did not exit after join timeout ({timeout}).")
            else:
                end = perf_counter()
                logger.info(f"Continuous Batching Manager stopped after {end - stop_trigger_time:.2f}s.")
                self._generation_thread = None

    def add_request(
        self,
        input_ids: list[int],
        request_id: str | None = None,
        max_new_tokens: int | None = None,
        streaming: bool = False,
        record_timestamps: bool = False,
    ) -> str:
        """Add a new generation request to the queue.

        Args:
            input_ids: Input token IDs to use as prompt
            request_id: Optional custom request ID (auto-generated if None)
            max_new_tokens: Cap on newly generated tokens (falls back to the generation config when None)
            streaming: Whether to stream tokens back as they are generated
            record_timestamps: Whether to record per-step timestamps on the request state

        Returns:
            str: The request ID
        """
        if request_id is None:
            # Lock so concurrent callers never get the same counter value
            with self._request_lock:
                request_id = f"req_{self._request_counter}"
                self._request_counter += 1
        max_new_tokens = self.generation_config.max_new_tokens if max_new_tokens is None else max_new_tokens
        # NOTE: do we want to handle a case when the user wants token ids returned instead of decoded text?
        state = RequestState(
            request_id=request_id,
            initial_tokens=list(input_ids),
            num_children=self.num_return_sequences - 1,
            record_timestamps=record_timestamps,
            max_new_tokens=max_new_tokens,
            eos_token_id=self.generation_config.eos_token_id,
            streaming=streaming,
        )
        # Use block=True with timeout to handle backpressure if queue is full
        self.input_queue.put(state, block=True, timeout=10)  # XXX: pass timeout as fn arg?
return request_id def add_requests( self, inputs: list[list[int]], max_new_tokens: int | None = None, streaming: bool = False, record_timestamps: bool = False, ) -> None: # Infer the request ids of all incoming requests with self._request_lock: request_ids = [f"req_{i}" for i in range(self._request_counter, self._request_counter + len(inputs))] self._request_counter += len(inputs) # If there is prefix sharing, we sort the inputs to maximize cache hits but keep the order of the requests ids_and_inputs = list(zip(request_ids, inputs)) if self._use_prefix_sharing: ids_and_inputs = sorted(ids_and_inputs, key=lambda x: x[1], reverse=True) # Add requests in order for request_id, input_ids in ids_and_inputs: self.add_request(input_ids, request_id, max_new_tokens, streaming, record_timestamps) def cancel_request(self, request_id: str) -> None: """Cancel a request by its ID. Args: request_id: The ID of the request to cancel """ if self.batch_processor is not None: self.batch_processor.scheduler.set_request_cancellation(request_id) # TODO:handle benchmarking properly when updating / fixing the requeue logic def get_result(self, request_id: str | None = None, timeout: float | None = None) -> GenerationOutput | None: """Retrieve one result from the output queue. 
Args: timeout: Maximum time to wait for a result Returns: Optional[GenerationOutput]: The result data or None if timeout """ if self._generation_thread is None and self.output_queue.empty(): return None try: result = self.output_queue.get(block=True, timeout=timeout) # NOTE: requeue logic here if request_id is not None and result.request_id != request_id: self.output_queue.put(result) return None return result except queue.Empty: return None def __iter__(self): """Iterate over results as they become available.""" while self._generation_thread is not None and self._generation_thread.is_alive(): result = self.get_result(timeout=0.1) if result is not None: yield result # FIXME: stop iteration when request status is finished? def request_id_iter(self, request_id: str) -> Generator[GenerationOutput]: """Iterate over results matching a specific request id as they become available.""" request_cancelled = False while self._generation_thread is not None and self._generation_thread.is_alive() and not request_cancelled: result = self.get_result(request_id=request_id, timeout=0.1) if result is not None: yield result if self.batch_processor is not None: request_cancelled = self.batch_processor.scheduler.request_is_cancelled(request_id) @traced def _generation_step(self) -> None: """Perform a single generation step. 
This is mostly cuda graphed""" if self.batch_processor is None: raise RuntimeError("Tried to perform a generation step before the batch processor was initialized.") self.batch_processor._generation_step(self.model, self.logit_processor, self.do_sample) def _run_generation_loop(self) -> None: """Main processing loop running in the background thread.""" batch_processor: ContinuousBatchProcessor | None = None try: t0 = perf_counter() paged_attention_cache = PagedAttentionCache( self.model.config, self.generation_config, self.model.device, self.model.dtype, tp_size=getattr(self.model, "_tp_size", None), # Use model's actual TP setting allow_block_sharing=self._allow_block_sharing, ) self._use_prefix_sharing = paged_attention_cache.use_prefix_sharing # update the approximation logger.debug(f"PagedAttentionCache created in {perf_counter() - t0} seconds") scheduler = None if hasattr(self.generation_config, "scheduler"): scheduler = SCHEDULER_MAPPING.get(self.generation_config.scheduler, None) if scheduler is None: logger.warning(f"Scheduler '{scheduler}' not found. 
Defaulting to FIFO.") scheduler = FIFOScheduler else: # Default to fifo scheduler = FIFOScheduler t1 = perf_counter() batch_processor = ContinuousBatchProcessor( cache=paged_attention_cache, config=self.model.config, generation_config=self.generation_config, input_queue=self.input_queue, output_queue=self.output_queue, stop_event=self.stop_event, model_device=self.model.device, model_dtype=self.model.dtype, scheduler=scheduler(paged_attention_cache, self.manual_eviction), manual_eviction=self.manual_eviction, use_cuda_graph=self.use_cuda_graph, q_padding_interval_size=self.q_padding_interval_size, kv_padding_interval_size=self.kv_padding_interval_size, max_cached_graphs=self.max_cached_graphs, use_async_batching=self.use_async_batching, ) self.batch_processor = batch_processor self.current_batch = 0 logger.debug(f"batch_processor created in {perf_counter() - t1} seconds") # If using the async API, we bootstrap the first batch w/out update if self.batch_processor.use_async_batching: if not batch_processor.prepare_next_batch(): raise RuntimeError("Failed to bootstrap the first batch.") self._generation_step() self.current_batch += 1 while (not self.stop_event.is_set()) or batch_processor.has_pending_requests(): self._inner_generation_loop(batch_processor) self.current_batch += 1 # In async mode, the last batch's results are still in flight - process them now # We need to switch back to the pair that has the last batch's D2H pending if isinstance(batch_processor.inputs_and_outputs, ContinuousBatchingAsyncIOs): batch_processor.inputs_and_outputs.current_pair = 1 - batch_processor.inputs_and_outputs.current_pair batch_processor.update_batch() except Exception as e: logger.error(f"Error in generation loop: {e}", exc_info=True) self._handle_critical_error(e, batch_processor) finally: logger.info("Generation loop finished.") @traced(span_name="generation_loop") def _inner_generation_loop(self, batch_processor: ContinuousBatchProcessor) -> None: # Loop body ends if there is 
no requests in the batch if not batch_processor.prepare_next_batch(): return self._generation_step() batch_processor.update_batch() @traced def _handle_critical_error(self, error: Exception, batch_processor: ContinuousBatchProcessor | None) -> None: """Handle critical errors that terminate the generation loop.""" # Signal stop self.stop_event.set() # Fail pending requests in input queue try: while True: req_data = self.input_queue.get_nowait() if batch_processor is not None: batch_processor._handle_request_error(error, req_data) except queue.Empty: pass # Fail active requests if batch_processor is not None: batch_processor.fail_all_requests(error) @traced def evict_request_from_cache(self, request_id: str) -> None: """Evict a request from the cache. It is assumed that the request is already finished.""" if not self.manual_eviction: raise RuntimeError("Manual eviction is not enabled for this manager.") if self.batch_processor is not None: self.batch_processor.scheduler.finish_request(request_id) class ContinuousMixin: """Mixin class for models to add continuous batching capabilities.""" generation_config: GenerationConfig @contextmanager def continuous_batching_context_manager( self, generation_config: GenerationConfig | None = None, manual_eviction: bool = False, max_queue_size: int = 0, q_padding_interval_size: int = 0, kv_padding_interval_size: int = 0, allow_block_sharing: bool = True, block: bool = True, timeout: float | None = None, use_async_batching: bool | None = None, # leave to None for automatic detection max_cached_graphs: int = 0, ) -> Generator[ContinuousBatchingManager]: manager = self.init_continuous_batching( generation_config=generation_config, manual_eviction=manual_eviction, max_queue_size=max_queue_size, q_padding_interval_size=q_padding_interval_size, kv_padding_interval_size=kv_padding_interval_size, max_cached_graphs=max_cached_graphs, allow_block_sharing=allow_block_sharing, use_async_batching=use_async_batching, ) manager.start() try: 
yield manager finally: logger.debug( "Continuous batching loop finished" ) # a dummy log needed for the logs of stop to show. Won't show manager.stop(block=block, timeout=timeout) # NOTE: don't forget to update `continuous_batching_context_manager` when changing this method's definition def init_continuous_batching( self, generation_config: GenerationConfig | None = None, manual_eviction: bool = False, max_queue_size: int = 0, q_padding_interval_size: int = 0, kv_padding_interval_size: int = 0, allow_block_sharing: bool = True, use_async_batching: bool | None = None, max_cached_graphs: int = 0, ) -> ContinuousBatchingManager: """Initialize a manager for continuous batching inference. Args: generation_config: An optional generation configuration, which may contain a CompileConfig object manual_eviction: Whether to manually evict requests from the cache max_queue_size: Maximum size of the input request queue q_padding_interval_size: Padding granularity for queries in tokens. 0 uses default. kv_padding_interval_size: Padding granularity for KV cache in tokens. 0 uses default. allow_block_sharing: A flag to allow block sharing if the model has some full attention layers use_async_batching: Whether to use async API or not. If None, will be automatically detected. max_cached_graphs: Maximum number of cached CUDA graphs. 0 uses default. Returns: `ContinuousBatchingManager`: The manager instance to add requests and retrieve results. """ if not hasattr(self, "config") or not hasattr(self, "device") or not hasattr(self, "dtype"): raise AttributeError("Model must have 'config', 'device', and 'dtype' attributes.") gen_config = generation_config if generation_config is not None else self.generation_config if gen_config is None: raise ValueError("A GenerationConfig must be provided or set in the model.") if gen_config.eos_token_id is None: logger.warning("`eos_token_id` not set in GenerationConfig. 
Setting to -1 (disabled).") gen_config.eos_token_id = -1 # Create and return the manager return ContinuousBatchingManager( model=self, # type: ignore generation_config=gen_config, manual_eviction=manual_eviction, max_queue_size=max_queue_size, q_padding_interval_size=q_padding_interval_size, kv_padding_interval_size=kv_padding_interval_size, allow_block_sharing=allow_block_sharing, use_async_batching=use_async_batching, max_cached_graphs=max_cached_graphs, ) # TODO: support streaming @traced @torch.inference_mode() def generate_batch( self, inputs: list[list[int]], generation_config: GenerationConfig | None = None, q_padding_interval_size: int = 0, kv_padding_interval_size: int = 0, allow_block_sharing: bool = True, record_timestamps: bool = False, progress_bar: bool = True, use_async_batching: bool | None = None, max_cached_graphs: int = 0, **kwargs, ) -> dict[str, GenerationOutput]: """Generate sequences for a batch of prompts using continuous batching. Args: inputs: List of input token sequences (prompts) generation_config: Optional generation configuration q_padding_interval_size: Padding granularity for queries in tokens. 0 uses default. kv_padding_interval_size: Padding granularity for KV cache in tokens. 0 uses default. allow_block_sharing: A flag to allow block sharing if the model has some full attention layers record_timestamps: If set to true, the requests will have a timestamp for each token generated progress_bar: If set to true, a progress bar will be displayed use_async_batching: Whether to use async double buffering or not. If None, will be automatically detected. max_cached_graphs: Maximum number of cached CUDA graphs. 0 uses default. 
**kwargs: Additional generation parameters Returns: `dict[str, GenerationOutput]`: a dictionary of request ids to GenerationOutput objects """ if not inputs: return {} if logger.getEffectiveLevel() <= logging.DEBUG: logger.warning("Progress bar is disabled when logger level is less than DEBUG") progress_bar = False # Initialize manager with the batch inputs results = {} gen_cfg = self.generation_config if generation_config is None else generation_config num_requests = len(inputs) * (gen_cfg.num_return_sequences if gen_cfg.num_return_sequences is not None else 1) # Prepare context managers for the main loop manager_cm = self.continuous_batching_context_manager( generation_config=generation_config, q_padding_interval_size=q_padding_interval_size, kv_padding_interval_size=kv_padding_interval_size, max_cached_graphs=max_cached_graphs, allow_block_sharing=allow_block_sharing, block=True, timeout=5, use_async_batching=use_async_batching, ) logging_cm = logging_redirect_tqdm([logger]) pbar_cm = tqdm( total=num_requests, disable=(not progress_bar), desc=f"Solving {num_requests} requests", unit="request", ) # Main loop with manager_cm as manager, logging_cm, pbar_cm as pbar: try: manager.add_requests( inputs=inputs, max_new_tokens=kwargs.get("max_new_tokens"), record_timestamps=record_timestamps ) finished_count = 0 while finished_count < num_requests: result = manager.get_result(timeout=1) if result: req_id = result.request_id if result.is_finished(): results[req_id] = result finished_count += 1 pbar.update(1) else: if not manager.is_running(): logger.error("Generation thread terminated unexpectedly.") # This helps get some information in stdout print("Returning results of generate_batch despite unexpected termination.") break except Exception as e: logger.error(f"Error during batch generation: {e}", exc_info=True) # Re-order requests to match the order of the inputs reordered_results = {} for i in range(len(inputs)): # We cannot guarantee that the generation succeeded for 
all requests, so we need to check if the request is in the results result = results.get(f"req_{i}") if result is not None: reordered_results[f"req_{i}"] = result else: logger.error(f"Request req_{i} not found in results.") return reordered_results
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/generation/continuous_batching/continuous_api.py", "license": "Apache License 2.0", "lines": 961, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/generation/continuous_batching/scheduler.py
# Copyright 2025 The HuggingFace Inc. team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import threading from abc import ABC, abstractmethod from collections import deque from ...utils.metrics import attach_tracer, traced from .cache import PagedAttentionCache from .requests import FutureRequestState, RequestState, RequestStatus, logger class Scheduler(ABC): """ Abstract base class for scheduling requests in the continuous batch processor. Schedulers manage the lifecycle of requests from when they are added to the waiting queue to when they are scheduled for processing. Different schedulers implement different strategies for prioritizing and batching requests. 
""" def __init__(self, cache: PagedAttentionCache, retain_cache_on_finish: bool = False): self.active_requests: dict[str, RequestState] = {} self.waiting_requests: dict[str, RequestState] = {} self.waiting_requests_order: deque[str] = deque() self.cache = cache self.retain_cache_on_finish = retain_cache_on_finish self._cancellation_lock = threading.Lock() self._requests_to_cancel: set[str] = set() self._requests_to_fork: list[RequestState] = [] # This state is used to avoid infinite loops when offloading requests self.block_new_requests = False # This is to compute the cache used by a new request being scheduled self.cache_budget_module = None if cache.num_full_attention_groups else cache.config.sliding_window @traced def add_waiting_request(self, state: RequestState): """Adds a request to the waiting list.""" if self.retain_cache_on_finish and state.request_id in self.active_requests: old_state = self.active_requests.pop(state.request_id) state.tokens_to_process = state.tokens_to_process[ len(old_state.initial_tokens) : ] # XXX: check for indexing error? state.allocated_blocks = old_state.allocated_blocks state.position_offset = old_state.position_offset self.waiting_requests[state.request_id] = state self.waiting_requests_order.append(state.request_id) @abstractmethod def schedule_batch(self, token_budget: int, cache_budget: int) -> list[FutureRequestState] | None: """Schedules requests for the next batch based on available token and cache budgets. This method selects which requests should be processed in the current batch, considering the budgets and the scheduler's prioritization rules. 
The token_budget is the maximum number of tokens that can be processed in a batch, and the cache_budget is the maximum number of KV cache entries that can be read in a batch.""" @traced def has_pending_requests(self) -> bool: """Checks if there are requests ready to be processed.""" return bool(len(self.active_requests) or len(self.waiting_requests)) @traced def finish_request(self, request_id: str, evict_from_cache: bool = True) -> None: """Completes processing of a request and optionally frees its allocated cache blocks. This method is called when a request has finished generation or encountered an error. """ if evict_from_cache: self.cache.free_blocks(request_id) self.active_requests.pop(request_id, None) @traced def get_active_request_static_outputs(self, request_id: str) -> list[int]: """Gets generated tokens for an active request.""" if request_id in self.active_requests: return self.active_requests[request_id].generated_tokens return [] @traced def set_request_cancellation(self, request_id: str): """Marks a request for cancellation.""" with self._cancellation_lock: self._requests_to_cancel.add(request_id) @traced def clear_cancelled_requests(self): """Remove all cancelled requests from active and waiting queues.""" with self._cancellation_lock: for request_id in self._requests_to_cancel: self.active_requests.pop(request_id, None) self.waiting_requests.pop(request_id, None) if request_id in self.waiting_requests_order: self.waiting_requests_order.remove(request_id) self.cache.free_blocks(request_id) self._requests_to_cancel = set() @traced def request_is_cancelled(self, request_id: str) -> bool: """Checks if a request has been cancelled or removed.""" return request_id in self._requests_to_cancel or ( request_id not in self.active_requests and request_id not in self.waiting_requests ) @traced def _allocate_blocks_if_needed(self, state: RequestState, len_next_tokens: int) -> bool: """Allocate additional cache blocks for a request if the currently allocated 
blocks are insufficient to accommodate the next tokens. It calculates how many blocks are needed based on the request's current cache occupancy and the number of tokens to be processed. The allocation itself is done by the CacheAllocator objects. Returns a boolean indicating if the allocation was successful or not. """ # 1. we check that the occupancy is less than the requested length # 2. we allocate enough blocks to cover the requested length current_len = state.current_len() occupancy = state.allocated_blocks * self.cache.block_size - current_len if occupancy < len_next_tokens or state.allocated_blocks == 0: blocks_needed = ((len_next_tokens - occupancy + 1) // self.cache.block_size) + 1 allocated = self.cache.allocate_blocks(blocks_needed, state.request_id, state.allocated_blocks) if allocated is None: return False state.allocated_blocks += allocated return True def _infer_request_tokens(self, state: RequestState, request_ids_to_remove_from_waiting: set[str]) -> list[int]: """Prepares a request for processing in the current batch. 
If prefix sharing is enabled, and the request was pending, this is where we look for a prefix match and split the request if found.""" # If prefix sharing is enabled, we look for a prefix match and split the request if found if self.cache.use_prefix_sharing and state.status == RequestStatus.PENDING: prefill_length = self.cache.search_prefix_match(state.request_id, state.remaining_prefill_tokens) if prefill_length > 0: self.active_requests[state.request_id] = state request_ids_to_remove_from_waiting.add(state.request_id) state.status = RequestStatus.PREFILLING # We keep track of the number of allocated blocks to avoid double allocation state.allocated_blocks += prefill_length // self.cache.block_size # Even if we match the whole request, we keep at least 1 token to start decoding prefill_length = min(prefill_length, len(state.remaining_prefill_tokens) - 1) state.remaining_prefill_tokens = state.remaining_prefill_tokens[prefill_length:] state.position_offset += prefill_length # If the request is decoding, the tokens to process are already set if state.status == RequestStatus.DECODING: request_tokens = state.tokens_to_process # Otherwise, the tokens to process are the remaining prefill tokens else: request_tokens = state.remaining_prefill_tokens return request_tokens def _schedule_request( self, state: RequestState, request_tokens: list[int], token_budget: int, request_ids_to_remove_from_waiting: set[str], ) -> None: """Schedules a request for the current batch, updating the request's status according to the token budget left. After a request is scheduled, it is part of the next batch unless there is an error. If the request has children (for parallel decoding), it ensures at least one token remains before the request is forked.""" # If the request has one or more children we make sure not to prefill it entirely # This does not check the request state, but DECODING request already have children set to 0. 
if state.num_children > 0 and token_budget >= len(request_tokens) - 1: token_budget = len(request_tokens) - 1 self._requests_to_fork.append(state) # Case: we can process the entire prompt/remainder if len(request_tokens) < token_budget: if state.status == RequestStatus.PENDING: self.active_requests[state.request_id] = state request_ids_to_remove_from_waiting.add(state.request_id) if state.status <= RequestStatus.PREFILLING: state.tokens_to_process = state.remaining_prefill_tokens state.remaining_prefill_tokens = [] # Although prefill will only be done after the batch being scheduled now, we set the status to DECODING # to stay coherent when using asynchronous batching state.status = RequestStatus.DECODING # Otherwise: we need to split the request else: if state.status == RequestStatus.PENDING: self.active_requests[state.request_id] = state state.status = RequestStatus.PREFILLING request_ids_to_remove_from_waiting.add(state.request_id) state.remaining_prefill_tokens = request_tokens[token_budget:] state.tokens_to_process = request_tokens[:token_budget] def _process_candidates( self, candidates: list[RequestState], token_budget: int, cache_budget: int, request_ids_to_remove_from_waiting: set[str], safety_margin: float = 0.0, ) -> tuple[list[FutureRequestState], bool]: """Schedules candidate requests for the current batch. This method contains the common logic shared by all schedulers: it checks token and cache budgets, allocates cache blocks if needed, updates request states, and tracks which waiting requests should be removed from the waiting queue. 
""" scheduled_requests = [] one_allocation_failed = False safety_margins = safety_margin * self.cache.num_blocks for state in candidates: num_free_blocks = self.cache.get_num_free_blocks() # If we are out the safety margin, we only accept decoding requests or the first prefill request outside_safety_margin = num_free_blocks < safety_margins if outside_safety_margin and scheduled_requests and state.status != RequestStatus.DECODING: logger.info( f"Outside safety margin, breaking out of scheduling loop. {num_free_blocks = } {safety_margins = }" ) break # Check cache budget cache_needed = state.current_len() cache_needed = ( cache_needed if self.cache_budget_module is None else cache_needed % self.cache_budget_module ) if cache_budget < cache_needed: continue # Infer the tokens that will be present in the batch if token budget is enough request_tokens = self._infer_request_tokens(state, request_ids_to_remove_from_waiting) # Account for token budget request_len = min(len(request_tokens), token_budget) # Check there will be enough cache for the new tokens allocation_successful = self._allocate_blocks_if_needed(state, request_len) # If the allocation would not be successful, we move on to the next request if not allocation_successful: one_allocation_failed = True # If we reached a waiting request and the cache is full, all subsequent waiting requests will need # allocation as well, so we can safely break out of the scheduling loop. 
if num_free_blocks == 0 and state.request_id in self.waiting_requests: logger.info(f"Breaking mid-loop for request {state.request_id} because the cache is full") break continue # If this point is reached, it means we can safely schedule the request self._schedule_request(state, request_tokens, token_budget, request_ids_to_remove_from_waiting) request_len = len(state.tokens_to_process) # it may change after scheduling # Update the token and cache budgets token_budget -= request_len cache_budget -= cache_needed # If using prefix sharing, we make note of the blocks that will be computed in the forward pass if self.cache.allow_block_sharing: tokens_in_current_block = state.current_len() % self.cache.block_size tokens_after_forward = tokens_in_current_block + request_len complete_blocks = tokens_after_forward // self.cache.block_size else: complete_blocks = 0 # Store the future request state has_new_token = not state.remaining_prefill_tokens scheduled_requests.append(FutureRequestState(state, has_new_token, complete_blocks)) # Remove the request from the waiting queue and mark it as removed req_id = state.request_id was_waiting = self.waiting_requests.pop(req_id, None) is not None if was_waiting: request_ids_to_remove_from_waiting.add(req_id) # Early exit of the loop if we have no budget left if token_budget == 0 or cache_budget == 0: break return scheduled_requests, one_allocation_failed def _cleanup_waiting_queue(self, request_ids_to_remove_from_waiting: set[str]) -> None: """Removes processed requests from the waiting queue order.""" self.waiting_requests_order = deque( [req_id for req_id in self.waiting_requests_order if req_id not in request_ids_to_remove_from_waiting] ) # TODO: further common-ize the two classes @attach_tracer() class FIFOScheduler(Scheduler): """This scheduler processes requests in the order they arrive, meaning decoding requests has priority over prefilling requests. 
Additionally, it includes a safety margin mechanism to prevent cache exhaustion. By default, when 80% of the cache is full, new requests will not be scheduled to prioritize decoding active requests.""" def __init__(self, cache: PagedAttentionCache, retain_cache_on_finish: bool = False, safety_margin: float = 0.2): """Initializes the FIFO scheduler. The safety margin is the percentage of free blocks under which we stop scheduling new prefill requests, so safety_margin = 0.1 means that when there is less than 10% of free blocks, or equivalently when more than 90% of blocks are already allocated, we stop scheduling new prefill requests. """ super().__init__(cache, retain_cache_on_finish) self.safety_margin = safety_margin @traced def schedule_batch(self, token_budget: int, cache_budget: int) -> list[FutureRequestState] | None: priority_states: list[RequestState] = [] second_priority_states: list[RequestState] = [] for state in self.active_requests.values(): if state.status == RequestStatus.DECODING: priority_states.append(state) elif state.status == RequestStatus.PREFILLING: second_priority_states.append(state) # Add waiting requests to second priority if not self.block_new_requests: for req_id in self.waiting_requests_order: second_priority_states.append(self.waiting_requests[req_id]) candidates = priority_states + second_priority_states request_ids_to_remove_from_waiting = set() scheduled_requests, one_allocation_failed = self._process_candidates( candidates, token_budget, cache_budget, request_ids_to_remove_from_waiting, safety_margin=self.safety_margin, ) # We remove waiting requests before checking requests were scheduled, because there might have been prefill matches self._cleanup_waiting_queue(request_ids_to_remove_from_waiting) # If no requests were scheduled and the cache is full, we signal it by returning None if not scheduled_requests and one_allocation_failed: return None return scheduled_requests # FIXME: prioritize adding from waiting reqs before 
scheduling `RequestStatus.DECODING` when cache space allows it # TODO: further consolidate the code by making more of it common. The reference Scheduler is FIFO, not this one. @attach_tracer() class PrefillFirstScheduler(Scheduler): """Scheduler that prioritizes split prefill requests over decoding requests. This scheduler ensures that split prefill requests (which are continuations of partially processed prompts) are completed before processing new decoding requests.""" @traced def schedule_batch(self, token_budget: int, cache_budget: int) -> list[FutureRequestState] | None: priority_states: list[RequestState] = [] second_priority_states: list[RequestState] = [] for state in self.active_requests.values(): # XXX: when cache is full, state can stay on `PREFILLING_SPLIT` so we need to take those into account if state.status == RequestStatus.PREFILLING: priority_states.append(state) elif state.status == RequestStatus.DECODING: second_priority_states.append(state) # Add waiting requests to second priority if not self.block_new_requests: for req_id in self.waiting_requests_order: second_priority_states.append(self.waiting_requests[req_id]) candidates = priority_states + second_priority_states request_ids_to_remove_from_waiting = set() scheduled_requests, one_allocation_failed = self._process_candidates( candidates, token_budget, cache_budget, request_ids_to_remove_from_waiting, safety_margin=0.0, ) # We remove waiting requests before checking requests were scheduled, because there might have been prefill matches self._cleanup_waiting_queue(request_ids_to_remove_from_waiting) # If no requests were scheduled and the cache is full, we signal it by returning None if not scheduled_requests and one_allocation_failed: return None return scheduled_requests SCHEDULER_MAPPING = { "fifo": FIFOScheduler, "prefill_first": PrefillFirstScheduler, }
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/generation/continuous_batching/scheduler.py", "license": "Apache License 2.0", "lines": 330, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/seed_oss/configuration_seed_oss.py
# Copyright 2025 Bytedance-Seed Ltd and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SeedOss model configuration"""

from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters


class SeedOssConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`SeedOssModel`]. It is used to instantiate an
    SeedOss model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the SeedOss-36B.
    e.g. [ByteDance-Seed/SeedOss-36B](https://huggingface.co/ByteDance-Seed/SeedOss-36B)

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 155136):
            Vocabulary size of the SeedOss model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`SeedOssModel`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 27648):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 64):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 80):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 524288):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 1):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 0):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        pretraining_tp (`int`, *optional*, defaults to 1):
            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
            document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
            understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
            results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with
            longer `max_position_embeddings`.
        attention_bias (`bool`, *optional*, defaults to `True`):
            Whether to use a bias in the query, key, value layers during self-attention.
        attention_out_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the output projection layer during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        residual_dropout (`float`, *optional*, defaults to 0.1):
            Residual connection dropout value.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
        head_dim (`int`, *optional*, defaults to 128):
            The attention head dimension.

    ```python
    >>> from transformers import SeedOssModel, SeedOssConfig

    >>> # Initializing a SeedOss-36b style configuration
    >>> configuration = SeedOssConfig()

    >>> # Initializing a model from the SeedOss-36b style configuration
    >>> model = SeedOssModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "seed_oss"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Default tensor parallel plan for base model `SeedOssModel`
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size: int | None = 155136,
        hidden_size: int | None = 4096,
        intermediate_size: int | None = 27648,
        num_hidden_layers: int | None = 64,
        num_attention_heads: int | None = 80,
        num_key_value_heads: int | None = 8,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 524288,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-6,
        use_cache: bool | None = True,
        pad_token_id: int | None = 1,
        bos_token_id: int | None = 0,
        eos_token_id: int | None = 2,
        pretraining_tp: int | None = 1,
        tie_word_embeddings: bool | None = False,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        attention_bias: bool | None = True,
        attention_out_bias: bool | None = False,
        attention_dropout: float | None = 0.1,
        residual_dropout: float | None = 0.1,
        mlp_bias: bool | None = False,
        head_dim: int | None = 128,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.attention_bias = attention_bias
        self.attention_out_bias = attention_out_bias
        self.attention_dropout = attention_dropout
        self.residual_dropout = residual_dropout
        self.mlp_bias = mlp_bias
        # When head_dim is not given explicitly, derive it from the hidden size and head count.
        self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
        self.rope_parameters = rope_parameters
        self.tie_word_embeddings = tie_word_embeddings
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(**kwargs)


__all__ = ["SeedOssConfig"]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/seed_oss/configuration_seed_oss.py", "license": "Apache License 2.0", "lines": 164, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:src/transformers/models/seed_oss/modular_seed_oss.py
# Copyright 2025 Bytedance-Seed Ltd and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch SeedOss model."""

from collections.abc import Callable

import torch
import torch.nn as nn

from ...activations import ACT2FN
from ...cache_utils import Cache
from ...modeling_outputs import CausalLMOutputWithPast
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, logging
from ..llama.modeling_llama import (
    LlamaDecoderLayer,
    LlamaForCausalLM,
    LlamaForQuestionAnswering,
    LlamaForSequenceClassification,
    LlamaForTokenClassification,
    LlamaModel,
    LlamaPreTrainedModel,
    LlamaRMSNorm,
    apply_rotary_pos_emb,
    eager_attention_forward,
)
from .configuration_seed_oss import SeedOssConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "ByteDance-Seed/SeedOss-36B"


class SeedOssRMSNorm(LlamaRMSNorm):
    pass


class SeedOssMLP(nn.Module):
    """Gated MLP block (SwiGLU-style) with a residual dropout applied to its output."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
        self.act_fn = ACT2FN[config.hidden_act]
        self.residual_dropout = config.residual_dropout

    def forward(self, x):
        gated = self.act_fn(self.gate_proj(x)) * self.up_proj(x)
        projected = self.down_proj(gated)
        # Dropout on the MLP output before it is added back to the residual stream.
        return nn.functional.dropout(projected, p=self.residual_dropout, training=self.training)


class SeedOssAttention(nn.Module):
    """Multi-headed attention with grouped key/value heads, RoPE, and residual dropout on the output."""

    def __init__(self, config: SeedOssConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = config.head_dim
        self.num_key_value_heads = config.num_key_value_heads
        self.num_attention_heads = config.num_attention_heads
        self.num_key_value_groups = self.num_attention_heads // self.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, self.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            self.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_out_bias
        )
        self.residual_dropout = config.residual_dropout

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        # Dropout on the attention output before it is added back to the residual stream.
        attn_output = nn.functional.dropout(attn_output, p=self.residual_dropout, training=self.training)
        return attn_output, attn_weights


class SeedOssDecoderLayer(LlamaDecoderLayer):
    pass


class SeedOssPreTrainedModel(LlamaPreTrainedModel):
    pass


class SeedOssModel(LlamaModel):
    pass


class SeedOssForCausalLM(LlamaForCausalLM):
    def forward(
        self,
        **super_kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, SeedOssForCausalLM

        >>> model = SeedOssForCausalLM.from_pretrained("ByteDance-Seed/SeedOss-36B")
        >>> tokenizer = AutoTokenizer.from_pretrained("ByteDance-Seed/SeedOss-36B")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        return super().forward(**super_kwargs)


class SeedOssForSequenceClassification(LlamaForSequenceClassification):
    pass


class SeedOssForTokenClassification(LlamaForTokenClassification):
    pass


class SeedOssForQuestionAnswering(LlamaForQuestionAnswering):
    pass


__all__ = [
    "SeedOssForCausalLM",
    "SeedOssForQuestionAnswering",
    "SeedOssPreTrainedModel",
    "SeedOssModel",
    "SeedOssForSequenceClassification",
    "SeedOssForTokenClassification",
]
{ "repo_id": "huggingface/transformers", "file_path": "src/transformers/models/seed_oss/modular_seed_oss.py", "license": "Apache License 2.0", "lines": 160, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/transformers:tests/models/seed_oss/test_modeling_seed_oss.py
# Copyright 2025 Bytedance-Seed Ltd and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch SeedOss model."""

import unittest

import pytest

from transformers import AutoModelForCausalLM, AutoTokenizer, is_torch_available
from transformers.testing_utils import (
    cleanup,
    require_flash_attn,
    require_torch,
    require_torch_large_accelerator,
    slow,
    torch_device,
)

from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester


if is_torch_available():
    import torch

    from transformers import (
        SeedOssModel,
    )


class SeedOssModelTester(CausalLMModelTester):
    if is_torch_available():
        base_model_class = SeedOssModel


@require_torch
class SeedOssModelTest(CausalLMModelTest, unittest.TestCase):
    model_tester_class = SeedOssModelTester
    _is_stateful = True
    model_split_percents = [0.5, 0.6]


@slow
@require_torch_large_accelerator
class SeedOssIntegrationTest(unittest.TestCase):
    """Integration tests that run real generations on the SeedOss-36B base checkpoint
    across the eager, sdpa and flash-attention-2 attention implementations."""

    input_text = ["How to make pasta?", "Hi ByteDance-Seed"]
    # Single source of truth for the checkpoint used by every test in this class.
    model_id = "ByteDance-Seed/Seed-OSS-36B-Base"

    def setUp(self):
        cleanup(torch_device, gc_collect=True)

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    def test_model_36b_eager(self):
        EXPECTED_TEXTS = [
            "How to make pasta?\nHow to make pasta?\nPasta is a popular dish that is enjoyed by people all over",
            "Hi ByteDance-Seed team,\nI am trying to run the code on the <beginning of the code>seed",
        ]

        # Fix: use self.model_id (same value as before) instead of a duplicated hard-coded
        # repo string, so the checkpoint is defined in exactly one place like the sibling tests.
        model = AutoModelForCausalLM.from_pretrained(
            self.model_id,
            torch_dtype=torch.bfloat16,
            attn_implementation="eager",
            device_map="auto",
        )
        tokenizer = AutoTokenizer.from_pretrained(self.model_id)
        inputs = tokenizer(self.input_text, return_tensors="pt", padding=True, return_token_type_ids=False).to(
            model.model.embed_tokens.weight.device
        )

        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(output_text, EXPECTED_TEXTS)

    def test_model_36b_sdpa(self):
        EXPECTED_TEXTS = [
            "How to make pasta?\nHow to make pasta?\nPasta is a popular dish that is enjoyed by people all over",
            "Hi ByteDance-Seed team,\nI am trying to run the code on the <beginning of the code>seed",
        ]

        # default attention is `sdpa` (and this model repo. doesn't specify explicitly) --> we get `sdpa` here
        model = AutoModelForCausalLM.from_pretrained(self.model_id, torch_dtype=torch.bfloat16, device_map="auto")
        tokenizer = AutoTokenizer.from_pretrained(self.model_id)
        inputs = tokenizer(self.input_text, return_tensors="pt", padding=True, return_token_type_ids=False).to(
            model.model.embed_tokens.weight.device
        )

        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(output_text, EXPECTED_TEXTS)

    @require_flash_attn
    @require_torch_large_accelerator
    @pytest.mark.flash_attn_test
    def test_model_36b_flash_attn(self):
        EXPECTED_TEXTS = [
            "How to make pasta?\nHow to make pasta?\nPasta is a popular dish that is enjoyed by people all over",
            "Hi ByteDance-Seed team,\nI am trying to run the code on the <beginning of the code>seed",
        ]

        model = AutoModelForCausalLM.from_pretrained(
            self.model_id, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2", device_map="auto"
        )
        tokenizer = AutoTokenizer.from_pretrained(self.model_id)
        inputs = tokenizer(self.input_text, return_tensors="pt", padding=True, return_token_type_ids=False).to(
            model.model.embed_tokens.weight.device
        )

        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(output_text, EXPECTED_TEXTS)
{ "repo_id": "huggingface/transformers", "file_path": "tests/models/seed_oss/test_modeling_seed_oss.py", "license": "Apache License 2.0", "lines": 98, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test