code stringlengths 66 870k | docstring stringlengths 19 26.7k | func_name stringlengths 1 138 | language stringclasses 1
value | repo stringlengths 7 68 | path stringlengths 5 324 | url stringlengths 46 389 | license stringclasses 7
values |
|---|---|---|---|---|---|---|---|
def test_generation_beyond_sliding_window(self, attn_implementation: str):
"""Test that we can correctly generate beyond the sliding window. This is non trivial as
we need to correctly slice the attention mask in all cases (because we use a HybridCache).
Outputs for every attention functions sho... | Test that we can correctly generate beyond the sliding window. This is non trivial as
we need to correctly slice the attention mask in all cases (because we use a HybridCache).
Outputs for every attention functions should be coherent and identical.
| test_generation_beyond_sliding_window | python | huggingface/transformers | tests/models/gemma3/test_modeling_gemma3.py | https://github.com/huggingface/transformers/blob/master/tests/models/gemma3/test_modeling_gemma3.py | Apache-2.0 |
def prepare_image_inputs(self, batch_size: Optional[int] = None):
"""This function prepares a list of PIL images for testing"""
images = super().prepare_image_inputs(batch_size)
if isinstance(images, (list, tuple)):
images = [[image] for image in images]
return images | This function prepares a list of PIL images for testing | prepare_image_inputs | python | huggingface/transformers | tests/models/gemma3/test_processing_gemma3.py | https://github.com/huggingface/transformers/blob/master/tests/models/gemma3/test_processing_gemma3.py | Apache-2.0 |
def test_special_mm_token_truncation(self):
"""Tests that special vision tokens do not get truncated when `truncation=True` is set."""
processor = self.get_processor()
input_str = self.prepare_text_inputs(batch_size=2, modality="image")
image_input = self.prepare_image_inputs(batch_siz... | Tests that special vision tokens do not get truncated when `truncation=True` is set. | test_special_mm_token_truncation | python | huggingface/transformers | tests/models/gemma3/test_processing_gemma3.py | https://github.com/huggingface/transformers/blob/master/tests/models/gemma3/test_processing_gemma3.py | Apache-2.0 |
def test_flash_attn_2_generate_padding_left(self):
"""
Overwriting the common test as the test is flaky on tiny models
"""
model = GPT2LMHeadModel.from_pretrained("gpt2", torch_dtype=torch.float16).to(0)
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
texts = ["hi", "... |
Overwriting the common test as the test is flaky on tiny models
| test_flash_attn_2_generate_padding_left | python | huggingface/transformers | tests/models/gpt2/test_modeling_gpt2.py | https://github.com/huggingface/transformers/blob/master/tests/models/gpt2/test_modeling_gpt2.py | Apache-2.0 |
def test_lm_generate_distilgpt2_left_padding(self):
"""Tests that the generated text is the same, regardless of left padding"""
model = TFGPT2LMHeadModel.from_pretrained("distilbert/distilgpt2")
tokenizer = GPT2Tokenizer.from_pretrained("distilbert/distilgpt2")
tokenizer.pad_token = tok... | Tests that the generated text is the same, regardless of left padding | test_lm_generate_distilgpt2_left_padding | python | huggingface/transformers | tests/models/gpt2/test_modeling_tf_gpt2.py | https://github.com/huggingface/transformers/blob/master/tests/models/gpt2/test_modeling_tf_gpt2.py | Apache-2.0 |
def copy_cache(cache: DynamicCache):
"""Deep copy a DynamicCache to reuse the same one multiple times."""
new_cache = cache
for i in range(len(cache)):
new_cache.key_cache[i] = cache.key_cache[i].clone()
new_cache.value_cache[i] = cache.value_cache[i].... | Deep copy a DynamicCache to reuse the same one multiple times. | copy_cache | python | huggingface/transformers | tests/models/gpt_neox/test_modeling_gpt_neox.py | https://github.com/huggingface/transformers/blob/master/tests/models/gpt_neox/test_modeling_gpt_neox.py | Apache-2.0 |
def test_save_load_pretrained_default(self):
"""Ensure we can save / reload a processor correctly."""
tokenizer = self.get_tokenizer()
audio_processor = self.get_audio_processor()
processor = GraniteSpeechProcessor(
tokenizer=tokenizer,
audio_processor=audio_proce... | Ensure we can save / reload a processor correctly. | test_save_load_pretrained_default | python | huggingface/transformers | tests/models/granite_speech/test_processor_granite_speech.py | https://github.com/huggingface/transformers/blob/master/tests/models/granite_speech/test_processor_granite_speech.py | Apache-2.0 |
def test_bad_text_fails(self):
"""Ensure we gracefully fail if text is the wrong type."""
tokenizer = self.get_tokenizer()
audio_processor = self.get_audio_processor()
processor = GraniteSpeechProcessor(tokenizer=tokenizer, audio_processor=audio_processor)
with pytest.raises(Typ... | Ensure we gracefully fail if text is the wrong type. | test_bad_text_fails | python | huggingface/transformers | tests/models/granite_speech/test_processor_granite_speech.py | https://github.com/huggingface/transformers/blob/master/tests/models/granite_speech/test_processor_granite_speech.py | Apache-2.0 |
def test_bad_nested_text_fails(self):
"""Ensure we gracefully fail if text is the wrong nested type."""
tokenizer = self.get_tokenizer()
audio_processor = self.get_audio_processor()
processor = GraniteSpeechProcessor(
tokenizer=tokenizer,
audio_processor=audio_pro... | Ensure we gracefully fail if text is the wrong nested type. | test_bad_nested_text_fails | python | huggingface/transformers | tests/models/granite_speech/test_processor_granite_speech.py | https://github.com/huggingface/transformers/blob/master/tests/models/granite_speech/test_processor_granite_speech.py | Apache-2.0 |
def test_bad_audio_fails(self):
"""Ensure we gracefully fail if audio is the wrong type."""
tokenizer = self.get_tokenizer()
audio_processor = self.get_audio_processor()
processor = GraniteSpeechProcessor(
tokenizer=tokenizer,
audio_processor=audio_processor,
... | Ensure we gracefully fail if audio is the wrong type. | test_bad_audio_fails | python | huggingface/transformers | tests/models/granite_speech/test_processor_granite_speech.py | https://github.com/huggingface/transformers/blob/master/tests/models/granite_speech/test_processor_granite_speech.py | Apache-2.0 |
def test_nested_bad_audio_fails(self):
"""Ensure we gracefully fail if audio is the wrong nested type."""
tokenizer = self.get_tokenizer()
audio_processor = self.get_audio_processor()
processor = GraniteSpeechProcessor(
tokenizer=tokenizer,
audio_processor=audio_p... | Ensure we gracefully fail if audio is the wrong nested type. | test_nested_bad_audio_fails | python | huggingface/transformers | tests/models/granite_speech/test_processor_granite_speech.py | https://github.com/huggingface/transformers/blob/master/tests/models/granite_speech/test_processor_granite_speech.py | Apache-2.0 |
def test_audio_token_filling_same_len_feature_tensors(self, vec_dims, num_expected_features, random_func):
"""Ensure audio token filling is handled correctly when we have
one or more audio inputs whose features are all the same length
stacked into a tensor / numpy array.
NOTE: Currently... | Ensure audio token filling is handled correctly when we have
one or more audio inputs whose features are all the same length
stacked into a tensor / numpy array.
NOTE: Currently we enforce that each sample can only have one audio.
| test_audio_token_filling_same_len_feature_tensors | python | huggingface/transformers | tests/models/granite_speech/test_processor_granite_speech.py | https://github.com/huggingface/transformers/blob/master/tests/models/granite_speech/test_processor_granite_speech.py | Apache-2.0 |
def test_audio_token_filling_varying_len_feature_list(self):
"""Ensure audio token filling is handled correctly when we have
multiple varying len audio sequences passed as a list.
"""
tokenizer = self.get_tokenizer()
audio_processor = self.get_audio_processor()
processor ... | Ensure audio token filling is handled correctly when we have
multiple varying len audio sequences passed as a list.
| test_audio_token_filling_varying_len_feature_list | python | huggingface/transformers | tests/models/granite_speech/test_processor_granite_speech.py | https://github.com/huggingface/transformers/blob/master/tests/models/granite_speech/test_processor_granite_speech.py | Apache-2.0 |
def test_device_override(self):
"""Ensure that we regardless of the processing device, the tensors
produced are on the CPU.
"""
tokenizer = self.get_tokenizer()
audio_processor = self.get_audio_processor()
processor = GraniteSpeechProcessor(
tokenizer=tokenize... | Ensure that we regardless of the processing device, the tensors
produced are on the CPU.
| test_device_override | python | huggingface/transformers | tests/models/granite_speech/test_processor_granite_speech.py | https://github.com/huggingface/transformers/blob/master/tests/models/granite_speech/test_processor_granite_speech.py | Apache-2.0 |
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to GroundingDinoImageProcessor,
assuming do_resize is set to True with a scalar size.
"""
if not batched:
image = image_inputs[0]... |
This function computes the expected height and width when providing images to GroundingDinoImageProcessor,
assuming do_resize is set to True with a scalar size.
| get_expected_values | python | huggingface/transformers | tests/models/grounding_dino/test_image_processing_grounding_dino.py | https://github.com/huggingface/transformers/blob/master/tests/models/grounding_dino/test_image_processing_grounding_dino.py | Apache-2.0 |
def generate_fake_bounding_boxes(n_boxes):
"""Generate bounding boxes in the format (center_x, center_y, width, height)"""
# Validate the input
if not isinstance(n_boxes, int):
raise ValueError("n_boxes must be an integer")
if n_boxes <= 0:
raise ValueError("n_boxes must be a positive in... | Generate bounding boxes in the format (center_x, center_y, width, height) | generate_fake_bounding_boxes | python | huggingface/transformers | tests/models/grounding_dino/test_modeling_grounding_dino.py | https://github.com/huggingface/transformers/blob/master/tests/models/grounding_dino/test_modeling_grounding_dino.py | Apache-2.0 |
def test_create_position_ids_respects_padding_index(self):
"""This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is IBertEmbe... | This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is IBertEmbeddings.padding_idx + 1
| test_create_position_ids_respects_padding_index | python | huggingface/transformers | tests/models/ibert/test_modeling_ibert.py | https://github.com/huggingface/transformers/blob/master/tests/models/ibert/test_modeling_ibert.py | Apache-2.0 |
def test_create_position_ids_from_inputs_embeds(self):
"""This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is IBertEmbedding... | This is a regression test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is IBertEmbeddings.padding_idx + 1
| test_create_position_ids_from_inputs_embeds | python | huggingface/transformers | tests/models/ibert/test_modeling_ibert.py | https://github.com/huggingface/transformers/blob/master/tests/models/ibert/test_modeling_ibert.py | Apache-2.0 |
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to IdeficsImageProcessor,
assuming do_resize is set to True with a scalar size and size_divisor.
"""
if not batched:
size = self.... |
This function computes the expected height and width when providing images to IdeficsImageProcessor,
assuming do_resize is set to True with a scalar size and size_divisor.
| get_expected_values | python | huggingface/transformers | tests/models/idefics/test_image_processing_idefics.py | https://github.com/huggingface/transformers/blob/master/tests/models/idefics/test_image_processing_idefics.py | Apache-2.0 |
def test_left_padding_compatibility(self):
"""Overwrite because IDEFICS needs image attention mask to be also padded"""
# NOTE: left-padding results in small numerical differences. This is expected.
# See https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535
d... | Overwrite because IDEFICS needs image attention mask to be also padded | test_left_padding_compatibility | python | huggingface/transformers | tests/models/idefics/test_modeling_idefics.py | https://github.com/huggingface/transformers/blob/master/tests/models/idefics/test_modeling_idefics.py | Apache-2.0 |
def test_generate_continue_from_past_key_values(self):
"""Overwrite because IDEFICS needs image attention mask to be also processed"""
# Tests that we can continue generating from past key values, returned from a previous `generate` call
for model_class in self.all_generative_model_classes:
... | Overwrite because IDEFICS needs image attention mask to be also processed | test_generate_continue_from_past_key_values | python | huggingface/transformers | tests/models/idefics/test_modeling_idefics.py | https://github.com/huggingface/transformers/blob/master/tests/models/idefics/test_modeling_idefics.py | Apache-2.0 |
def test_generate_without_input_ids(self):
"""Overwrite because IDEFICS needs image attention mask to be also processed and requires image at input always."""
config, input_dict = self.prepare_config_and_inputs_for_generate()
pixel_values = input_dict["pixel_values"]
image_attention_mas... | Overwrite because IDEFICS needs image attention mask to be also processed and requires image at input always. | test_generate_without_input_ids | python | huggingface/transformers | tests/models/idefics/test_modeling_idefics.py | https://github.com/huggingface/transformers/blob/master/tests/models/idefics/test_modeling_idefics.py | Apache-2.0 |
def test_generate_continue_from_inputs_embeds(self):
"""Overwrite for IDEFICS: Ensure image attention mask is processed while continuing from `inputs_embeds`."""
for model_class in self.all_generative_model_classes:
config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
... | Overwrite for IDEFICS: Ensure image attention mask is processed while continuing from `inputs_embeds`. | test_generate_continue_from_inputs_embeds | python | huggingface/transformers | tests/models/idefics/test_modeling_idefics.py | https://github.com/huggingface/transformers/blob/master/tests/models/idefics/test_modeling_idefics.py | Apache-2.0 |
def _check_attentions_for_generate(
self, batch_size, attentions, prompt_length, output_length, config, decoder_past_key_values
):
"""
Overwrite from generation tests because Idefics has only SDPA layers.
Do not skip because we still want generation tests to run. Rather we can remove... |
Overwrite from generation tests because Idefics has only SDPA layers.
Do not skip because we still want generation tests to run. Rather we can remove checks for shape.
| _check_attentions_for_generate | python | huggingface/transformers | tests/models/idefics/test_modeling_idefics.py | https://github.com/huggingface/transformers/blob/master/tests/models/idefics/test_modeling_idefics.py | Apache-2.0 |
def prepare_prompts(self):
"""This function prepares a list of PIL images"""
num_images = 2
images = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8) for x in range(num_images)]
images = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in images]
# print([type(x) for x i... | This function prepares a list of PIL images | prepare_prompts | python | huggingface/transformers | tests/models/idefics/test_processor_idefics.py | https://github.com/huggingface/transformers/blob/master/tests/models/idefics/test_processor_idefics.py | Apache-2.0 |
def test_tokenizer_left_padding(self):
"""Identical to test_tokenizer_padding, but with padding_side not explicitly set."""
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = IdeficsProcessor(tokenizer=tokenizer, image_processor=image_processor)
... | Identical to test_tokenizer_padding, but with padding_side not explicitly set. | test_tokenizer_left_padding | python | huggingface/transformers | tests/models/idefics/test_processor_idefics.py | https://github.com/huggingface/transformers/blob/master/tests/models/idefics/test_processor_idefics.py | Apache-2.0 |
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to BridgeTowerImageProcessor,
assuming do_resize is set to True with a scalar size and size_divisor.
"""
if not batched:
shortest... |
This function computes the expected height and width when providing images to BridgeTowerImageProcessor,
assuming do_resize is set to True with a scalar size and size_divisor.
| get_expected_values | python | huggingface/transformers | tests/models/idefics2/test_image_processing_idefics2.py | https://github.com/huggingface/transformers/blob/master/tests/models/idefics2/test_image_processing_idefics2.py | Apache-2.0 |
def prepare_image_inputs(
self,
batch_size=None,
min_resolution=None,
max_resolution=None,
num_channels=None,
num_images=None,
size_divisor=None,
equal_resolution=False,
numpify=False,
torchify=False,
):
"""This function prepare... | This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
One can specify whether the images are of the same resolution or not.
| prepare_image_inputs | python | huggingface/transformers | tests/models/idefics2/test_image_processing_idefics2.py | https://github.com/huggingface/transformers/blob/master/tests/models/idefics2/test_image_processing_idefics2.py | Apache-2.0 |
def prepare_image_inputs(
self,
batch_size=None,
min_resolution=None,
max_resolution=None,
num_channels=None,
num_images=None,
size_divisor=None,
equal_resolution=False,
numpify=False,
torchify=False,
):
"""This function prepare... | This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
One can specify whether the images are of the same resolution or not.
| prepare_image_inputs | python | huggingface/transformers | tests/models/idefics3/test_image_processing_idefics3.py | https://github.com/huggingface/transformers/blob/master/tests/models/idefics3/test_image_processing_idefics3.py | Apache-2.0 |
def test_text_only_inference(self):
"""Test that the processor works correctly with text-only input."""
processor = self.get_processor()
text = "This is a simple text without images."
inputs = processor(text=text)
tokenized_sentence = processor.tokenizer(text, add_special_token... | Test that the processor works correctly with text-only input. | test_text_only_inference | python | huggingface/transformers | tests/models/idefics3/test_processor_idefics3.py | https://github.com/huggingface/transformers/blob/master/tests/models/idefics3/test_processor_idefics3.py | Apache-2.0 |
def test_missing_images_error(self):
"""Test that appropriate error is raised when images are referenced but not provided."""
processor = self.get_processor()
# Test single text with image token but no image
text = "Let me show you this image: <image> What do you think?"
with se... | Test that appropriate error is raised when images are referenced but not provided. | test_missing_images_error | python | huggingface/transformers | tests/models/idefics3/test_processor_idefics3.py | https://github.com/huggingface/transformers/blob/master/tests/models/idefics3/test_processor_idefics3.py | Apache-2.0 |
def test_inference_fp16(self):
r"""
A small test to make sure that inference work in half precision without any problem.
"""
model = IJepaModel.from_pretrained(
"facebook/ijepa_vith14_1k",
torch_dtype=torch.float16,
device_map="auto",
)
... |
A small test to make sure that inference work in half precision without any problem.
| test_inference_fp16 | python | huggingface/transformers | tests/models/ijepa/test_modeling_ijepa.py | https://github.com/huggingface/transformers/blob/master/tests/models/ijepa/test_modeling_ijepa.py | Apache-2.0 |
def test_sdpa_can_dispatch_composite_models(self):
"""
Tests if composite models dispatch correctly on SDPA/eager when requested so when loading the model.
This tests only by looking at layer names, as usually SDPA layers are called "SDPAAttention".
In contrast to the above test, this on... |
Tests if composite models dispatch correctly on SDPA/eager when requested so when loading the model.
This tests only by looking at layer names, as usually SDPA layers are called "SDPAAttention".
In contrast to the above test, this one checks if the "config._attn_implamentation" is a dict after ... | test_sdpa_can_dispatch_composite_models | python | huggingface/transformers | tests/models/instructblip/test_modeling_instructblip.py | https://github.com/huggingface/transformers/blob/master/tests/models/instructblip/test_modeling_instructblip.py | Apache-2.0 |
def test_sdpa_can_dispatch_composite_models(self):
"""
Tests if composite models dispatch correctly on SDPA/eager when requested so when loading the model.
This tests only by looking at layer names, as usually SDPA layers are calles "SDPAAttention".
In contrast to the above test, this on... |
Tests if composite models dispatch correctly on SDPA/eager when requested so when loading the model.
This tests only by looking at layer names, as usually SDPA layers are calles "SDPAAttention".
In contrast to the above test, this one checks if the "config._attn_implamentation" is a dict after ... | test_sdpa_can_dispatch_composite_models | python | huggingface/transformers | tests/models/instructblipvideo/test_modeling_instructblipvideo.py | https://github.com/huggingface/transformers/blob/master/tests/models/instructblipvideo/test_modeling_instructblipvideo.py | Apache-2.0 |
def test_apply_chat_template_video_special_processing(self):
"""
Tests that models can use their own preprocessing to preprocess conversations.
"""
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
... |
Tests that models can use their own preprocessing to preprocess conversations.
| test_apply_chat_template_video_special_processing | python | huggingface/transformers | tests/models/internvl/test_processor_internvl.py | https://github.com/huggingface/transformers/blob/master/tests/models/internvl/test_processor_internvl.py | Apache-2.0 |
def test_load_balancing_loss(self):
r"""
Let's make sure we can actually compute the loss and do a backward on it.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.num_experts = 16
config.output_router_... |
Let's make sure we can actually compute the loss and do a backward on it.
| test_load_balancing_loss | python | huggingface/transformers | tests/models/jamba/test_modeling_jamba.py | https://github.com/huggingface/transformers/blob/master/tests/models/jamba/test_modeling_jamba.py | Apache-2.0 |
def test_initialization(self):
r"""
Overriding the test_initialization test as the A_log and D params of the Mamba block are initialized differently
"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
... |
Overriding the test_initialization test as the A_log and D params of the Mamba block are initialized differently
| test_initialization | python | huggingface/transformers | tests/models/jamba/test_modeling_jamba.py | https://github.com/huggingface/transformers/blob/master/tests/models/jamba/test_modeling_jamba.py | Apache-2.0 |
def test_attention_outputs(self):
r"""
Overriding the test_attention_outputs test as the Jamba model outputs attention only for its attention layers
"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = get... |
Overriding the test_attention_outputs test as the Jamba model outputs attention only for its attention layers
| test_attention_outputs | python | huggingface/transformers | tests/models/jamba/test_modeling_jamba.py | https://github.com/huggingface/transformers/blob/master/tests/models/jamba/test_modeling_jamba.py | Apache-2.0 |
def test_flash_attn_2_fp32_ln(self):
r"""
Overriding the test_flash_attn_2_fp32_ln test as the Jamba model, like Mixtral, doesn't support
right padding + use cache with FA2
"""
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.model_test... |
Overriding the test_flash_attn_2_fp32_ln test as the Jamba model, like Mixtral, doesn't support
right padding + use cache with FA2
| test_flash_attn_2_fp32_ln | python | huggingface/transformers | tests/models/jamba/test_modeling_jamba.py | https://github.com/huggingface/transformers/blob/master/tests/models/jamba/test_modeling_jamba.py | Apache-2.0 |
def test_chat_template_accepts_processing_kwargs(self):
"""Tests that the chat template correctly handles additional processing arguments."""
# Get processor and skip if it doesn't have a chat template
processor = self.get_processor()
if processor.chat_template is None:
self.... | Tests that the chat template correctly handles additional processing arguments. | test_chat_template_accepts_processing_kwargs | python | huggingface/transformers | tests/models/janus/test_processor_janus.py | https://github.com/huggingface/transformers/blob/master/tests/models/janus/test_processor_janus.py | Apache-2.0 |
def test_batch_encode_dynamic_overflowing(self):
"""
When calling batch_encode with multiple sequences, it can return different number of
overflowing encoding for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [E... |
When calling batch_encode with multiple sequences, it can return different number of
overflowing encoding for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
]
Thi... | test_batch_encode_dynamic_overflowing | python | huggingface/transformers | tests/models/layoutlmv2/test_tokenization_layoutlmv2.py | https://github.com/huggingface/transformers/blob/master/tests/models/layoutlmv2/test_tokenization_layoutlmv2.py | Apache-2.0 |
def test_batch_encode_dynamic_overflowing(self):
"""
When calling batch_encode with multiple sequences, it can return different number of
overflowing encoding for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [E... |
When calling batch_encode with multiple sequences, it can return different number of
overflowing encoding for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
]
Thi... | test_batch_encode_dynamic_overflowing | python | huggingface/transformers | tests/models/layoutlmv3/test_tokenization_layoutlmv3.py | https://github.com/huggingface/transformers/blob/master/tests/models/layoutlmv3/test_tokenization_layoutlmv3.py | Apache-2.0 |
def test_batch_encode_dynamic_overflowing(self):
"""
When calling batch_encode with multiple sequences, it can return different number of
overflowing encoding for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [E... |
When calling batch_encode with multiple sequences, it can return different number of
overflowing encoding for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
]
Thi... | test_batch_encode_dynamic_overflowing | python | huggingface/transformers | tests/models/layoutxlm/test_tokenization_layoutxlm.py | https://github.com/huggingface/transformers/blob/master/tests/models/layoutxlm/test_tokenization_layoutxlm.py | Apache-2.0 |
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
rais... | If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error. | assert_tensors_close | python | huggingface/transformers | tests/models/led/test_modeling_led.py | https://github.com/huggingface/transformers/blob/master/tests/models/led/test_modeling_led.py | Apache-2.0 |
def test_llama_3_1_hard(self):
"""
An integration test for llama 3.1. It tests against a long output to ensure the subtle numerical differences
from llama 3.1.'s RoPE can be detected
"""
# diff on `EXPECTED_TEXT`:
# 2024-08-26: updating from torch 2.3.1 to 2.4.0 slightly ... |
An integration test for llama 3.1. It tests against a long output to ensure the subtle numerical differences
from llama 3.1.'s RoPE can be detected
| test_llama_3_1_hard | python | huggingface/transformers | tests/models/llama/test_modeling_llama.py | https://github.com/huggingface/transformers/blob/master/tests/models/llama/test_modeling_llama.py | Apache-2.0 |
def test_stacked_causal_mask_static_cache(self):
"""same as above but with StaticCache"""
(
input_ids,
position_ids,
input_ids_shared_prefix,
mask_shared_prefix,
position_ids_shared_prefix,
) = self.get_test_data()
# regular ba... | same as above but with StaticCache | test_stacked_causal_mask_static_cache | python | huggingface/transformers | tests/models/llama/test_modeling_llama.py | https://github.com/huggingface/transformers/blob/master/tests/models/llama/test_modeling_llama.py | Apache-2.0 |
def test_llava_reload(self):
"""
Simple test for reloading default llava configs
"""
with tempfile.TemporaryDirectory() as tmp_dir:
config = LlavaConfig()
config.save_pretrained(tmp_dir)
reloaded = LlavaConfig.from_pretrained(tmp_dir)
asse... |
Simple test for reloading default llava configs
| test_llava_reload | python | huggingface/transformers | tests/models/llava/test_configuration_llava.py | https://github.com/huggingface/transformers/blob/master/tests/models/llava/test_configuration_llava.py | Apache-2.0 |
def test_pixtral_reload(self):
"""
Simple test for reloading pixtral configs
"""
vision_config = {
"model_type": "pixtral",
"head_dim": 64,
"hidden_act": "silu",
"image_size": 1024,
"is_composition": True,
"patch_siz... |
Simple test for reloading pixtral configs
| test_pixtral_reload | python | huggingface/transformers | tests/models/llava/test_configuration_llava.py | https://github.com/huggingface/transformers/blob/master/tests/models/llava/test_configuration_llava.py | Apache-2.0 |
def test_arbitrary_reload(self):
"""
Simple test for reloading arbitrarily composed subconfigs
"""
default_values = LlavaConfig().to_diff_dict()
default_values["vision_config"]["model_type"] = "pixtral"
default_values["text_config"]["model_type"] = "opt"
self.maxD... |
Simple test for reloading arbitrarily composed subconfigs
| test_arbitrary_reload | python | huggingface/transformers | tests/models/llava/test_configuration_llava.py | https://github.com/huggingface/transformers/blob/master/tests/models/llava/test_configuration_llava.py | Apache-2.0 |
def test_padding(self):
"""
LLaVA needs to pad images to square size before processing as per orig implementation.
Checks that image processor pads images correctly given different background colors.
"""
# taken from original implementation: https://github.com/haotian-liu/LLaVA/... |
LLaVA needs to pad images to square size before processing as per orig implementation.
Checks that image processor pads images correctly given different background colors.
| test_padding | python | huggingface/transformers | tests/models/llava/test_image_processing_llava.py | https://github.com/huggingface/transformers/blob/master/tests/models/llava/test_image_processing_llava.py | Apache-2.0 |
def test_mismatching_num_image_tokens(self):
"""
Tests that VLMs through an error with explicit message saying what is wrong
when number of images doesn't match number of image tokens in the text.
Also we need to test multi-image cases when one prompr has multiple image tokens.
"... |
Tests that VLMs through an error with explicit message saying what is wrong
when number of images doesn't match number of image tokens in the text.
Also we need to test multi-image cases when one prompr has multiple image tokens.
| test_mismatching_num_image_tokens | python | huggingface/transformers | tests/models/llava/test_modeling_llava.py | https://github.com/huggingface/transformers/blob/master/tests/models/llava/test_modeling_llava.py | Apache-2.0 |
def test_vision_feature_layers(self, vision_feature_layer):
"""
Test that we can use either one vision feature layer, or a list of
vision feature layers.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.vision_feature_layer = vision... |
Test that we can use either one vision feature layer, or a list of
vision feature layers.
| test_vision_feature_layers | python | huggingface/transformers | tests/models/llava/test_modeling_llava.py | https://github.com/huggingface/transformers/blob/master/tests/models/llava/test_modeling_llava.py | Apache-2.0 |
def test_special_mm_token_truncation(self):
"""Tests that special vision tokens do not get truncated when `truncation=True` is set."""
processor = LlavaProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf")
input_str = self.prepare_text_inputs(batch_size=2, modality="image")
image_input... | Tests that special vision tokens do not get truncated when `truncation=True` is set. | test_special_mm_token_truncation | python | huggingface/transformers | tests/models/llava/test_processor_llava.py | https://github.com/huggingface/transformers/blob/master/tests/models/llava/test_processor_llava.py | Apache-2.0 |
def test_mismatching_num_image_tokens(self):
"""
Tests that VLMs through an error with explicit message saying what is wrong
when number of images don't match number of image tokens in the text.
Also we need to test multi-image cases when one prompr has multiple image tokens.
"""... |
Tests that VLMs through an error with explicit message saying what is wrong
when number of images don't match number of image tokens in the text.
Also we need to test multi-image cases when one prompr has multiple image tokens.
| test_mismatching_num_image_tokens | python | huggingface/transformers | tests/models/llava_next/test_modeling_llava_next.py | https://github.com/huggingface/transformers/blob/master/tests/models/llava_next/test_modeling_llava_next.py | Apache-2.0 |
def test_vision_feature_layers(self, vision_feature_layer):
"""
Test that we can use either one vision feature layer, or a list of
vision feature layers.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.vision_feature_layer = vision... |
Test that we can use either one vision feature layer, or a list of
vision feature layers.
| test_vision_feature_layers | python | huggingface/transformers | tests/models/llava_next/test_modeling_llava_next.py | https://github.com/huggingface/transformers/blob/master/tests/models/llava_next/test_modeling_llava_next.py | Apache-2.0 |
def test_granite_vision(self):
"""
Check the expected output of a granite vision model, which leverages
multiple vision feature layers and a visual encoder with no CLS (siglip).
"""
granite_model_path = "ibm-granite/granite-vision-3.1-2b-preview"
model = LlavaNextForCondi... |
Check the expected output of a granite vision model, which leverages
multiple vision feature layers and a visual encoder with no CLS (siglip).
| test_granite_vision | python | huggingface/transformers | tests/models/llava_next/test_modeling_llava_next.py | https://github.com/huggingface/transformers/blob/master/tests/models/llava_next/test_modeling_llava_next.py | Apache-2.0 |
def test_mismatching_num_image_tokens(self):
"""
Tests that VLMs through an error with explicit message saying what is wrong
when number of images don't match number of image tokens in the text.
Also we need to test multi-image cases when one prompr has multiple image tokens.
"""... |
Tests that VLMs through an error with explicit message saying what is wrong
when number of images don't match number of image tokens in the text.
Also we need to test multi-image cases when one prompr has multiple image tokens.
| test_mismatching_num_image_tokens | python | huggingface/transformers | tests/models/llava_next_video/test_modeling_llava_next_video.py | https://github.com/huggingface/transformers/blob/master/tests/models/llava_next_video/test_modeling_llava_next_video.py | Apache-2.0 |
def test_vision_feature_layers(self, vision_feature_layer):
"""
Test that we can use either one vision feature layer, or a list of
vision feature layers.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.vision_feature_layer = vision... |
Test that we can use either one vision feature layer, or a list of
vision feature layers.
| test_vision_feature_layers | python | huggingface/transformers | tests/models/llava_next_video/test_modeling_llava_next_video.py | https://github.com/huggingface/transformers/blob/master/tests/models/llava_next_video/test_modeling_llava_next_video.py | Apache-2.0 |
def test_vision_feature_layers(self, vision_feature_layer):
"""
Test that we can use either one vision feature layer, or a list of
vision feature layers.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.vision_feature_layer = vision... |
Test that we can use either one vision feature layer, or a list of
vision feature layers.
| test_vision_feature_layers | python | huggingface/transformers | tests/models/llava_onevision/test_modeling_llava_onevision.py | https://github.com/huggingface/transformers/blob/master/tests/models/llava_onevision/test_modeling_llava_onevision.py | Apache-2.0 |
def assertInterval(self, member, container, msg=None):
r"""
Simple utility function to check if a member is inside an interval.
"""
if isinstance(member, torch.Tensor):
max_value, min_value = member.max().item(), member.min().item()
elif isinstance(member, list) or is... |
Simple utility function to check if a member is inside an interval.
| assertInterval | python | huggingface/transformers | tests/models/mamba/test_modeling_mamba.py | https://github.com/huggingface/transformers/blob/master/tests/models/mamba/test_modeling_mamba.py | Apache-2.0 |
def test_simple_generate(self):
"""
Simple generate test to avoid regressions.
Note: state-spaces (cuda) implementation and pure torch implementation
have irreconciliable differences as of now, which will cause this test to fail
in an environment with state-spaces installed.
... |
Simple generate test to avoid regressions.
Note: state-spaces (cuda) implementation and pure torch implementation
have irreconciliable differences as of now, which will cause this test to fail
in an environment with state-spaces installed.
| test_simple_generate | python | huggingface/transformers | tests/models/mamba2/test_modeling_mamba2.py | https://github.com/huggingface/transformers/blob/master/tests/models/mamba2/test_modeling_mamba2.py | Apache-2.0 |
def test_batched_equivalence_with_cache(self):
"""
Verifies that batched generation matches individual generation.
Important because of the specific caching mechanism + statefulness of mamba model.
Depending on precision and devices, differences can be observed from generation to generat... |
Verifies that batched generation matches individual generation.
Important because of the specific caching mechanism + statefulness of mamba model.
Depending on precision and devices, differences can be observed from generation to generation.
| test_batched_equivalence_with_cache | python | huggingface/transformers | tests/models/mamba2/test_modeling_mamba2.py | https://github.com/huggingface/transformers/blob/master/tests/models/mamba2/test_modeling_mamba2.py | Apache-2.0 |
def test_batched_equivalence_without_cache(self):
"""
Verifies that batched generation matches individual generation without cache.
Important because of the specific caching mechanism + statefulness of mamba model.
Depending on precision and devices, differences can be observed from gene... |
Verifies that batched generation matches individual generation without cache.
Important because of the specific caching mechanism + statefulness of mamba model.
Depending on precision and devices, differences can be observed from generation to generation.
| test_batched_equivalence_without_cache | python | huggingface/transformers | tests/models/mamba2/test_modeling_mamba2.py | https://github.com/huggingface/transformers/blob/master/tests/models/mamba2/test_modeling_mamba2.py | Apache-2.0 |
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
rais... | If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error. | assert_tensors_close | python | huggingface/transformers | tests/models/marian/test_modeling_marian.py | https://github.com/huggingface/transformers/blob/master/tests/models/marian/test_modeling_marian.py | Apache-2.0 |
def test_batch_encode_dynamic_overflowing(self):
"""
When calling batch_encode with multiple sequences, it can return different number of
overflowing encoding for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [E... |
When calling batch_encode with multiple sequences, it can return different number of
overflowing encoding for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
]
Thi... | test_batch_encode_dynamic_overflowing | python | huggingface/transformers | tests/models/markuplm/test_tokenization_markuplm.py | https://github.com/huggingface/transformers/blob/master/tests/models/markuplm/test_tokenization_markuplm.py | Apache-2.0 |
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to Mask2FormerImageProcessor,
assuming do_resize is set to True with a scalar size.
"""
if not batched:
image = image_inputs[0]
... |
This function computes the expected height and width when providing images to Mask2FormerImageProcessor,
assuming do_resize is set to True with a scalar size.
| get_expected_values | python | huggingface/transformers | tests/models/mask2former/test_image_processing_mask2former.py | https://github.com/huggingface/transformers/blob/master/tests/models/mask2former/test_image_processing_mask2former.py | Apache-2.0 |
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to MaskFormerImageProcessor,
assuming do_resize is set to True with a scalar size.
"""
if not batched:
image = image_inputs[0]
... |
This function computes the expected height and width when providing images to MaskFormerImageProcessor,
assuming do_resize is set to True with a scalar size.
| get_expected_values | python | huggingface/transformers | tests/models/maskformer/test_image_processing_maskformer.py | https://github.com/huggingface/transformers/blob/master/tests/models/maskformer/test_image_processing_maskformer.py | Apache-2.0 |
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
rais... | If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error. | assert_tensors_close | python | huggingface/transformers | tests/models/mbart/test_modeling_mbart.py | https://github.com/huggingface/transformers/blob/master/tests/models/mbart/test_modeling_mbart.py | Apache-2.0 |
def model(self):
"""Only load the model if needed."""
model = MBartForConditionalGeneration.from_pretrained(self.checkpoint_name).to(torch_device)
if "cuda" in torch_device:
model = model.half()
return model | Only load the model if needed. | model | python | huggingface/transformers | tests/models/mbart/test_modeling_mbart.py | https://github.com/huggingface/transformers/blob/master/tests/models/mbart/test_modeling_mbart.py | Apache-2.0 |
def prepare_image_inputs(self):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
"""
image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
ima... | This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
| prepare_image_inputs | python | huggingface/transformers | tests/models/mgp_str/test_processor_mgp_str.py | https://github.com/huggingface/transformers/blob/master/tests/models/mgp_str/test_processor_mgp_str.py | Apache-2.0 |
def test_load_balancing_loss(self):
r"""
Let's make sure we can actually compute the loss and do a backward on it.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.num_local_experts = 8
config.output_ro... |
Let's make sure we can actually compute the loss and do a backward on it.
| test_load_balancing_loss | python | huggingface/transformers | tests/models/minimax/test_modeling_minimax.py | https://github.com/huggingface/transformers/blob/master/tests/models/minimax/test_modeling_minimax.py | Apache-2.0 |
def test_past_key_values_format(self, custom_all_cache_shapes=None):
"""
Test that the KV cache is formatted correctly.
"""
for model_class in self.all_generative_model_classes:
config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
model = mod... |
Test that the KV cache is formatted correctly.
| test_past_key_values_format | python | huggingface/transformers | tests/models/minimax/test_modeling_minimax.py | https://github.com/huggingface/transformers/blob/master/tests/models/minimax/test_modeling_minimax.py | Apache-2.0 |
def test_special_mm_token_truncation(self):
"""Tests that special vision tokens do not get truncated when `truncation=True` is set."""
processor = self.get_processor()
input_str = self.prepare_text_inputs(batch_size=2, modality="image")
image_input = self.prepare_image_inputs(batch_siz... | Tests that special vision tokens do not get truncated when `truncation=True` is set. | test_special_mm_token_truncation | python | huggingface/transformers | tests/models/mistral3/test_processor_mistral3.py | https://github.com/huggingface/transformers/blob/master/tests/models/mistral3/test_processor_mistral3.py | Apache-2.0 |
def test_load_balancing_loss(self):
r"""
Let's make sure we can actually compute the loss and do a backward on it.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.num_local_experts = 8
config.output_ro... |
Let's make sure we can actually compute the loss and do a backward on it.
| test_load_balancing_loss | python | huggingface/transformers | tests/models/mixtral/test_modeling_mixtral.py | https://github.com/huggingface/transformers/blob/master/tests/models/mixtral/test_modeling_mixtral.py | Apache-2.0 |
def prepare_image_inputs(
self,
batch_size=None,
min_resolution=None,
max_resolution=None,
num_channels=None,
num_images=None,
size_divisor=None,
equal_resolution=False,
numpify=False,
torchify=False,
):
"""This function prepare... | This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
One can specify whether the images are of the same resolution or not.
| prepare_image_inputs | python | huggingface/transformers | tests/models/mllama/test_image_processing_mllama.py | https://github.com/huggingface/transformers/blob/master/tests/models/mllama/test_image_processing_mllama.py | Apache-2.0 |
def test_generate_text_only_with_cache(self):
"""
Tests that our cached generation with text-only inputs works. When mllama was introduced, this feature
required cache modifications (because layers are skipped in practice). This test should prevent regressions.
"""
config, inputs... |
Tests that our cached generation with text-only inputs works. When mllama was introduced, this feature
required cache modifications (because layers are skipped in practice). This test should prevent regressions.
| test_generate_text_only_with_cache | python | huggingface/transformers | tests/models/mllama/test_modeling_mllama.py | https://github.com/huggingface/transformers/blob/master/tests/models/mllama/test_modeling_mllama.py | Apache-2.0 |
def test_special_mm_token_truncation(self):
"""Tests that special vision tokens do not get truncated when `truncation=True` is set."""
processor = self.get_processor()
input_str = self.prepare_text_inputs(batch_size=2, modality="image")
image_input = self.prepare_image_inputs(batch_siz... | Tests that special vision tokens do not get truncated when `truncation=True` is set. | test_special_mm_token_truncation | python | huggingface/transformers | tests/models/mllama/test_processor_mllama.py | https://github.com/huggingface/transformers/blob/master/tests/models/mllama/test_processor_mllama.py | Apache-2.0 |
def test_eager_matches_sdpa_generate(self):
"""Overwritten -- mochi has custom inputs and custom output checks"""
max_new_tokens = 5
for model_class in self.all_generative_model_classes:
if not model_class._supports_sdpa:
self.skipTest(f"{model_class.__name__} does ... | Overwritten -- mochi has custom inputs and custom output checks | test_eager_matches_sdpa_generate | python | huggingface/transformers | tests/models/moshi/test_modeling_moshi.py | https://github.com/huggingface/transformers/blob/master/tests/models/moshi/test_modeling_moshi.py | Apache-2.0 |
def test_small_integration_test(self):
"""
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_mt5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_mt5_spm_model_path = '<... |
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_mt5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_mt5_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfMode... | test_small_integration_test | python | huggingface/transformers | tests/models/mt5/test_modeling_flax_mt5.py | https://github.com/huggingface/transformers/blob/master/tests/models/mt5/test_modeling_flax_mt5.py | Apache-2.0 |
def test_small_integration_test(self):
"""
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_mt5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_mt5_spm_model_path = '<... |
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_mt5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_mt5_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfMode... | test_small_integration_test | python | huggingface/transformers | tests/models/mt5/test_modeling_mt5.py | https://github.com/huggingface/transformers/blob/master/tests/models/mt5/test_modeling_mt5.py | Apache-2.0 |
def test_small_integration_test(self):
"""
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_mt5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_mt5_spm_model_path = '<... |
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_mt5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_mt5_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfMode... | test_small_integration_test | python | huggingface/transformers | tests/models/mt5/test_modeling_tf_mt5.py | https://github.com/huggingface/transformers/blob/master/tests/models/mt5/test_modeling_tf_mt5.py | Apache-2.0 |
def get_bip_bip(bip_duration=0.125, duration=0.5, sample_rate=32000):
"""Produces a series of 'bip bip' sounds at a given frequency."""
timesteps = np.arange(int(duration * sample_rate)) / sample_rate
wav = np.cos(2 * math.pi * 440 * timesteps)
time_period = (timesteps % (2 * bip_duration)) / (2 * bip_d... | Produces a series of 'bip bip' sounds at a given frequency. | get_bip_bip | python | huggingface/transformers | tests/models/musicgen/test_modeling_musicgen.py | https://github.com/huggingface/transformers/blob/master/tests/models/musicgen/test_modeling_musicgen.py | Apache-2.0 |
def get_bip_bip(bip_duration=0.125, duration=0.5, sample_rate=32000):
"""Produces a series of 'bip bip' sounds at a given frequency."""
timesteps = np.arange(int(duration * sample_rate)) / sample_rate
wav = np.cos(2 * math.pi * 440 * timesteps)
time_period = (timesteps % (2 * bip_duration)) / (2 * bip_d... | Produces a series of 'bip bip' sounds at a given frequency. | get_bip_bip | python | huggingface/transformers | tests/models/musicgen_melody/test_feature_extraction_musicgen_melody.py | https://github.com/huggingface/transformers/blob/master/tests/models/musicgen_melody/test_feature_extraction_musicgen_melody.py | Apache-2.0 |
def get_bip_bip(bip_duration=0.125, duration=0.5, sample_rate=32000):
"""Produces a series of 'bip bip' sounds at a given frequency."""
timesteps = np.arange(int(duration * sample_rate)) / sample_rate
wav = np.cos(2 * math.pi * 440 * timesteps)
time_period = (timesteps % (2 * bip_duration)) / (2 * bip_d... | Produces a series of 'bip bip' sounds at a given frequency. | get_bip_bip | python | huggingface/transformers | tests/models/musicgen_melody/test_modeling_musicgen_melody.py | https://github.com/huggingface/transformers/blob/master/tests/models/musicgen_melody/test_modeling_musicgen_melody.py | Apache-2.0 |
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
rais... | If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error. | assert_tensors_close | python | huggingface/transformers | tests/models/mvp/test_modeling_mvp.py | https://github.com/huggingface/transformers/blob/master/tests/models/mvp/test_modeling_mvp.py | Apache-2.0 |
def test_inference_logits(self):
r"""
Logits testing to check implementation consistency between `fairseq` implementation
and `transformers` implementation of NLLB-MoE transformers. We only check the logits
of the second sample of the batch, as it is padded.
"""
model = N... |
Logits testing to check implementation consistency between `fairseq` implementation
and `transformers` implementation of NLLB-MoE transformers. We only check the logits
of the second sample of the batch, as it is padded.
| test_inference_logits | python | huggingface/transformers | tests/models/nllb_moe/test_modeling_nllb_moe.py | https://github.com/huggingface/transformers/blob/master/tests/models/nllb_moe/test_modeling_nllb_moe.py | Apache-2.0 |
def test_batching_equivalence(self):
"""
Tests that the model supports batching and that the output is nearly the same for the same input in
different batch sizes.
(Why "nearly the same" not "exactly the same"? Batching uses different matmul shapes, which often leads to
different... |
Tests that the model supports batching and that the output is nearly the same for the same input in
different batch sizes.
(Why "nearly the same" not "exactly the same"? Batching uses different matmul shapes, which often leads to
different results: https://github.com/huggingface/transfo... | test_batching_equivalence | python | huggingface/transformers | tests/models/omdet_turbo/test_modeling_omdet_turbo.py | https://github.com/huggingface/transformers/blob/master/tests/models/omdet_turbo/test_modeling_omdet_turbo.py | Apache-2.0 |
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to OneFormerImageProcessor,
assuming do_resize is set to True with a scalar size.
"""
if not batched:
image = image_inputs[0]
... |
This function computes the expected height and width when providing images to OneFormerImageProcessor,
assuming do_resize is set to True with a scalar size.
| get_expected_values | python | huggingface/transformers | tests/models/oneformer/test_image_processing_oneformer.py | https://github.com/huggingface/transformers/blob/master/tests/models/oneformer/test_image_processing_oneformer.py | Apache-2.0 |
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to OneFormerProcessor,
assuming do_resize is set to True with a scalar size. It also provides the expected sequence length
for the task_inputs and te... |
This function computes the expected height and width when providing images to OneFormerProcessor,
assuming do_resize is set to True with a scalar size. It also provides the expected sequence length
for the task_inputs and text_list_input.
| get_expected_values | python | huggingface/transformers | tests/models/oneformer/test_processor_oneformer.py | https://github.com/huggingface/transformers/blob/master/tests/models/oneformer/test_processor_oneformer.py | Apache-2.0 |
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
rais... | If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error. | assert_tensors_close | python | huggingface/transformers | tests/models/opt/test_modeling_opt.py | https://github.com/huggingface/transformers/blob/master/tests/models/opt/test_modeling_opt.py | Apache-2.0 |
def test_mismatching_num_image_tokens(self):
"""
Tests that VLMs through an error with explicit message saying what is wrong
when number of images doesn't match number of image tokens in the text.
Also we need to test multi-image cases when one prompr has multiple image tokens.
"... |
Tests that VLMs through an error with explicit message saying what is wrong
when number of images doesn't match number of image tokens in the text.
Also we need to test multi-image cases when one prompr has multiple image tokens.
| test_mismatching_num_image_tokens | python | huggingface/transformers | tests/models/paligemma/test_modeling_paligemma.py | https://github.com/huggingface/transformers/blob/master/tests/models/paligemma/test_modeling_paligemma.py | Apache-2.0 |
def test_attention_mask_with_token_types(self):
"""Test that attention masking works correctly both with and without token type IDs."""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class._from... | Test that attention masking works correctly both with and without token type IDs. | test_attention_mask_with_token_types | python | huggingface/transformers | tests/models/paligemma/test_modeling_paligemma.py | https://github.com/huggingface/transformers/blob/master/tests/models/paligemma/test_modeling_paligemma.py | Apache-2.0 |
def test_mismatching_num_image_tokens(self):
"""
Tests that VLMs through an error with explicit message saying what is wrong
when number of images doesn't match number of image tokens in the text.
Also we need to test multi-image cases when one prompr has multiple image tokens.
"... |
Tests that VLMs through an error with explicit message saying what is wrong
when number of images doesn't match number of image tokens in the text.
Also we need to test multi-image cases when one prompr has multiple image tokens.
| test_mismatching_num_image_tokens | python | huggingface/transformers | tests/models/paligemma2/test_modeling_paligemma2.py | https://github.com/huggingface/transformers/blob/master/tests/models/paligemma2/test_modeling_paligemma2.py | Apache-2.0 |
def setUpClass(cls):
"""Setup method: Called once before test-cases execution"""
cls.params = {}
cls.params.update(
context_length=32,
patch_length=8,
num_input_channels=3,
patch_stride=8,
d_model=4,
expansion_factor=2,
... | Setup method: Called once before test-cases execution | setUpClass | python | huggingface/transformers | tests/models/patchtsmixer/test_modeling_patchtsmixer.py | https://github.com/huggingface/transformers/blob/master/tests/models/patchtsmixer/test_modeling_patchtsmixer.py | Apache-2.0 |
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
rais... | If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error. | assert_tensors_close | python | huggingface/transformers | tests/models/pegasus/test_modeling_pegasus.py | https://github.com/huggingface/transformers/blob/master/tests/models/pegasus/test_modeling_pegasus.py | Apache-2.0 |
def test_equivalence_to_orig_tokenizer(self):
"""
To run with original TF tokenizer:
!wget https://github.com/google-research/bigbird/raw/master/bigbird/vocab/pegasus.model
!pip install tensorflow-text
import tensorflow.compat.v2 as tf
import tensorflow_text as tft
... |
To run with original TF tokenizer:
!wget https://github.com/google-research/bigbird/raw/master/bigbird/vocab/pegasus.model
!pip install tensorflow-text
import tensorflow.compat.v2 as tf
import tensorflow_text as tft
VOCAB_FILE = "./pegasus.model"
tf.enable_v2... | test_equivalence_to_orig_tokenizer | python | huggingface/transformers | tests/models/pegasus/test_tokenization_pegasus.py | https://github.com/huggingface/transformers/blob/master/tests/models/pegasus/test_tokenization_pegasus.py | Apache-2.0 |
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
rais... | If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error. | assert_tensors_close | python | huggingface/transformers | tests/models/pegasus_x/test_modeling_pegasus_x.py | https://github.com/huggingface/transformers/blob/master/tests/models/pegasus_x/test_modeling_pegasus_x.py | Apache-2.0 |
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
rais... | If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error. | assert_tensors_close | python | huggingface/transformers | tests/models/plbart/test_modeling_plbart.py | https://github.com/huggingface/transformers/blob/master/tests/models/plbart/test_modeling_plbart.py | Apache-2.0 |
def model(self):
    """Lazily build the pretrained PLBart model, placed on the target device.

    Loads the checkpoint named by ``self.checkpoint_name`` and moves it to
    ``torch_device``; on GPU the weights are cast to fp16 to reduce memory use.
    """
    loaded = PLBartForConditionalGeneration.from_pretrained(self.checkpoint_name)
    loaded = loaded.to(torch_device)
    # Half precision only makes sense on CUDA; CPU inference stays fp32.
    if "cuda" in torch_device:
        loaded = loaded.half()
    return loaded
def get_inputs(self):
"""get inputs for both feature extractor and tokenizer"""
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
speech_samples = ds.sort("id").select([0])["audio"]
input_speech = [x["array"] for x in speech_samples][0]
s... | get inputs for both feature extractor and tokenizer | get_inputs | python | huggingface/transformers | tests/models/pop2piano/test_processor_pop2piano.py | https://github.com/huggingface/transformers/blob/master/tests/models/pop2piano/test_processor_pop2piano.py | Apache-2.0 |
Subsets and Splits
Django Code with Docstrings
Filters Python code examples from the Django repository that contain Django-related code, helping identify relevant code snippets for understanding Django framework usage patterns.
SQL Console for Shuu12121/python-treesitter-filtered-datasetsV2
Retrieves specific code examples from the Flask repository but doesn't provide meaningful analysis or identify patterns beyond basic data retrieval.
HTTPX Repo Code and Docstrings
Retrieves specific code examples from the httpx repository, which is useful for understanding how particular libraries are used but doesn't provide broader analytical insights about the dataset.
Requests Repo Docstrings & Code
Retrieves code examples with their docstrings and file paths from the requests repository, providing basic filtering but limited analytical value beyond finding specific code samples.
Quart Repo Docstrings & Code
Retrieves code examples with their docstrings from the Quart repository, providing basic code samples but offering limited analytical value for understanding broader patterns or relationships in the dataset.