Dataset schema: text (string, 5 to 631k chars), id (string, 14 to 178 chars), metadata (dict), __index_level_0__ (int64, 0 to 647).
# Copyright 2021 HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest
from functools import lru_cache

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin, use_cache_if_possible


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    from_pretrained_id = "vinai/bartpho-syllable"
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    @classmethod
    def setUpClass(cls):
        super().setUpClass()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        cls.special_tokens_map = {"unk_token": "<unk>"}

        cls.monolingual_vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(cls.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, cls.monolingual_vocab_file, **cls.special_tokens_map)
        tokenizer.save_pretrained(cls.tmpdirname)

    @classmethod
    @use_cache_if_possible
    @lru_cache(maxsize=64)
    def get_tokenizer(cls, pretrained_name=None, **kwargs):
        kwargs.update(cls.special_tokens_map)
        pretrained_name = pretrained_name or cls.tmpdirname
        return BartphoTokenizer.from_pretrained(pretrained_name, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
transformers/tests/models/bartpho/test_tokenization_bartpho.py/0
{ "file_path": "transformers/tests/models/bartpho/test_tokenization_bartpho.py", "repo_id": "transformers", "token_count": 1178 }
493
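The test above pins down BartPho's two-level vocabulary: SentencePiece produces the subword tokens, but ids come from a separate monolingual vocab file, and any token missing from it falls back to <unk>. A minimal sketch of that lookup — a standalone illustration, not the actual BartphoTokenizer code; the id layout is inferred from the test's expected ids, with four special tokens assumed to occupy ids 0-3:

# Sketch: monolingual-vocab lookup with <unk> fallback, as exercised by
# test_full_tokenizer. Ids 0-3 stand for the special tokens <s>, <pad>,
# </s>, <unk>; the five fixture tokens then get ids 4-8.
monolingual_vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3,
                     "▁This": 4, "▁is": 5, "▁a": 6, "▁t": 7, "est": 8}

def convert_tokens_to_ids(tokens):
    unk_id = monolingual_vocab["<unk>"]
    return [monolingual_vocab.get(token, unk_id) for token in tokens]

# "▁l" and "à" are missing from the monolingual vocab, so both map to 3:
tokens = "▁This ▁is ▁a ▁l à ▁t est".split() + ["<unk>"]
print(convert_tokens_to_ids(tokens))  # [4, 5, 6, 3, 3, 7, 8, 3]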
#!/usr/bin/env python3
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Blenderbot small tokenizer."""

import json
import os
import unittest
from functools import lru_cache

from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
    VOCAB_FILES_NAMES,
    BlenderbotSmallTokenizer,
)

from ...test_tokenization_common import TokenizerTesterMixin, use_cache_if_possible


class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    from_pretrained_id = "facebook/blenderbot_small-90M"
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        cls.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        cls.merges_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(cls.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(cls.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    @classmethod
    @use_cache_if_possible
    @lru_cache(maxsize=64)
    def get_tokenizer(cls, pretrained_name=None, **kwargs):
        kwargs.update(cls.special_tokens_map)
        pretrained_name = pretrained_name or cls.tmpdirname
        return BlenderbotSmallTokenizer.from_pretrained(pretrained_name, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=False)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
transformers/tests/models/blenderbot_small/test_tokenization_blenderbot_small.py/0
{ "file_path": "transformers/tests/models/blenderbot_small/test_tokenization_blenderbot_small.py", "repo_id": "transformers", "token_count": 1611 }
494
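The merges list written by setUpClass above is a toy BPE table: a word is split into characters (the last carrying a </w> end marker), adjacent pairs are merged in rank order, and "@@" marks non-final subwords, which is how "apte" becomes "ap@@ te". A rough sketch of that merge loop under the standard rank-ordered BPE scheme — illustrative, not BlenderbotSmallTokenizer's exact implementation:

# Toy BPE merge loop over the fixture's merges, with the "@@" continuation
# convention used by BlenderbotSmall ("apte" -> ["ap@@", "te"]).
merges = [("a", "p"), ("t", "e</w>"), ("ap", "t</w>"), ("a", "d"),
          ("ad", "apt</w>"), ("a", "c"), ("ac", "t</w>")]
ranks = {pair: i for i, pair in enumerate(merges)}

def bpe(word):
    # split into characters, marking the final symbol with </w>
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:  # no applicable merge left
            break
        i = pairs.index(best)
        symbols = symbols[:i] + [best[0] + best[1]] + symbols[i + 2:]
    # strip the end marker; non-final subwords get the "@@" suffix
    return [s[:-4] if s.endswith("</w>") else s + "@@" for s in symbols]

print(bpe("apte"))   # ['ap@@', 'te']
print(bpe("adapt"))  # ['adapt']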
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Chinese-CLIP model."""

import inspect
import os
import tempfile
import unittest

import numpy as np
import requests

from transformers import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
    ModelTesterMixin,
    _config_zero_init,
    floats_tensor,
    ids_tensor,
    random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        ChineseCLIPModel,
        ChineseCLIPTextModel,
        ChineseCLIPVisionModel,
    )

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPProcessor


class ChineseCLIPTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """
        Returns a tiny configuration by default.
        """
        return ChineseCLIPTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = ChineseCLIPTextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = ChineseCLIPTextModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


class ChineseCLIPVisionModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return ChineseCLIPVisionConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values):
        model = ChineseCLIPVisionModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ChineseCLIPTextModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (ChineseCLIPTextModel,) if is_torch_available() else ()
    fx_compatible = False

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = ChineseCLIPTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ChineseCLIPTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    @slow
    def test_model_from_pretrained(self):
        model_name = "OFA-Sys/chinese-clip-vit-base-patch16"
        model = ChineseCLIPTextModel.from_pretrained(model_name)
        self.assertIsNotNone(model)

    @unittest.skip
    def test_training(self):
        pass

    @unittest.skip
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems not to compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems not to compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass


@require_torch
class ChineseCLIPVisionModelTest(ModelTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as CHINESE_CLIP does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (ChineseCLIPVisionModel,) if is_torch_available() else ()
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ChineseCLIPVisionModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=ChineseCLIPVisionConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="CHINESE_CLIP does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_get_set_embeddings(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip
    def test_training(self):
        pass

    @unittest.skip
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems not to compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems not to compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model_name = "OFA-Sys/chinese-clip-vit-base-patch16"
        model = ChineseCLIPVisionModel.from_pretrained(model_name)
        self.assertIsNotNone(model)


class ChineseCLIPModelTester:
    def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True):
        if text_kwargs is None:
            text_kwargs = {}
        if vision_kwargs is None:
            vision_kwargs = {}

        self.parent = parent
        self.text_model_tester = ChineseCLIPTextModelTester(parent, **text_kwargs)
        self.vision_model_tester = ChineseCLIPVisionModelTester(parent, **vision_kwargs)
        self.batch_size = self.text_model_tester.batch_size  # need bs for batching_equivalence test
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        (
            config,
            input_ids,
            token_type_ids,
            attention_mask,
            _,
            __,
            ___,
        ) = self.text_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()

        config = self.get_config()

        return config, input_ids, token_type_ids, attention_mask, pixel_values

    def get_config(self):
        return ChineseCLIPConfig(
            text_config=self.text_model_tester.get_config().to_dict(),
            vision_config=self.vision_model_tester.get_config().to_dict(),
            projection_dim=64,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, attention_mask, pixel_values):
        model = ChineseCLIPModel(config).to(torch_device).eval()
        with torch.no_grad():
            result = model(input_ids, pixel_values, attention_mask, token_type_ids)
        self.parent.assertEqual(
            result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)
        )
        self.parent.assertEqual(
            result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask, pixel_values = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
            "pixel_values": pixel_values,
            "return_loss": True,
        }
        return config, inputs_dict


@require_torch
class ChineseCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ChineseCLIPModel,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ChineseCLIPModel} if is_torch_available() else {}
    fx_compatible = False
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False

    def setUp(self):
        text_kwargs = {"use_labels": False, "batch_size": 12}
        vision_kwargs = {"batch_size": 12}
        self.model_tester = ChineseCLIPModelTester(self, text_kwargs, vision_kwargs)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Hidden_states is tested in individual model tests")
    def test_hidden_states_output(self):
        pass

    @unittest.skip(reason="Inputs_embeds is tested in individual model tests")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Retain_grad is tested in individual model tests")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason="ChineseCLIPModel does not have input/output embeddings")
    def test_model_get_set_embeddings(self):
        pass

    # override as the `logit_scale` parameter initialization is different for CHINESE_CLIP
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for sub_config_key in ("vision_config", "text_config"):
            sub_config = getattr(configs_no_init, sub_config_key, {})
            setattr(configs_no_init, sub_config_key, _config_zero_init(sub_config))
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    # check if `logit_scale` is initialized as per the original implementation
                    if name == "logit_scale":
                        self.assertAlmostEqual(
                            param.data.item(),
                            np.log(1 / 0.07),
                            delta=1e-3,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

    def _create_and_check_torchscript(self, config, inputs_dict):
        if not self.test_torchscript:
            self.skipTest(reason="test_torchscript is set to False")

        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        configs_no_init.torchscript = True
        configs_no_init.return_dict = False
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()

            try:
                input_ids = inputs_dict["input_ids"]
                pixel_values = inputs_dict["pixel_values"]  # CHINESE_CLIP needs pixel_values
                traced_model = torch.jit.trace(model, (input_ids, pixel_values))
            except RuntimeError:
                self.fail("Couldn't trace module.")

            with tempfile.TemporaryDirectory() as tmp_dir_name:
                pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")

                try:
                    torch.jit.save(traced_model, pt_file_name)
                except Exception:
                    self.fail("Couldn't save module.")

                try:
                    loaded_model = torch.jit.load(pt_file_name)
                except Exception:
                    self.fail("Couldn't load module.")

            model.to(torch_device)
            model.eval()

            loaded_model.to(torch_device)
            loaded_model.eval()

            model_state_dict = model.state_dict()
            loaded_model_state_dict = loaded_model.state_dict()

            non_persistent_buffers = {}
            for key in loaded_model_state_dict:
                if key not in model_state_dict:
                    non_persistent_buffers[key] = loaded_model_state_dict[key]

            loaded_model_state_dict = {
                key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
            }

            self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))

            model_buffers = list(model.buffers())
            for non_persistent_buffer in non_persistent_buffers.values():
                found_buffer = False
                for i, model_buffer in enumerate(model_buffers):
                    if torch.equal(non_persistent_buffer, model_buffer):
                        found_buffer = True
                        break

                self.assertTrue(found_buffer)
                model_buffers.pop(i)

            models_equal = True
            for layer_name, p1 in model_state_dict.items():
                p2 = loaded_model_state_dict[layer_name]
                if p1.data.ne(p2.data).sum() > 0:
                    models_equal = False

            self.assertTrue(models_equal)

    @slow
    def test_model_from_pretrained(self):
        model_name = "OFA-Sys/chinese-clip-vit-base-patch16"
        model = ChineseCLIPModel.from_pretrained(model_name)
        self.assertIsNotNone(model)


# We will verify our results on an image of Pikachu
def prepare_img():
    url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@require_vision
@require_torch
class ChineseCLIPModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model_name = "OFA-Sys/chinese-clip-vit-base-patch16"
        model = ChineseCLIPModel.from_pretrained(model_name).to(torch_device)
        processor = ChineseCLIPProcessor.from_pretrained(model_name)

        image = prepare_img()
        inputs = processor(
            text=["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], images=image, padding=True, return_tensors="pt"
        ).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        self.assertEqual(
            outputs.logits_per_image.shape,
            torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),
        )
        self.assertEqual(
            outputs.logits_per_text.shape,
            torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
        )

        probs = outputs.logits_per_image.softmax(dim=1)
        expected_probs = torch.tensor([[1.2686e-03, 5.4499e-02, 6.7968e-04, 9.4355e-01]], device=torch_device)

        torch.testing.assert_close(probs, expected_probs, rtol=5e-3, atol=5e-3)

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model_name = "OFA-Sys/chinese-clip-vit-base-patch16"
        model = ChineseCLIPModel.from_pretrained(model_name).to(torch_device)

        image_processor = ChineseCLIPProcessor.from_pretrained(
            model_name, size={"height": 180, "width": 180}, crop_size={"height": 180, "width": 180}
        )

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(text="what's in the image", images=image, return_tensors="pt").to(torch_device)

        # interpolate_pos_encoding=False should raise a ValueError
        with self.assertRaises(ValueError, msg="doesn't match model"):
            with torch.no_grad():
                model(**inputs, interpolate_pos_encoding=False)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 122, 768))

        self.assertEqual(outputs.vision_model_output.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.3990, 0.2983, -0.1239], [-0.1452, -0.2759, 0.0403], [-0.3149, -0.4763, 0.8555]]
        ).to(torch_device)

        torch.testing.assert_close(
            outputs.vision_model_output.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4
        )
transformers/tests/models/chinese_clip/test_modeling_chinese_clip.py/0
{ "file_path": "transformers/tests/models/chinese_clip/test_modeling_chinese_clip.py", "repo_id": "transformers", "token_count": 13156 }
495
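The shape checks in ChineseCLIPModelTester.create_and_check_model follow from the CLIP-style similarity computation: logits_per_text is a (text batch × image batch) matrix of temperature-scaled cosine similarities, and logits_per_image is its transpose. A shape-only sketch with random embeddings standing in for the real encoders — the batch sizes and projection_dim mirror the tester defaults; nothing here calls the actual model:

import torch
import torch.nn.functional as F

text_batch, image_batch, dim = 13, 12, 64  # tester defaults: text bs 13, vision bs 12, projection_dim 64

# L2-normalized projected embeddings, as in CLIP-style dual encoders
text_embeds = F.normalize(torch.randn(text_batch, dim), dim=-1)
image_embeds = F.normalize(torch.randn(image_batch, dim), dim=-1)

# exp(log(1/0.07)) = 1/0.07, the temperature whose init test_initialization checks
logit_scale = torch.tensor(1 / 0.07)
logits_per_text = logit_scale * text_embeds @ image_embeds.t()
logits_per_image = logits_per_text.t()
print(logits_per_text.shape, logits_per_image.shape)  # torch.Size([13, 12]) torch.Size([12, 13])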
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Clvp model."""

import tempfile
import unittest

import datasets
import numpy as np

from transformers import ClvpConfig, ClvpDecoderConfig, ClvpEncoderConfig
from transformers.testing_utils import (
    cleanup,
    require_torch,
    slow,
    torch_device,
)
from transformers.utils import is_torch_available

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
    ModelTesterMixin,
    _config_zero_init,
    ids_tensor,
    random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ClvpEncoder, ClvpForCausalLM, ClvpModel, ClvpModelForConditionalGeneration

from transformers import ClvpFeatureExtractor, ClvpTokenizer


class ClvpEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_labels=True,
        vocab_size=50,
        hidden_size=128,
        projection_dim=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=32,
        dropout=0.1,
        attention_dropout=0.1,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1

    def get_config(self):
        encoder_config = ClvpEncoderConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
        )

        return encoder_config

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        encoder_config = self.get_config()

        return encoder_config, input_ids, input_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        speech_config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids.to(torch_device), "attention_mask": input_mask.to(torch_device)}
        return speech_config, inputs_dict

    def create_and_check_model(self, speech_config, input_ids, input_mask):
        text_config = ClvpEncoderConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            initializer_range=self.initializer_range,
        )
        text_encoder_model = ClvpEncoder(config=text_config)
        text_encoder_model.to(torch_device)
        text_encoder_model.eval()
        with torch.no_grad():
            result = text_encoder_model(input_ids, attention_mask=input_mask)
            result = text_encoder_model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result[0].shape, (self.batch_size, self.projection_dim))

        # now check with speech config
        speech_encoder_model = ClvpEncoder(config=speech_config)
        speech_encoder_model.to(torch_device)
        speech_encoder_model.eval()
        with torch.no_grad():
            result = speech_encoder_model(input_ids, attention_mask=input_mask)
            result = speech_encoder_model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result[0].shape, (self.batch_size, self.projection_dim))


@require_torch
class ClvpEncoderTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (ClvpEncoder,) if is_torch_available() else ()
    test_pruning = False
    test_head_masking = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = ClvpEncoderTester(self)
        self.encoder_config_tester = ConfigTester(self, config_class=ClvpEncoderConfig, hidden_size=32)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        cleanup(torch_device)

    def test_config(self):
        self.encoder_config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="ClvpEncoder does not output loss")
    def test_training(self):
        pass

    @unittest.skip(reason="ClvpEncoder does not output loss")
    def test_training_gradient_checkpointing(self):
        pass


class ClvpDecoderTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=3,
        is_training=False,
        vocab_size=300,
        max_position_embeddings=256,
        max_text_tokens=256,
        use_input_mask=True,
        hidden_size=128,
        num_hidden_layers=2,
        num_attention_heads=2,
        bos_token_id=97,
        eos_token_id=98,
        relative_attention_num_buckets=4,
        relative_attention_max_distance=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.max_text_tokens = max_text_tokens
        self.use_input_mask = use_input_mask
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

    def get_config(self):
        decoder_config = ClvpDecoderConfig(
            vocab_size=self.vocab_size,
            max_position_embeddings=self.max_position_embeddings,
            max_text_tokens=self.max_text_tokens,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            relative_attention_max_distance=self.relative_attention_max_distance,
        )

        return decoder_config

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        decoder_config = self.get_config()

        return decoder_config, input_ids, input_mask

    def create_and_check_model(self, config, input_ids, attention_mask):
        model = ClvpForCausalLM(config).to(torch_device).eval()
        with torch.no_grad():
            result = model(input_ids=input_ids, attention_mask=attention_mask)

        self.parent.assertEqual(result[0].shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids.to(torch_device),
            "attention_mask": attention_mask.to(torch_device),
        }
        return config, inputs_dict


@require_torch
class ClvpDecoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ClvpModel, ClvpForCausalLM) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ClvpModelForConditionalGeneration} if is_torch_available() else {}

    test_pruning = False

    def setUp(self):
        self.model_tester = ClvpDecoderTester(self)
        self.decoder_config_tester = ConfigTester(self, config_class=ClvpDecoderConfig, hidden_size=32)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        cleanup(torch_device)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        if return_labels and model_class == ClvpForCausalLM:
            inputs_dict["labels"] = torch.zeros(
                [self.model_tester.batch_size, self.model_tester.seq_length], device=torch_device
            ).long()

        return inputs_dict

    def test_training(self):
        # we will only test the ClvpForCausalLM since it outputs loss
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        model = ClvpForCausalLM(config)
        model.to(torch_device)
        model.train()
        inputs = self._prepare_for_class(inputs_dict, ClvpForCausalLM, return_labels=True)
        loss = model(**inputs).loss
        loss.backward()

    def test_training_gradient_checkpointing(self):
        # we will only test the ClvpForCausalLM since it outputs loss
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.use_cache = False
        config.return_dict = True

        model = ClvpForCausalLM(config)
        model.to(torch_device)
        model.gradient_checkpointing_enable()
        model.train()
        inputs = self._prepare_for_class(inputs_dict, ClvpForCausalLM, return_labels=True)

        loss = model(**inputs).loss
        loss.backward()

    @unittest.skip(reason="Clvp `prepare_inputs_for_generation` function doesn't have cache position.")
    def test_generate_continue_from_inputs_embeds(self):
        pass


class ClvpModelForConditionalGenerationTester:
    def __init__(self, parent, is_training=False):
        self.parent = parent
        self.clvp_encoder_tester = ClvpEncoderTester(parent)
        self.is_training = is_training
        self.batch_size = self.clvp_encoder_tester.batch_size  # need bs for batching_equivalence test

    def get_config(self):
        decoder_config = ClvpDecoderConfig(
            vocab_size=50,
            max_position_embeddings=30,
            max_text_tokens=30,
            hidden_size=128,
            num_hidden_layers=1,
            num_attention_heads=2,
            bos_token_id=97,
            eos_token_id=98,
            relative_attention_num_buckets=4,
            relative_attention_max_distance=16,
        )
        text_config = self.clvp_encoder_tester.get_config()
        speech_config = self.clvp_encoder_tester.get_config()
        speech_config.vocab_size = 300

        return ClvpConfig.from_sub_model_configs(
            text_config,
            speech_config,
            decoder_config,
            projection_dim=16,
        )

    def prepare_config_and_inputs(self):
        _, input_ids, attention_mask = self.clvp_encoder_tester.prepare_config_and_inputs()

        ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050))
        audio = ds.sort("id")[0]["audio"]
        audio_sample = audio["array"]
        sr = audio["sampling_rate"]

        feature_extractor = ClvpFeatureExtractor()
        input_features = feature_extractor(raw_speech=audio_sample, sampling_rate=sr, return_tensors="pt")[
            "input_features"
        ].to(torch_device)

        config = self.get_config()

        return config, input_ids, attention_mask, input_features

    def create_and_check_model(self, config, input_ids, attention_mask, input_features):
        model = ClvpModelForConditionalGeneration(config).to(torch_device).eval()
        with torch.no_grad():
            result = model(input_ids=input_ids, input_features=input_features, attention_mask=attention_mask)

        self.parent.assertEqual(result.logits_per_speech.shape, (2, self.clvp_encoder_tester.batch_size))
        self.parent.assertEqual(result.logits_per_text.shape, (self.clvp_encoder_tester.batch_size, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, input_features = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids.to(torch_device),
            "attention_mask": attention_mask.to(torch_device),
            "input_features": input_features.to(torch_device),
            "return_loss": False,
        }
        return config, inputs_dict


@require_torch
class ClvpModelForConditionalGenerationTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (ClvpModelForConditionalGeneration,) if is_torch_available() else ()
    # Doesn't run generation tests. There are interface mismatches when using `generate` -- TODO @gante
    all_generative_model_classes = ()

    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = ClvpModelForConditionalGenerationTester(self)
        common_properties = ["projection_dim", "logit_scale_init_value"]
        self.clvp_config_tester = ConfigTester(
            self, config_class=ClvpConfig, has_text_modality=False, common_properties=common_properties, hidden_size=32
        )

    def test_config(self):
        self.clvp_config_tester.run_common_tests()

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        cleanup(torch_device)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            # check for decoder model, text encoder model and speech encoder model hidden states
            decoder_hidden_states = outputs.decoder_hidden_states
            text_encoder_hidden_states = outputs.text_encoder_hidden_states
            speech_encoder_hidden_states = outputs.speech_encoder_hidden_states

            # check length of the hidden states
            expected_decoder_num_layers = config.decoder_config.num_hidden_layers + 1
            self.assertEqual(len(decoder_hidden_states), expected_decoder_num_layers)

            expected_speech_encoder_num_layers = config.text_config.num_hidden_layers + 1
            self.assertEqual(len(text_encoder_hidden_states), expected_speech_encoder_num_layers)

            expected_text_encoder_num_layers = config.speech_config.num_hidden_layers + 1
            self.assertEqual(len(speech_encoder_hidden_states), expected_text_encoder_num_layers)

            # check shapes of each hidden state

            # for the decoder model we will only test the dimension because the ClvpConditioningEncoder could increase
            # the sequence lengths.
            self.assertEqual(decoder_hidden_states[0].shape[-1], config.decoder_config.hidden_size)

            # the testing for text encoder stays standard because we just pass the text tokens here.
            self.assertListEqual(
                list(text_encoder_hidden_states[0].shape[-2:]),
                [self.model_tester.clvp_encoder_tester.seq_length, config.text_config.hidden_size],
            )

            # for the speech encoder model we will only test the dimension because the fix_decoder_outputs method
            # could increase the sequence lengths by adding `decoder_fixing_codes` tokens at the end.
            self.assertEqual(speech_encoder_hidden_states[0].shape[-1], config.speech_config.hidden_size)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]

            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Retain_grad is tested in individual model tests")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason="ClvpModelForConditionalGeneration does not have get_input_embeddings")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ClvpModelForConditionalGeneration does not have get_input_embeddings")
    def test_model_get_set_embeddings(self):
        pass

    # override as the `logit_scale` parameter initialization is different for Clvp
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    # check if `logit_scale` is initialized as per the original implementation
                    if name == "logit_scale":
                        expected_value = np.log(1 / 0.07)
                        returned_value = param.data.item()

                        self.assertAlmostEqual(
                            returned_value,
                            expected_value,
                            delta=1e-3,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        expected_range = [0.0, 1.0]
                        returned_range = ((param.data.mean() * 1e9).round() / 1e9).item()

                        self.assertIn(
                            returned_range,
                            expected_range,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

    def test_load_speech_text_decoder_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # Save ClvpConfig and check if we can load ClvpEncoderConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            encoder_config = ClvpEncoderConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.text_config.to_dict(), encoder_config.to_dict())

        # Save ClvpConfig and check if we can load ClvpDecoderConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            decoder_config = ClvpDecoderConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.decoder_config.to_dict(), decoder_config.to_dict())

    @slow
    def test_model_from_pretrained(self):
        model_name = "susnato/clvp_dev"
        model = ClvpModelForConditionalGeneration.from_pretrained(model_name)
        self.assertIsNotNone(model)


# Since Clvp has a lot of different models connected with each other, it's better to test each of them individually
# along with a test_full_model_integration. If the model breaks in the future, it could be of great help to identify
# the broken part.
@slow
@require_torch
class ClvpIntegrationTest(unittest.TestCase):
    def setUp(self):
        self.text = "This is an example text."
        ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050))
        audio = ds.sort("id")["audio"][0]
        self.speech_samples, self.sr = audio["array"], audio["sampling_rate"]

        self.model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev").to(torch_device)
        self.model.eval()
        tokenizer = ClvpTokenizer.from_pretrained("susnato/clvp_dev")
        feature_extractor = ClvpFeatureExtractor.from_pretrained("susnato/clvp_dev")

        tokenizer_output = tokenizer(self.text, return_tensors="pt")
        self.text_tokens = tokenizer_output["input_ids"].to(torch_device)
        self.input_features = feature_extractor(
            raw_speech=self.speech_samples, sampling_rate=self.sr, return_tensors="pt"
        )["input_features"].to(torch_device)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        cleanup(torch_device, gc_collect=True)

    def test_conditional_encoder(self):
        with torch.no_grad():
            conditioning_encoder_outputs = self.model.conditioning_encoder(
                input_features=self.input_features, input_ids=self.text_tokens
            ).to("cpu")

        self.assertEqual(
            conditioning_encoder_outputs.shape,
            torch.Size((self.input_features.shape[0], 18, self.model.config.decoder_config.hidden_size)),
        )

        EXPECTED_OUTPUTS = torch.tensor(
            [[-0.8582, 0.5228, 1.9944], [-0.0465, -1.1017, -0.0093], [-0.0466, -0.6030, -0.1280]]
        )

        torch.testing.assert_close(conditioning_encoder_outputs[0, :3, :3], EXPECTED_OUTPUTS, rtol=1e-4, atol=1e-4)

    def test_decoder_model_generate(self):
        autoregressive_model_output = self.model.speech_decoder_model.generate(input_ids=self.text_tokens).cpu()

        EXPECTED_OUTPUTS = torch.tensor([[147, 2, 54, 2, 43, 2, 169, 122, 29, 64, 2, 136, 37, 33, 9, 8193]])

        torch.testing.assert_close(autoregressive_model_output, EXPECTED_OUTPUTS)

    def test_text_and_speech_encoder_models(self):
        # check for text embeds
        text_embeds = self.model.text_encoder_model(input_ids=self.text_tokens, return_dict=True)[0].cpu()

        # fmt: off
        EXPECTED_TEXT_EMBEDS = torch.tensor([1.4798, -2.0005, 2.3902, -0.5042, 1.6401, -2.4135, -1.4800, 3.0118, -2.4422, 1.3266, 2.2339, 1.4761, -4.8983, -1.3592, 6.0251, 6.7364, 2.2576, 3.7229, -10.0436, 4.6676])
        # fmt: on

        torch.testing.assert_close(text_embeds[0, :20], EXPECTED_TEXT_EMBEDS, rtol=1e-4, atol=1e-4)

        # check for speech embeds
        speech_embeds = self.model.speech_encoder_model(input_ids=self.text_tokens, return_dict=True)[0].cpu()

        # fmt: off
        EXPECTED_SPEECH_EMBEDS = torch.tensor([3.1202, -3.1183, -1.4264, -6.1339, 1.8885, -0.1983, 0.9461, -1.7414, 0.3320, -3.8400, -1.5715, 1.5096, -1.7576, 0.2387, 4.9758, 5.8450, -6.2534, 2.8587, -5.5816, 4.7821])
        # fmt: on

        torch.testing.assert_close(speech_embeds[0, :20], EXPECTED_SPEECH_EMBEDS, rtol=1e-4, atol=1e-4)

    def test_full_model_integration(self):
        full_model_output = self.model.generate(
            input_ids=self.text_tokens,
            input_features=self.input_features,
            do_sample=False,
            num_beams=4,
            num_return_sequences=4,
            max_new_tokens=10,
        )

        EXPECTED_SPEECH_IDS = torch.tensor([[1953, 1080, 612], [1953, 612, 493], [1953, 612, 716]])
        EXPECTED_SIMILARITY_SCORES = torch.tensor([[14.7660, 14.4569, 13.6472, 13.5683]])

        torch.testing.assert_close(full_model_output.speech_ids.cpu()[-3:, -3:], EXPECTED_SPEECH_IDS)
        torch.testing.assert_close(full_model_output.logits_per_text.cpu(), EXPECTED_SIMILARITY_SCORES)
transformers/tests/models/clvp/test_modeling_clvp.py/0
{ "file_path": "transformers/tests/models/clvp/test_modeling_clvp.py", "repo_id": "transformers", "token_count": 11821 }
496
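The integration tests above compare against golden tensors with torch.testing.assert_close, which accepts values within atol + rtol * |expected| elementwise. A tiny illustration of those tolerance semantics — the drift value is made up; only the first three expected text-embed values are taken from the test:

import torch

expected = torch.tensor([1.4798, -2.0005, 2.3902])
actual = expected + 5e-5  # small numerical drift, e.g. from a different kernel or BLAS

# passes: 5e-5 <= 1e-4 + 1e-4 * |expected| for every element
torch.testing.assert_close(actual, expected, rtol=1e-4, atol=1e-4)

# fails once the tolerance is tightened below the drift
try:
    torch.testing.assert_close(actual, expected, rtol=1e-6, atol=1e-6)
except AssertionError:
    print("fails at the tighter tolerance")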
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import (
    require_torch,
    require_torch_accelerator,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available

from ...test_image_processing_common import AnnotationFormatTestMixin, ImageProcessingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DeformableDetrImageProcessor, DeformableDetrImageProcessorFast


class DeformableDetrImageProcessingTester:
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        This function computes the expected height and width when providing images to
        DeformableDetrImageProcessor, assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            elif isinstance(image, np.ndarray):
                h, w = image.shape[0], image.shape[1]
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width

    def expected_output_image_shape(self, images):
        height, width = self.get_expected_values(images, batched=True)
        return self.num_channels, height, width

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )


@require_torch
@require_vision
class DeformableDetrImageProcessingTest(AnnotationFormatTestMixin, ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    fast_image_processing_class = DeformableDetrImageProcessorFast if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            self.assertTrue(hasattr(image_processing, "image_mean"))
            self.assertTrue(hasattr(image_processing, "image_std"))
            self.assertTrue(hasattr(image_processing, "do_normalize"))
            self.assertTrue(hasattr(image_processing, "do_resize"))
            self.assertTrue(hasattr(image_processing, "do_rescale"))
            self.assertTrue(hasattr(image_processing, "do_pad"))
            self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class.from_dict(self.image_processor_dict)
            self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
            self.assertEqual(image_processor.do_pad, True)

            image_processor = image_processing_class.from_dict(
                self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
            )
            self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
            self.assertEqual(image_processor.do_pad, False)

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        for image_processing_class in self.image_processor_list:
            # encode them
            image_processing = image_processing_class()
            encoding = image_processing(images=image, annotations=target, return_tensors="pt")

            # verify pixel values
            expected_shape = torch.Size([1, 3, 800, 1066])
            self.assertEqual(encoding["pixel_values"].shape, expected_shape)

            expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
            torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)

            # verify area
            expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
            torch.testing.assert_close(encoding["labels"][0]["area"], expected_area)
            # verify boxes
            expected_boxes_shape = torch.Size([6, 4])
            self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
            expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
            torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3)
            # verify image_id
            expected_image_id = torch.tensor([39769])
            torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id)
            # verify is_crowd
            expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
            torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd)
            # verify class_labels
            expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
            torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels)
            # verify orig_size
            expected_orig_size = torch.tensor([480, 640])
            torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size)
            # verify size
            expected_size = torch.tensor([800, 1066])
            torch.testing.assert_close(encoding["labels"][0]["size"], expected_size)

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        for image_processing_class in self.image_processor_list:
            # encode them
            image_processing = image_processing_class(format="coco_panoptic")
            encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

            # verify pixel values
            expected_shape = torch.Size([1, 3, 800, 1066])
            self.assertEqual(encoding["pixel_values"].shape, expected_shape)

            expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
            torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)

            # verify area
            expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
            torch.testing.assert_close(encoding["labels"][0]["area"], expected_area)
            # verify boxes
            expected_boxes_shape = torch.Size([6, 4])
            self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
            expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
            torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3)
            # verify image_id
            expected_image_id = torch.tensor([39769])
            torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id)
            # verify is_crowd
            expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
            torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd)
            # verify class_labels
            expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
            torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels)
            # verify
masks expected_masks_sum = 822873 relative_error = torch.abs(encoding["labels"][0]["masks"].sum() - expected_masks_sum) / expected_masks_sum self.assertTrue(relative_error < 1e-3) # verify orig_size expected_orig_size = torch.tensor([480, 640]) torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size) # verify size expected_size = torch.tensor([800, 1066]) torch.testing.assert_close(encoding["labels"][0]["size"], expected_size) @slow # Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_batched_coco_detection_annotations with Detr->DeformableDetr def test_batched_coco_detection_annotations(self): image_0 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") image_1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").resize((800, 800)) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f: target = json.loads(f.read()) annotations_0 = {"image_id": 39769, "annotations": target} annotations_1 = {"image_id": 39769, "annotations": target} # Adjust the bounding boxes for the resized image w_0, h_0 = image_0.size w_1, h_1 = image_1.size for i in range(len(annotations_1["annotations"])): coords = annotations_1["annotations"][i]["bbox"] new_bbox = [ coords[0] * w_1 / w_0, coords[1] * h_1 / h_0, coords[2] * w_1 / w_0, coords[3] * h_1 / h_0, ] annotations_1["annotations"][i]["bbox"] = new_bbox images = [image_0, image_1] annotations = [annotations_0, annotations_1] for image_processing_class in self.image_processor_list: image_processing = image_processing_class() encoding = image_processing( images=images, annotations=annotations, return_segmentation_masks=True, return_tensors="pt", # do_convert_annotations=True ) # Check the pixel values have been padded postprocessed_height, postprocessed_width = 800, 1066 expected_shape = torch.Size([2, 3, postprocessed_height, postprocessed_width]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) # Check the bounding boxes have been adjusted for padded images self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4])) self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4])) expected_boxes_0 = torch.tensor( [ [0.6879, 0.4609, 0.0755, 0.3691], [0.2118, 0.3359, 0.2601, 0.1566], [0.5011, 0.5000, 0.9979, 1.0000], [0.5010, 0.5020, 0.9979, 0.9959], [0.3284, 0.5944, 0.5884, 0.8112], [0.8394, 0.5445, 0.3213, 0.9110], ] ) expected_boxes_1 = torch.tensor( [ [0.4130, 0.2765, 0.0453, 0.2215], [0.1272, 0.2016, 0.1561, 0.0940], [0.3757, 0.4933, 0.7488, 0.9865], [0.3759, 0.5002, 0.7492, 0.9955], [0.1971, 0.5456, 0.3532, 0.8646], [0.5790, 0.4115, 0.3430, 0.7161], ] ) torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3, rtol=1e-3) torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3, rtol=1e-3) # Check the masks have also been padded self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1066])) self.assertEqual(encoding["labels"][1]["masks"].shape, torch.Size([6, 800, 1066])) # Check if do_convert_annotations=False, then the annotations are not converted to centre_x, centre_y, width, height # format and not in the range [0, 1] encoding = image_processing( images=images, annotations=annotations, return_segmentation_masks=True, do_convert_annotations=False, return_tensors="pt", ) self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4])) self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4])) # 
Convert to absolute coordinates unnormalized_boxes_0 = torch.vstack( [ expected_boxes_0[:, 0] * postprocessed_width, expected_boxes_0[:, 1] * postprocessed_height, expected_boxes_0[:, 2] * postprocessed_width, expected_boxes_0[:, 3] * postprocessed_height, ] ).T unnormalized_boxes_1 = torch.vstack( [ expected_boxes_1[:, 0] * postprocessed_width, expected_boxes_1[:, 1] * postprocessed_height, expected_boxes_1[:, 2] * postprocessed_width, expected_boxes_1[:, 3] * postprocessed_height, ] ).T # Convert from centre_x, centre_y, width, height to x_min, y_min, x_max, y_max expected_boxes_0 = torch.vstack( [ unnormalized_boxes_0[:, 0] - unnormalized_boxes_0[:, 2] / 2, unnormalized_boxes_0[:, 1] - unnormalized_boxes_0[:, 3] / 2, unnormalized_boxes_0[:, 0] + unnormalized_boxes_0[:, 2] / 2, unnormalized_boxes_0[:, 1] + unnormalized_boxes_0[:, 3] / 2, ] ).T expected_boxes_1 = torch.vstack( [ unnormalized_boxes_1[:, 0] - unnormalized_boxes_1[:, 2] / 2, unnormalized_boxes_1[:, 1] - unnormalized_boxes_1[:, 3] / 2, unnormalized_boxes_1[:, 0] + unnormalized_boxes_1[:, 2] / 2, unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2, ] ).T torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1, rtol=1) torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1, rtol=1) # Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_batched_coco_panoptic_annotations with Detr->DeformableDetr def test_batched_coco_panoptic_annotations(self): # prepare image, target and masks_path image_0 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") image_1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").resize((800, 800)) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt") as f: target = json.loads(f.read()) annotation_0 = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} annotation_1 = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} w_0, h_0 = image_0.size w_1, h_1 = image_1.size for i in range(len(annotation_1["segments_info"])): coords = annotation_1["segments_info"][i]["bbox"] new_bbox = [ coords[0] * w_1 / w_0, coords[1] * h_1 / h_0, coords[2] * w_1 / w_0, coords[3] * h_1 / h_0, ] annotation_1["segments_info"][i]["bbox"] = new_bbox masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") images = [image_0, image_1] annotations = [annotation_0, annotation_1] for image_processing_class in self.image_processor_list: # encode them image_processing = image_processing_class(format="coco_panoptic") encoding = image_processing( images=images, annotations=annotations, masks_path=masks_path, return_tensors="pt", return_segmentation_masks=True, ) # Check the pixel values have been padded postprocessed_height, postprocessed_width = 800, 1066 expected_shape = torch.Size([2, 3, postprocessed_height, postprocessed_width]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) # Check the bounding boxes have been adjusted for padded images self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4])) self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4])) expected_boxes_0 = torch.tensor( [ [0.2625, 0.5437, 0.4688, 0.8625], [0.7719, 0.4104, 0.4531, 0.7125], [0.5000, 0.4927, 0.9969, 0.9854], [0.1688, 0.2000, 0.2063, 0.0917], [0.5492, 0.2760, 0.0578, 0.2187], [0.4992, 0.4990, 0.9984, 0.9979], ] ) expected_boxes_1 = torch.tensor( [ [0.1576, 0.3262, 0.2814, 0.5175], 
                    [0.4634, 0.2463, 0.2720, 0.4275],
                    [0.3002, 0.2956, 0.5985, 0.5913],
                    [0.1013, 0.1200, 0.1238, 0.0550],
                    [0.3297, 0.1656, 0.0347, 0.1312],
                    [0.2997, 0.2994, 0.5994, 0.5987],
                ]
            )

            torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3, rtol=1e-3)
            torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3, rtol=1e-3)

            # Check the masks have also been padded
            self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1066]))
            self.assertEqual(encoding["labels"][1]["masks"].shape, torch.Size([6, 800, 1066]))

            # Check if do_convert_annotations=False, then the annotations are not converted to centre_x, centre_y, width, height
            # format and not in the range [0, 1]
            encoding = image_processing(
                images=images,
                annotations=annotations,
                masks_path=masks_path,
                return_segmentation_masks=True,
                do_convert_annotations=False,
                return_tensors="pt",
            )
            self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
            self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
            # Convert to absolute coordinates
            unnormalized_boxes_0 = torch.vstack(
                [
                    expected_boxes_0[:, 0] * postprocessed_width,
                    expected_boxes_0[:, 1] * postprocessed_height,
                    expected_boxes_0[:, 2] * postprocessed_width,
                    expected_boxes_0[:, 3] * postprocessed_height,
                ]
            ).T
            unnormalized_boxes_1 = torch.vstack(
                [
                    expected_boxes_1[:, 0] * postprocessed_width,
                    expected_boxes_1[:, 1] * postprocessed_height,
                    expected_boxes_1[:, 2] * postprocessed_width,
                    expected_boxes_1[:, 3] * postprocessed_height,
                ]
            ).T
            # Convert from centre_x, centre_y, width, height to x_min, y_min, x_max, y_max
            expected_boxes_0 = torch.vstack(
                [
                    unnormalized_boxes_0[:, 0] - unnormalized_boxes_0[:, 2] / 2,
                    unnormalized_boxes_0[:, 1] - unnormalized_boxes_0[:, 3] / 2,
                    unnormalized_boxes_0[:, 0] + unnormalized_boxes_0[:, 2] / 2,
                    unnormalized_boxes_0[:, 1] + unnormalized_boxes_0[:, 3] / 2,
                ]
            ).T
            expected_boxes_1 = torch.vstack(
                [
                    unnormalized_boxes_1[:, 0] - unnormalized_boxes_1[:, 2] / 2,
                    unnormalized_boxes_1[:, 1] - unnormalized_boxes_1[:, 3] / 2,
                    unnormalized_boxes_1[:, 0] + unnormalized_boxes_1[:, 2] / 2,
                    unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
                ]
            ).T
            torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1, rtol=1)
            torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1, rtol=1)

    # Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_max_width_max_height_resizing_and_pad_strategy with Detr->DeformableDetr
    def test_max_width_max_height_resizing_and_pad_strategy(self):
        for image_processing_class in self.image_processor_list:
            image_1 = torch.ones([200, 100, 3], dtype=torch.uint8)

            # do_pad=False, max_height=100, max_width=100, image=200x100 -> 100x50
            image_processor = image_processing_class(
                size={"max_height": 100, "max_width": 100},
                do_pad=False,
            )
            inputs = image_processor(images=[image_1], return_tensors="pt")
            self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 100, 50]))

            # do_pad=False, max_height=300, max_width=100, image=200x100 -> 200x100
            image_processor = image_processing_class(
                size={"max_height": 300, "max_width": 100},
                do_pad=False,
            )
            inputs = image_processor(images=[image_1], return_tensors="pt")
            self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 200, 100]))

            # do_pad=True, max_height=100, max_width=100, image=200x100 -> 100x100
            image_processor = image_processing_class(
                size={"max_height": 100, "max_width": 100}, do_pad=True, pad_size={"height": 100, "width": 100}
            )
            inputs = image_processor(images=[image_1], return_tensors="pt")
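            # Editor's note: pad_size takes precedence over the resized extent here - the image is
            # first resized to 100x50 and then zero-padded up to the requested 100x100 output.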
self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 100, 100])) # do_pad=True, max_height=300, max_width=100, image=200x100 -> 300x100 image_processor = image_processing_class( size={"max_height": 300, "max_width": 100}, do_pad=True, pad_size={"height": 301, "width": 101}, ) inputs = image_processor(images=[image_1], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 301, 101])) ### Check for batch image_2 = torch.ones([100, 150, 3], dtype=torch.uint8) # do_pad=True, max_height=150, max_width=100, images=[200x100, 100x150] -> 150x100 image_processor = image_processing_class( size={"max_height": 150, "max_width": 100}, do_pad=True, pad_size={"height": 150, "width": 100}, ) inputs = image_processor(images=[image_1, image_2], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([2, 3, 150, 100])) def test_longest_edge_shortest_edge_resizing_strategy(self): for image_processing_class in self.image_processor_list: image_1 = torch.ones([958, 653, 3], dtype=torch.uint8) # max size is set; width < height; # do_pad=False, longest_edge=640, shortest_edge=640, image=958x653 -> 640x436 image_processor = image_processing_class( size={"longest_edge": 640, "shortest_edge": 640}, do_pad=False, ) inputs = image_processor(images=[image_1], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 640, 436])) image_2 = torch.ones([653, 958, 3], dtype=torch.uint8) # max size is set; height < width; # do_pad=False, longest_edge=640, shortest_edge=640, image=653x958 -> 436x640 image_processor = image_processing_class( size={"longest_edge": 640, "shortest_edge": 640}, do_pad=False, ) inputs = image_processor(images=[image_2], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 436, 640])) image_3 = torch.ones([100, 120, 3], dtype=torch.uint8) # max size is set; width == size; height > max_size; # do_pad=False, longest_edge=118, shortest_edge=100, image=120x100 -> 118x98 image_processor = image_processing_class( size={"longest_edge": 118, "shortest_edge": 100}, do_pad=False, ) inputs = image_processor(images=[image_3], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 98, 118])) image_4 = torch.ones([128, 50, 3], dtype=torch.uint8) # max size is set; height == size; width < max_size; # do_pad=False, longest_edge=256, shortest_edge=50, image=50x128 -> 50x128 image_processor = image_processing_class( size={"longest_edge": 256, "shortest_edge": 50}, do_pad=False, ) inputs = image_processor(images=[image_4], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 128, 50])) image_5 = torch.ones([50, 50, 3], dtype=torch.uint8) # max size is set; height == width; width < max_size; # do_pad=False, longest_edge=117, shortest_edge=50, image=50x50 -> 50x50 image_processor = image_processing_class( size={"longest_edge": 117, "shortest_edge": 50}, do_pad=False, ) inputs = image_processor(images=[image_5], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 50, 50])) @slow @require_torch_accelerator # Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_fast_processor_equivalence_cpu_accelerator_coco_detection_annotations def test_fast_processor_equivalence_cpu_accelerator_coco_detection_annotations(self): # prepare image and target image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with 
open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f: target = json.loads(f.read()) target = {"image_id": 39769, "annotations": target} # Ignore copy processor = self.image_processor_list[1]() # 1. run processor on CPU encoding_cpu = processor(images=image, annotations=target, return_tensors="pt", device="cpu") # 2. run processor on accelerator encoding_gpu = processor(images=image, annotations=target, return_tensors="pt", device=torch_device) # verify pixel values self.assertEqual(encoding_cpu["pixel_values"].shape, encoding_gpu["pixel_values"].shape) self.assertTrue( torch.allclose( encoding_cpu["pixel_values"][0, 0, 0, :3], encoding_gpu["pixel_values"][0, 0, 0, :3].to("cpu"), atol=1e-4, ) ) # verify area torch.testing.assert_close(encoding_cpu["labels"][0]["area"], encoding_gpu["labels"][0]["area"].to("cpu")) # verify boxes self.assertEqual(encoding_cpu["labels"][0]["boxes"].shape, encoding_gpu["labels"][0]["boxes"].shape) self.assertTrue( torch.allclose( encoding_cpu["labels"][0]["boxes"][0], encoding_gpu["labels"][0]["boxes"][0].to("cpu"), atol=1e-3 ) ) # verify image_id torch.testing.assert_close( encoding_cpu["labels"][0]["image_id"], encoding_gpu["labels"][0]["image_id"].to("cpu") ) # verify is_crowd torch.testing.assert_close( encoding_cpu["labels"][0]["iscrowd"], encoding_gpu["labels"][0]["iscrowd"].to("cpu") ) # verify class_labels self.assertTrue( torch.allclose( encoding_cpu["labels"][0]["class_labels"], encoding_gpu["labels"][0]["class_labels"].to("cpu") ) ) # verify orig_size torch.testing.assert_close( encoding_cpu["labels"][0]["orig_size"], encoding_gpu["labels"][0]["orig_size"].to("cpu") ) # verify size torch.testing.assert_close(encoding_cpu["labels"][0]["size"], encoding_gpu["labels"][0]["size"].to("cpu")) @slow @require_torch_accelerator # Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_fast_processor_equivalence_cpu_accelerator_coco_panoptic_annotations def test_fast_processor_equivalence_cpu_accelerator_coco_panoptic_annotations(self): # prepare image, target and masks_path image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt") as f: target = json.loads(f.read()) target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") # Ignore copy processor = self.image_processor_list[1](format="coco_panoptic") # 1. run processor on CPU encoding_cpu = processor( images=image, annotations=target, masks_path=masks_path, return_tensors="pt", device="cpu" ) # 2. 
run processor on accelerator encoding_gpu = processor( images=image, annotations=target, masks_path=masks_path, return_tensors="pt", device=torch_device ) # verify pixel values self.assertEqual(encoding_cpu["pixel_values"].shape, encoding_gpu["pixel_values"].shape) self.assertTrue( torch.allclose( encoding_cpu["pixel_values"][0, 0, 0, :3], encoding_gpu["pixel_values"][0, 0, 0, :3].to("cpu"), atol=1e-4, ) ) # verify area torch.testing.assert_close(encoding_cpu["labels"][0]["area"], encoding_gpu["labels"][0]["area"].to("cpu")) # verify boxes self.assertEqual(encoding_cpu["labels"][0]["boxes"].shape, encoding_gpu["labels"][0]["boxes"].shape) self.assertTrue( torch.allclose( encoding_cpu["labels"][0]["boxes"][0], encoding_gpu["labels"][0]["boxes"][0].to("cpu"), atol=1e-3 ) ) # verify image_id torch.testing.assert_close( encoding_cpu["labels"][0]["image_id"], encoding_gpu["labels"][0]["image_id"].to("cpu") ) # verify is_crowd torch.testing.assert_close( encoding_cpu["labels"][0]["iscrowd"], encoding_gpu["labels"][0]["iscrowd"].to("cpu") ) # verify class_labels self.assertTrue( torch.allclose( encoding_cpu["labels"][0]["class_labels"], encoding_gpu["labels"][0]["class_labels"].to("cpu") ) ) # verify masks masks_sum_cpu = encoding_cpu["labels"][0]["masks"].sum() masks_sum_gpu = encoding_gpu["labels"][0]["masks"].sum() relative_error = torch.abs(masks_sum_cpu - masks_sum_gpu) / masks_sum_cpu self.assertTrue(relative_error < 1e-3) # verify orig_size torch.testing.assert_close( encoding_cpu["labels"][0]["orig_size"], encoding_gpu["labels"][0]["orig_size"].to("cpu") ) # verify size torch.testing.assert_close(encoding_cpu["labels"][0]["size"], encoding_gpu["labels"][0]["size"].to("cpu"))
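

# Editor's addition, not part of the original test file: a minimal, hedged sketch of the
# end-to-end flow the tests above exercise piecewise (preprocess -> model -> post-process).
# The checkpoint name is an assumption; any Deformable DETR checkpoint with a compatible
# preprocessor config should behave the same way.
def _example_detection_roundtrip(image):
    from transformers import DeformableDetrForObjectDetection

    processor = DeformableDetrImageProcessor.from_pretrained("SenseTime/deformable-detr")
    model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr")
    # resize (shortest_edge/longest_edge), rescale, normalize and pad in a single call
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # map normalized (cx, cy, w, h) predictions back to absolute (x_min, y_min, x_max, y_max)
    target_sizes = torch.tensor([image.size[::-1]])  # PIL reports (width, height)
    return processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)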
transformers/tests/models/deformable_detr/test_image_processing_deformable_detr.py/0
{ "file_path": "transformers/tests/models/deformable_detr/test_image_processing_deformable_detr.py", "repo_id": "transformers", "token_count": 17356 }
497
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import shutil
import tempfile
import unittest

import numpy as np
from parameterized import parameterized

from transformers import DacModel, DiaFeatureExtractor, DiaProcessor, DiaTokenizer
from transformers.testing_utils import require_torch
from transformers.utils import is_torch_available


if is_torch_available():
    import torch


# Copied from tests.utils.test_modeling_utils.check_models_equal
def check_models_equal(model1, model2):
    models_are_equal = True
    for model1_p, model2_p in zip(model1.parameters(), model2.parameters()):
        if model1_p.data.ne(model2_p.data).sum() > 0:
            models_are_equal = False
    return models_are_equal


@require_torch
class DiaProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "AntonV/Dia-1.6B"
        self.audio_tokenizer_checkpoint = "descript/dac_44khz"
        self.tmpdirname = tempfile.mkdtemp()
        # Audio tokenizer is a bigger model so we will reuse this if possible
        self.processor = DiaProcessor(
            tokenizer=self.get_tokenizer(),
            feature_extractor=self.get_feature_extractor(),
            audio_tokenizer=self.get_audio_tokenizer(),
        )

        # Default audio values based on Dia and Dac
        self.pad_id = 1025
        self.bos_id = 1026
        self.dac_chunk_len = 512
        self.delay_pattern = [0, 8, 9, 10, 11, 12, 13, 14, 15]

    def get_tokenizer(self, **kwargs):
        return DiaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return DiaFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def get_audio_tokenizer(self, **kwargs):
        return DacModel.from_pretrained(self.audio_tokenizer_checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
        del self.processor

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        audio_tokenizer = self.get_audio_tokenizer()

        processor = DiaProcessor(
            tokenizer=tokenizer, feature_extractor=feature_extractor, audio_tokenizer=audio_tokenizer
        )
        processor.save_pretrained(self.tmpdirname)
        processor = DiaProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, DiaTokenizer)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, DiaFeatureExtractor)

        self.assertEqual(processor.audio_tokenizer.__class__.__name__, audio_tokenizer.__class__.__name__)
        self.assertEqual(processor.audio_tokenizer.name_or_path, audio_tokenizer.name_or_path)
        self.assertTrue(check_models_equal(processor.audio_tokenizer, audio_tokenizer))
        self.assertIsInstance(processor.audio_tokenizer, DacModel)

    def test_save_load_pretrained_additional_features(self):
        processor = DiaProcessor(
            tokenizer=self.get_tokenizer(),
            feature_extractor=self.get_feature_extractor(),
            audio_tokenizer=self.get_audio_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer()
feature_extractor_add_kwargs = self.get_feature_extractor() audio_tokenizer_add_kwargs = self.get_audio_tokenizer() processor = DiaProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, DiaTokenizer) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) self.assertIsInstance(processor.feature_extractor, DiaFeatureExtractor) self.assertEqual(processor.audio_tokenizer.__class__.__name__, audio_tokenizer_add_kwargs.__class__.__name__) self.assertEqual(processor.audio_tokenizer.name_or_path, audio_tokenizer_add_kwargs.name_or_path) self.assertTrue(check_models_equal(processor.audio_tokenizer, audio_tokenizer_add_kwargs)) self.assertIsInstance(processor.audio_tokenizer, DacModel) def test_tokenize(self): tokenizer = self.get_tokenizer() random_text = ["This is a processing test for tokenization", "[S1] Dia template style [S2] Nice"] input_tokenizer = tokenizer(random_text, padding=True, return_tensors="pt") input_processor = self.processor(random_text) for key in input_tokenizer: self.assertTrue((input_tokenizer[key] == input_processor[key]).all()) def test_no_audio(self): random_text = ["Dummy Input"] * 2 input_processor = self.processor(random_text) audio_tokens, audio_mask = input_processor["decoder_input_ids"], input_processor["decoder_attention_mask"] # full mask with +1 for bos self.assertTrue(audio_mask.sum() == (max(self.delay_pattern) + 1) * len(random_text)) self.assertTrue( audio_tokens.shape == ( len(random_text), max(self.delay_pattern) + 1, len(self.delay_pattern), ) ) for channel_idx, delay in enumerate(self.delay_pattern): expected_sequence = torch.ones(size=(audio_tokens.shape[:-1])) * self.pad_id expected_sequence[:, : delay + 1] = self.bos_id self.assertTrue((audio_tokens[..., channel_idx] == expected_sequence).all()) def test_audio(self): audio_tokenizer = self.get_audio_tokenizer() feature_extractor = self.get_feature_extractor() random_text = ["Dummy Input"] * 2 # Dac only starts accepting audio from a certain length (ensured via >=1024) raw_speeches = [np.random.rand(2048).astype(np.float32), np.random.rand(1024).astype(np.float32)] input_processor = self.processor(random_text, raw_speeches) audio_tokens, audio_mask = input_processor["decoder_input_ids"], input_processor["decoder_attention_mask"] sequence_len = audio_mask.shape[1] for batch_idx, speech in enumerate(raw_speeches): raw_audio = feature_extractor(speech, return_tensors="pt")["input_values"] codebooks = audio_tokenizer(raw_audio).audio_codes.transpose(1, 2) pad_len = sequence_len - audio_mask.sum(dim=-1)[batch_idx] for channel_idx, delay in enumerate(self.delay_pattern): # Left padding filled bos, right padding (delay) are pad start_idx = pad_len + delay + 1 end_idx = start_idx + codebooks.shape[1] encoded_sequence = audio_tokens[batch_idx, :, channel_idx] expected_sequence = torch.ones(size=(sequence_len,)) * self.pad_id expected_sequence[:start_idx] = self.bos_id expected_sequence[start_idx:end_idx] = codebooks[0, :, channel_idx] self.assertTrue((encoded_sequence == expected_sequence).all()) # Just to make sure the masking correctly only ignores bos tokens self.assertTrue((audio_tokens[~audio_mask.bool()] == self.bos_id).all()) @parameterized.expand([([1, 1],), ([1, 5],), ([2, 4, 6],)]) def test_decode_audio(self, audio_lens): feature_extractor = self.get_feature_extractor() audio_tokenizer = self.get_audio_tokenizer() random_text = ["Dummy 
Input"] * len(audio_lens) raw_speeches = [np.random.rand(self.dac_chunk_len * l).astype(np.float32) for l in audio_lens] # we need eos (given if training) to decode properly, also enforced via custom logits processor input_processor = self.processor(random_text, raw_speeches, generation=False) audio_tokens = input_processor["decoder_input_ids"] decoded_speeches = self.processor.batch_decode(audio_tokens) for batch_idx, speech in enumerate(raw_speeches): raw_audio = feature_extractor(speech, return_tensors="pt")["input_values"] codebooks = audio_tokenizer(raw_audio).audio_codes decoded_audio = decoded_speeches[batch_idx] expected_audio = audio_tokenizer.decode(audio_codes=codebooks).audio_values self.assertTrue((expected_audio == decoded_audio).all()) self.assertTrue(decoded_speeches[batch_idx].shape[-1] == audio_lens[batch_idx] * self.dac_chunk_len) @parameterized.expand([(1, 2, [0, 1, 4]), (2, 4, [1, 3, 2]), (4, 8, [0, 5, 7])]) def test_delay_in_audio(self, bsz, seq_len, delay_pattern): # static functions which are crucial, hence we also test them here build_indices_fn = DiaProcessor.build_indices delay_fn = DiaProcessor.apply_audio_delay bos, pad = -2, -1 num_channels = len(delay_pattern) audio_input = torch.arange(bsz * seq_len * num_channels).view(bsz, seq_len, num_channels) # imitate a delay mask with zeroes audio_input = torch.cat([audio_input, torch.zeros(size=(bsz, max(delay_pattern), num_channels))], dim=1) precomputed_idx = build_indices_fn( bsz=bsz, seq_len=seq_len + max(delay_pattern), num_channels=num_channels, delay_pattern=delay_pattern, revert=False, ) delayed_audio_out = delay_fn( audio=audio_input, pad_token_id=pad, bos_token_id=bos, precomputed_idx=precomputed_idx, ) # every channel idx is shifted by delay_pattern[idx] delayed_audio_res = audio_input.clone() for idx, delay in enumerate(delay_pattern): delayed_audio_res[:, :delay, idx] = bos remaining_input = seq_len + max(delay_pattern) - delay delayed_audio_res[:, delay:, idx] = audio_input[:, :remaining_input, idx] self.assertTrue((delayed_audio_out == delayed_audio_res).all()) # we should get back to the original audio we had (when removing the delay pad) bsz, new_seq_len, num_channels = delayed_audio_out.shape precomputed_idx = build_indices_fn( bsz=bsz, seq_len=new_seq_len, num_channels=num_channels, delay_pattern=delay_pattern, revert=True, ) reverted_audio_out = delay_fn( audio=delayed_audio_out, pad_token_id=pad, bos_token_id=bos, precomputed_idx=precomputed_idx, ) reverted_audio_res = audio_input.clone()[:, :seq_len] self.assertTrue((reverted_audio_out[:, :seq_len] == reverted_audio_res).all())
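

# Editor's addition, not part of the original test file: a hedged sketch of the text/audio round
# trip the tests above verify piece by piece. The checkpoint mirrors the one used in setUp; that
# from_pretrained also restores the DAC audio tokenizer is an assumption here.
def _example_audio_roundtrip(text, speech):
    processor = DiaProcessor.from_pretrained("AntonV/Dia-1.6B")
    # text -> token ids; audio -> delayed DAC codebooks (bos/pad laid out per the delay pattern)
    inputs = processor(text, speech, generation=False)
    # batch_decode reverts the channel delays and decodes the codebooks back to waveforms
    return processor.batch_decode(inputs["decoder_input_ids"])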
transformers/tests/models/dia/test_processing_dia.py/0
{ "file_path": "transformers/tests/models/dia/test_processing_dia.py", "repo_id": "transformers", "token_count": 4754 }
498
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import pytest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_flash_attn, require_torch, require_torch_accelerator, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( AutoTokenizer, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) from transformers.models.distilbert.modeling_distilbert import _create_sinusoidal_embeddings from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_4 class DistilBertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return DistilBertConfig( vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, 
n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_distilbert_model( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DistilBertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_distilbert_for_masked_lm( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DistilBertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_distilbert_for_question_answering( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DistilBertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_distilbert_for_sequence_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = DistilBertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_distilbert_for_token_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = DistilBertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_distilbert_for_multiple_choice( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = DistilBertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( DistilBertModel, DistilBertForMaskedLM, 
DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) pipeline_model_mapping = ( { "feature-extraction": DistilBertModel, "fill-mask": DistilBertForMaskedLM, "question-answering": DistilBertForQuestionAnswering, "text-classification": DistilBertForSequenceClassification, "token-classification": DistilBertForTokenClassification, "zero-shot": DistilBertForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = True test_pruning = True test_resize_embeddings = True test_resize_position_embeddings = True def setUp(self): self.model_tester = DistilBertModelTester(self) self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37) def test_config(self): self.config_tester.run_common_tests() def test_distilbert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*config_and_inputs) def test_distilbert_model_with_sinusoidal_encodings(self): config = DistilBertConfig(sinusoidal_pos_embds=True) model = DistilBertModel(config=config) sinusoidal_pos_embds = torch.empty((config.max_position_embeddings, config.dim), dtype=torch.float32) _create_sinusoidal_embeddings(config.max_position_embeddings, config.dim, sinusoidal_pos_embds) self.model_tester.parent.assertTrue( torch.equal(model.embeddings.position_embeddings.weight, sinusoidal_pos_embds) ) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "distilbert-base-uncased" model = DistilBertModel.from_pretrained(model_name) self.assertIsNotNone(model) # Because DistilBertForMultipleChoice requires inputs with different shapes we need to override this test. 
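    # The shared mixin draws model-specific inputs that do not fit the multiple-choice head, so the
    # overrides below build fixed token/mask batches and compare eager vs. flash-attention hidden states.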
    @require_flash_attn
    @require_torch_accelerator
    @pytest.mark.flash_attn_test
    @slow
    def test_flash_attn_2_inference_equivalence(self):
        import torch

        for model_class in self.all_model_classes:
            dummy_input = torch.LongTensor(
                [
                    [1, 2, 3, 4],
                    [1, 2, 8, 9],
                    [1, 2, 11, 12],
                    [1, 2, 13, 14],
                ]
            ).to(torch_device)
            dummy_attention_mask = torch.LongTensor(
                [
                    [0, 1, 1, 1],
                    [0, 1, 1, 1],
                    [0, 1, 1, 1],
                    [0, 1, 1, 1],
                ]
            ).to(torch_device)

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model_fa = model_class.from_pretrained(
                    tmpdirname, dtype=torch.bfloat16, attn_implementation="flash_attention_2"
                )
                model_fa.to(torch_device)

                model = model_class.from_pretrained(tmpdirname, dtype=torch.bfloat16)
                model.to(torch_device)

                logits = model(dummy_input, output_hidden_states=True).hidden_states[-1]
                logits_fa = model_fa(dummy_input, output_hidden_states=True).hidden_states[-1]

                torch.testing.assert_close(logits_fa, logits, rtol=4e-2, atol=4e-2)

                output_fa = model_fa(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True)
                logits_fa = output_fa.hidden_states[-1]

                output = model(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True)
                logits = output.hidden_states[-1]

                torch.testing.assert_close(logits_fa[1:], logits[1:], rtol=4e-2, atol=4e-2)

    # Because DistilBertForMultipleChoice requires inputs with different shapes we need to override this test.
    @require_flash_attn
    @require_torch_accelerator
    @pytest.mark.flash_attn_test
    @slow
    def test_flash_attn_2_inference_equivalence_right_padding(self):
        import torch

        for model_class in self.all_model_classes:
            dummy_input = torch.LongTensor(
                [
                    [1, 2, 3, 4],
                    [1, 2, 8, 9],
                    [1, 2, 11, 12],
                    [1, 2, 13, 14],
                ]
            ).to(torch_device)
            dummy_attention_mask = torch.LongTensor(
                [
                    [0, 1, 1, 1],
                    [0, 1, 1, 1],
                    [0, 1, 1, 1],
                    [0, 1, 1, 1],
                ]
            ).to(torch_device)

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model_fa = model_class.from_pretrained(
                    tmpdirname, dtype=torch.bfloat16, attn_implementation="flash_attention_2"
                )
                model_fa.to(torch_device)

                model = model_class.from_pretrained(
                    tmpdirname,
                    dtype=torch.bfloat16,
                )
                model.to(torch_device)

                logits = model(dummy_input, output_hidden_states=True).hidden_states[-1]
                logits_fa = model_fa(dummy_input, output_hidden_states=True).hidden_states[-1]

                torch.testing.assert_close(logits_fa, logits, rtol=4e-2, atol=4e-2)

                output_fa = model_fa(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True)
                logits_fa = output_fa.hidden_states[-1]

                output = model(dummy_input, attention_mask=dummy_attention_mask, output_hidden_states=True)
                logits = output.hidden_states[-1]

                torch.testing.assert_close(logits_fa[:-1], logits[:-1], rtol=4e-2, atol=4e-2)


@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710],
[-0.1884, 0.3357, 0.1810]]] ) torch.testing.assert_close(output[:, 1:4, 1:4], expected_slice, rtol=1e-4, atol=1e-4) @pytest.mark.torch_export_test @slow def test_export(self): if not is_torch_greater_or_equal_than_2_4: self.skipTest(reason="This test requires torch >= 2.4 to run.") distilbert_model = "distilbert-base-uncased" device = "cpu" attn_implementation = "sdpa" max_length = 64 tokenizer = AutoTokenizer.from_pretrained(distilbert_model) inputs = tokenizer( f"Paris is the {tokenizer.mask_token} of France.", return_tensors="pt", padding="max_length", max_length=max_length, ) model = DistilBertForMaskedLM.from_pretrained( distilbert_model, device_map=device, attn_implementation=attn_implementation, ) logits = model(**inputs).logits eager_predicted_mask = tokenizer.decode(logits[0, 4].topk(5).indices) self.assertEqual( eager_predicted_mask.split(), ["capital", "birthplace", "northernmost", "centre", "southernmost"], ) exported_program = torch.export.export( model, args=(inputs["input_ids"],), kwargs={"attention_mask": inputs["attention_mask"]}, strict=True, ) result = exported_program.module().forward(inputs["input_ids"], inputs["attention_mask"]) exported_predicted_mask = tokenizer.decode(result.logits[0, 4].topk(5).indices) self.assertEqual(eager_predicted_mask, exported_predicted_mask)
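

# Editor's addition, not part of the original test file: a hedged sketch of the same fill-mask
# behaviour that test_export above asserts on step by step, expressed through the pipeline API.
def _example_fill_mask():
    from transformers import pipeline

    unmasker = pipeline("fill-mask", model="distilbert-base-uncased")
    # returns the top candidates for the masked token, e.g. "capital" for this prompt
    return unmasker("Paris is the [MASK] of France.")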
transformers/tests/models/distilbert/test_modeling_distilbert.py/0
{ "file_path": "transformers/tests/models/distilbert/test_modeling_distilbert.py", "repo_id": "transformers", "token_count": 8372 }
499
# Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from datasets import load_dataset from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torchvision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import DPTImageProcessor if is_torchvision_available(): from transformers import DPTImageProcessorFast class DPTImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_reduce_labels=False, ): size = size if size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_reduce_labels = do_reduce_labels def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "do_reduce_labels": self.do_reduce_labels, } def expected_output_image_shape(self, images): return self.num_channels, self.size["height"], self.size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) # Copied from transformers.tests.models.beit.test_image_processing_beit.prepare_semantic_single_inputs def prepare_semantic_single_inputs(): ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") example = ds[0] return example["image"], example["map"] # Copied from transformers.tests.models.beit.test_image_processing_beit.prepare_semantic_batch_inputs def prepare_semantic_batch_inputs(): ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") return list(ds["image"][:2]), list(ds["map"][:2]) @require_torch @require_vision class DPTImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = DPTImageProcessor if is_vision_available() else None fast_image_processing_class = DPTImageProcessorFast if is_torchvision_available() else None def setUp(self): super().setUp() self.image_processor_tester = DPTImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for 
image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "rescale_factor")) self.assertTrue(hasattr(image_processing, "do_pad")) self.assertTrue(hasattr(image_processing, "size_divisor")) self.assertTrue(hasattr(image_processing, "do_reduce_labels")) def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processing_class = image_processing_class(**self.image_processor_dict) image_processor = image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_padding(self): for image_processing_class in self.image_processor_list: if image_processing_class == DPTImageProcessorFast: image = torch.arange(0, 366777, 1, dtype=torch.uint8).reshape(3, 249, 491) image_processor = image_processing_class(**self.image_processor_dict) padded_image = image_processor.pad_image(image, size_divisor=4) self.assertTrue(padded_image.shape[1] % 4 == 0) self.assertTrue(padded_image.shape[2] % 4 == 0) pixel_values = image_processor.preprocess( image, do_rescale=False, do_resize=False, do_pad=True, size_divisor=4, return_tensors="pt" ).pixel_values self.assertTrue(pixel_values.shape[2] % 4 == 0) self.assertTrue(pixel_values.shape[3] % 4 == 0) else: image_processor = image_processing_class(**self.image_processor_dict) image = np.random.randn(3, 249, 491) image = image_processor.pad_image(image, size_divisor=4) self.assertTrue(image.shape[1] % 4 == 0) self.assertTrue(image.shape[2] % 4 == 0) pixel_values = image_processor.preprocess( image, do_rescale=False, do_resize=False, do_pad=True, size_divisor=4, return_tensors="pt" ).pixel_values self.assertTrue(pixel_values.shape[2] % 4 == 0) self.assertTrue(pixel_values.shape[3] % 4 == 0) def test_keep_aspect_ratio(self): size = {"height": 512, "width": 512} for image_processing_class in self.image_processor_list: image_processor = image_processing_class(size=size, keep_aspect_ratio=True, ensure_multiple_of=32) image = np.zeros((489, 640, 3)) pixel_values = image_processor(image, return_tensors="pt").pixel_values self.assertEqual(list(pixel_values.shape), [1, 3, 512, 672]) # Copied from transformers.tests.models.beit.test_image_processing_beit.BeitImageProcessingTest.test_call_segmentation_maps def test_call_segmentation_maps(self): for image_processing_class in self.image_processor_list: # Initialize image_processor image_processor = image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) maps = [] for image in image_inputs: self.assertIsInstance(image, torch.Tensor) maps.append(torch.zeros(image.shape[-2:]).long()) # Test not batched input encoding = image_processor(image_inputs[0], maps[0], return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 1, self.image_processor_tester.num_channels, 
self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 1, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) # Test batched encoding = image_processor(image_inputs, maps, return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) # Test not batched input (PIL images) image, segmentation_map = prepare_semantic_single_inputs() encoding = image_processor(image, segmentation_map, return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 1, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) # Test batched input (PIL images) images, segmentation_maps = prepare_semantic_batch_inputs() encoding = image_processor(images, segmentation_maps, return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 2, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) def test_reduce_labels(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class(**self.image_processor_dict) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 image, map = prepare_semantic_single_inputs() encoding = image_processor(image, map, return_tensors="pt") labels_no_reduce = encoding["labels"].clone() self.assertTrue(labels_no_reduce.min().item() >= 0) self.assertTrue(labels_no_reduce.max().item() <= 150) # Get the first non-zero label coords and value, for comparison when do_reduce_labels is True non_zero_positions = (labels_no_reduce > 0).nonzero() first_non_zero_coords = tuple(non_zero_positions[0].tolist()) first_non_zero_value = labels_no_reduce[first_non_zero_coords].item() image_processor.do_reduce_labels = True encoding = image_processor(image, map, return_tensors="pt") self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) # Compare with non-reduced label to see if it's reduced by 1 self.assertEqual(encoding["labels"][first_non_zero_coords].item(), first_non_zero_value - 1) def 
test_slow_fast_equivalence(self): if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") dummy_image, dummy_map = prepare_semantic_single_inputs() image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) image_encoding_slow = image_processor_slow(dummy_image, segmentation_maps=dummy_map, return_tensors="pt") image_encoding_fast = image_processor_fast(dummy_image, segmentation_maps=dummy_map, return_tensors="pt") self.assertTrue(torch.allclose(image_encoding_slow.pixel_values, image_encoding_fast.pixel_values, atol=1e-1)) self.assertLessEqual( torch.mean(torch.abs(image_encoding_slow.pixel_values - image_encoding_fast.pixel_values)).item(), 1e-3 ) self.assertTrue(torch.allclose(image_encoding_slow.labels, image_encoding_fast.labels, atol=1e-1)) def test_slow_fast_equivalence_batched(self): if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop: self.skipTest( reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors" ) dummy_images, dummy_maps = prepare_semantic_batch_inputs() image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) encoding_slow = image_processor_slow(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt") encoding_fast = image_processor_fast(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt") self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1)) self.assertLessEqual( torch.mean(torch.abs(encoding_slow.pixel_values - encoding_fast.pixel_values)).item(), 1e-3 )
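
# For reference: the divisor-padding checks above are consistent with rounding each
# spatial dimension up to the nearest multiple of `size_divisor`; for the (3, 249, 491)
# test image with size_divisor=4,
#   pad_h = (4 - 249 % 4) % 4  # = 3 -> height 252
#   pad_w = (4 - 491 % 4) % 4  # = 1 -> width 492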
transformers/tests/models/dpt/test_image_processing_dpt.py/0
{ "file_path": "transformers/tests/models/dpt/test_image_processing_dpt.py", "repo_id": "transformers", "token_count": 7309 }
500
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch ESM model.""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class EsmFoldModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=False, vocab_size=19, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): esmfold_config = { "trunk": { "num_blocks": 2, "sequence_state_dim": 64, "pairwise_state_dim": 16, "sequence_head_width": 4, "pairwise_head_width": 4, "position_bins": 4, "chunk_size": 16, "structure_module": { "ipa_dim": 16, "num_angles": 7, "num_blocks": 2, "num_heads_ipa": 4, "pairwise_dim": 16, "resnet_dim": 16, "sequence_dim": 48, }, }, "fp16_esm": False, "lddt_head_hid_dim": 16, } config = EsmConfig( vocab_size=33, 
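            # note: pinned to 33, the size of the real ESM-2 alphabet, rather than the tester's
            # vocab_size, since the folding model assumes the actual ESM token mapping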
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config=esmfold_config,
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape, (2, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (2, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @is_flaky(
        description="The computed `s = s / norm_denom` in `EsmFoldAngleResnet` is numerically unstable if `norm_denom` is very small."
) def test_batching_equivalence(self): super().test_batching_equivalence() @unittest.skip(reason="Does not support attention outputs") def test_attention_outputs(self): pass @unittest.skip def test_correct_missing_keys(self): pass @unittest.skip(reason="Esm does not support embedding resizing") def test_resize_embeddings_untied(self): pass @unittest.skip(reason="Esm does not support embedding resizing") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="ESMFold does not support passing input embeds!") def test_inputs_embeds(self): pass @unittest.skip(reason="ESMFold does not support head pruning.") def test_head_pruning(self): pass @unittest.skip(reason="ESMFold does not support head pruning.") def test_head_pruning_integration(self): pass @unittest.skip(reason="ESMFold does not support head pruning.") def test_head_pruning_save_load_from_config_init(self): pass @unittest.skip(reason="ESMFold does not support head pruning.") def test_head_pruning_save_load_from_pretrained(self): pass @unittest.skip(reason="ESMFold does not support head pruning.") def test_headmasking(self): pass @unittest.skip(reason="ESMFold does not output hidden states in the normal way.") def test_hidden_states_output(self): pass @unittest.skip(reason="ESMfold does not output hidden states in the normal way.") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="ESMFold only has one output format.") def test_model_outputs_equivalence(self): pass @unittest.skip(reason="ESMFold does not support input chunking.") def test_feed_forward_chunking(self): pass @unittest.skip( reason="ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments." ) def test_initialization(self): pass @unittest.skip(reason="ESMFold doesn't support torchscript compilation.") def test_torchscript_output_attentions(self): pass @unittest.skip(reason="ESMFold doesn't support torchscript compilation.") def test_torchscript_output_hidden_state(self): pass @unittest.skip(reason="ESMFold doesn't support torchscript compilation.") def test_torchscript_simple(self): pass @unittest.skip(reason="ESMFold doesn't support data parallel.") def test_multi_gpu_data_parallel_forward(self): pass @require_torch class EsmModelIntegrationTest(TestCasePlus): @slow def test_inference_protein_folding(self): model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float() model.eval() input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]]) position_outputs = model(input_ids)["positions"] expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32) torch.testing.assert_close(position_outputs[0, 0, 0, 0], expected_slice, rtol=1e-4, atol=1e-4)
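
# For reference: `positions` stacks one frame per structure-module iteration, with shape
# (num_blocks, batch, seq_len, 14, 3) in the atom14 layout, so the slice checked above is
# the xyz coordinate of the first atom of the first residue in the first frame.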
transformers/tests/models/esm/test_modeling_esmfold.py/0
{ "file_path": "transformers/tests/models/esm/test_modeling_esmfold.py", "repo_id": "transformers", "token_count": 4427 }
501
# Copyright 2020 Huggingface # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import timeout_decorator # noqa from parameterized import parameterized from transformers import FSMTConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, require_torch_fp16, slow, torch_device, ) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import FSMTForConditionalGeneration, FSMTModel, FSMTTokenizer from transformers.models.fsmt.modeling_fsmt import ( SinusoidalPositionalEmbedding, _prepare_fsmt_decoder_inputs, invert_mask, shift_tokens_right, ) from transformers.pipelines import TranslationPipeline class FSMTModelTester: def __init__( self, parent, src_vocab_size=99, tgt_vocab_size=99, langs=["ru", "en"], batch_size=13, seq_length=7, is_training=False, use_labels=False, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, bos_token_id=0, pad_token_id=1, eos_token_id=2, ): self.parent = parent self.src_vocab_size = src_vocab_size self.tgt_vocab_size = tgt_vocab_size self.langs = langs self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.eos_token_id = eos_token_id torch.manual_seed(0) # hack needed for modeling_common tests - despite not really having this attribute in this model self.vocab_size = self.src_vocab_size def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.src_vocab_size).clamp( 3, ) input_ids[:, -1] = 2 # Eos Token config = self.get_config() inputs_dict = prepare_fsmt_inputs_dict(config, input_ids) return config, inputs_dict def get_config(self): return FSMTConfig( vocab_size=self.src_vocab_size, # hack needed for common tests src_vocab_size=self.src_vocab_size, tgt_vocab_size=self.tgt_vocab_size, langs=self.langs, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, 
attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() inputs_dict["decoder_input_ids"] = inputs_dict["input_ids"] inputs_dict["decoder_attention_mask"] = inputs_dict["attention_mask"] inputs_dict["use_cache"] = False return config, inputs_dict def prepare_fsmt_inputs_dict( config, input_ids, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { "input_ids": input_ids, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_torch class FSMTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (FSMTModel, FSMTForConditionalGeneration) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": FSMTModel, "summarization": FSMTForConditionalGeneration, "text2text-generation": FSMTForConditionalGeneration, "translation": FSMTForConditionalGeneration, } if is_torch_available() else {} ) is_encoder_decoder = True test_pruning = False test_missing_keys = False def setUp(self): self.model_tester = FSMTModelTester(self) self.langs = ["en", "ru"] config = { "langs": self.langs, "src_vocab_size": 10, "tgt_vocab_size": 20, } # XXX: hack to appease to all other models requiring `vocab_size` config["vocab_size"] = 99 # no such thing in FSMT self.config_tester = ConfigTester(self, config_class=FSMTConfig, **config) def test_config(self): self.config_tester.run_common_tests() # XXX: override test_model_get_set_embeddings / different Embedding type def test_model_get_set_embeddings(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Embedding)) model.set_input_embeddings(nn.Embedding(10, 10)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.modules.sparse.Embedding)) def test_initialization_more(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() model = FSMTModel(config) model.to(torch_device) model.eval() # test init # self.assertTrue((model.encoder.embed_tokens.weight == model.shared.weight).all().item()) def _check_var(module): """Check that we initialized various parameters from N(0, config.init_std).""" self.assertAlmostEqual(torch.std(module.weight).item(), config.init_std, 2) _check_var(model.encoder.embed_tokens) _check_var(model.encoder.layers[0].self_attn.k_proj) _check_var(model.encoder.layers[0].fc1) # XXX: different std for fairseq version of SinusoidalPositionalEmbedding # self.assertAlmostEqual(torch.std(model.encoder.embed_positions.weights).item(), config.init_std, 2) def test_advanced_inputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() config.use_cache = False 
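        # pad the last two source tokens so that the decoder mask built below actually masks something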
inputs_dict["input_ids"][:, -2:] = config.pad_token_id decoder_input_ids, decoder_attn_mask, causal_mask = _prepare_fsmt_decoder_inputs( config, inputs_dict["input_ids"] ) model = FSMTModel(config).to(torch_device).eval() decoder_features_with_created_mask = model(**inputs_dict)[0] decoder_features_with_passed_mask = model( decoder_attention_mask=invert_mask(decoder_attn_mask), decoder_input_ids=decoder_input_ids, **inputs_dict )[0] _assert_tensors_equal(decoder_features_with_passed_mask, decoder_features_with_created_mask) useless_mask = torch.zeros_like(decoder_attn_mask) decoder_features = model(decoder_attention_mask=useless_mask, **inputs_dict)[0] self.assertTrue(isinstance(decoder_features, torch.Tensor)) # no hidden states or attentions self.assertEqual( decoder_features.size(), (self.model_tester.batch_size, self.model_tester.seq_length, config.tgt_vocab_size), ) if decoder_attn_mask.min().item() < -1e3: # some tokens were masked self.assertFalse((decoder_features_with_created_mask == decoder_features).all().item()) # Test different encoder attention masks decoder_features_with_long_encoder_mask = model( inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"].long() )[0] _assert_tensors_equal(decoder_features_with_long_encoder_mask, decoder_features_with_created_mask) def test_save_load_missing_keys(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_ensure_weights_are_shared(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() config.tie_word_embeddings = True model = FSMTForConditionalGeneration(config) # FSMT shares three weights. # Not an issue to not have these correctly tied for torch.load, but it is an issue for safetensors. self.assertEqual( len( { model.get_output_embeddings().weight.data_ptr(), model.get_input_embeddings().weight.data_ptr(), model.base_model.decoder.output_projection.weight.data_ptr(), } ), 1, ) config.tie_word_embeddings = False model = FSMTForConditionalGeneration(config) # FSMT shares three weights. # Not an issue to not have these correctly tied for torch.load, but it is an issue for safetensors. 
self.assertEqual( len( { model.get_output_embeddings().weight.data_ptr(), model.get_input_embeddings().weight.data_ptr(), model.base_model.decoder.output_projection.weight.data_ptr(), } ), 2, ) @unittest.skip(reason="can't be implemented for FSMT due to dual vocab.") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Passing inputs_embeds not implemented for FSMT.") def test_inputs_embeds(self): pass @unittest.skip(reason="Input ids is required for FSMT.") def test_inputs_embeds_matches_input_ids(self): pass @unittest.skip(reason="model weights aren't tied in FSMT.") def test_tie_model_weights(self): pass @unittest.skip(reason="TODO: Decoder embeddings cannot be resized at the moment") def test_resize_embeddings_untied(self): pass @require_torch class FSMTHeadTests(unittest.TestCase): src_vocab_size = 99 tgt_vocab_size = 99 langs = ["ru", "en"] def _get_config(self): return FSMTConfig( src_vocab_size=self.src_vocab_size, tgt_vocab_size=self.tgt_vocab_size, langs=self.langs, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) def _get_config_and_data(self): input_ids = torch.tensor( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ], dtype=torch.long, device=torch_device, ) batch_size = input_ids.shape[0] config = self._get_config() return config, input_ids, batch_size def test_generate_beam_search(self): input_ids = torch.tensor([[71, 82, 2], [68, 34, 2]], dtype=torch.long, device=torch_device) config = self._get_config() lm_model = FSMTForConditionalGeneration(config).to(torch_device) lm_model.eval() max_length = 5 new_input_ids = lm_model.generate( input_ids.clone(), do_sample=True, num_return_sequences=1, num_beams=2, no_repeat_ngram_size=3, max_length=max_length, ) self.assertEqual(new_input_ids.shape, (input_ids.shape[0], max_length)) def test_shift_tokens_right(self): input_ids = torch.tensor([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=torch.long) shifted = shift_tokens_right(input_ids, 1) n_pad_before = input_ids.eq(1).float().sum() n_pad_after = shifted.eq(1).float().sum() self.assertEqual(shifted.shape, input_ids.shape) self.assertEqual(n_pad_after, n_pad_before - 1) self.assertTrue(torch.eq(shifted[:, 0], 2).all()) @require_torch_fp16 def test_generate_fp16(self): config, input_ids, batch_size = self._get_config_and_data() attention_mask = input_ids.ne(1).to(torch_device) model = FSMTForConditionalGeneration(config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_dummy_inputs(self): config, *_ = self._get_config_and_data() model = FSMTForConditionalGeneration(config).eval().to(torch_device) model(**model.dummy_inputs) def test_prepare_fsmt_decoder_inputs(self): config, *_ = self._get_config_and_data() input_ids = _long_tensor([4, 4, 2]) decoder_input_ids = _long_tensor([[26388, 2, config.pad_token_id]]) causal_mask_dtype = torch.float32 ignore = torch.finfo(causal_mask_dtype).min decoder_input_ids, 
decoder_attn_mask, causal_mask = _prepare_fsmt_decoder_inputs( config, input_ids, decoder_input_ids, causal_mask_dtype=causal_mask_dtype ) expected_causal_mask = torch.tensor( [[0, ignore, ignore], [0, 0, ignore], [0, 0, 0]] # never attend to the final token, because its pad ).to(input_ids.device) self.assertEqual(decoder_attn_mask.size(), decoder_input_ids.size()) self.assertTrue(torch.eq(expected_causal_mask, causal_mask).all()) def _assert_tensors_equal(a, b, atol=1e-12, prefix=""): """If tensors not close, or a and b aren't both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: if len(prefix) > 0: prefix = f"{prefix}: " raise AssertionError(f"{prefix}{a} != {b}") def _long_tensor(tok_lst): return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) TOLERANCE = 1e-4 pairs = [ ["en-ru"], ["ru-en"], ["en-de"], ["de-en"], ] @require_torch @require_sentencepiece @require_tokenizers class FSMTModelIntegrationTests(unittest.TestCase): tokenizers_cache = {} models_cache = {} default_mname = "facebook/wmt19-en-ru" @cached_property def default_tokenizer(self): return self.get_tokenizer(self.default_mname) @cached_property def default_model(self): return self.get_model(self.default_mname) def get_tokenizer(self, mname): if mname not in self.tokenizers_cache: self.tokenizers_cache[mname] = FSMTTokenizer.from_pretrained(mname) return self.tokenizers_cache[mname] def get_model(self, mname): if mname not in self.models_cache: # The safetensors checkpoint on `facebook/wmt19-de-en` (and other repositories) has issues. # Hub PRs are opened, see https://huggingface.co/facebook/wmt19-de-en/discussions/6 # We have asked Meta to merge them but no response yet: # https://huggingface.slack.com/archives/C01NE71C4F7/p1749565278015529?thread_ts=1749031628.757929&cid=C01NE71C4F7 # Below is what produced the Hub PRs that work (loading without safetensors, saving the reloading) model = FSMTForConditionalGeneration.from_pretrained(mname, use_safetensors=False) with tempfile.TemporaryDirectory() as tmpdir: model.save_pretrained(tmpdir) self.models_cache[mname] = FSMTForConditionalGeneration.from_pretrained(tmpdir).to(torch_device) if torch_device == "cuda": self.models_cache[mname].half() return self.models_cache[mname] @slow def test_inference_no_head(self): tokenizer = self.default_tokenizer model = FSMTModel.from_pretrained(self.default_mname).to(torch_device) src_text = "My friend computer will translate this for me" input_ids = tokenizer([src_text], return_tensors="pt")["input_ids"] input_ids = _long_tensor(input_ids).to(torch_device) inputs_dict = prepare_fsmt_inputs_dict(model.config, input_ids) with torch.no_grad(): output = model(**inputs_dict)[0] expected_shape = torch.Size((1, 10, model.config.tgt_vocab_size)) self.assertEqual(output.shape, expected_shape) # expected numbers were generated when en-ru model, using just fairseq's model4.pt # may have to adjust if switched to a different checkpoint expected_slice = torch.tensor( [[-1.5753, -1.5753, 2.8975], [-0.9540, -0.9540, 1.0299], [-3.3131, -3.3131, 0.5219]] ).to(torch_device) torch.testing.assert_close(output[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE) def translation_setup(self, pair): text = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } src, tgt = pair.split("-") print(f"Testing {src} -> {tgt}") mname = 
f"facebook/wmt19-{pair}" src_text = text[src] tgt_text = text[tgt] # To make `test_translation_pipeline_0_en_ru` pass in #38904. When translating it back to `en`, we get # `Machine learning is fine, isn't it?`. if (src, tgt) == ("en", "ru"): tgt_text = "Машинное обучение - это прекрасно, не так ли?" tokenizer = self.get_tokenizer(mname) model = self.get_model(mname) return tokenizer, model, src_text, tgt_text @parameterized.expand(pairs) @slow def test_translation_direct(self, pair): tokenizer, model, src_text, tgt_text = self.translation_setup(pair) input_ids = tokenizer.encode(src_text, return_tensors="pt").to(torch_device) outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) assert decoded == tgt_text, f"\n\ngot: {decoded}\nexp: {tgt_text}\n" @parameterized.expand(pairs) @slow def test_translation_pipeline(self, pair): tokenizer, model, src_text, tgt_text = self.translation_setup(pair) pipeline = TranslationPipeline(model, tokenizer, framework="pt", device=torch_device) output = pipeline([src_text]) self.assertEqual([tgt_text], [x["translation_text"] for x in output]) @require_torch class TestSinusoidalPositionalEmbeddings(unittest.TestCase): padding_idx = 1 tolerance = 1e-4 def test_basic(self): input_ids = torch.tensor([[4, 10]], dtype=torch.long, device=torch_device) emb1 = SinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6, padding_idx=self.padding_idx).to( torch_device ) emb1.make_weight(*emb1.weight.shape, emb1.padding_idx) emb = emb1(input_ids) desired_weights = torch.tensor( [ [9.0930e-01, 1.9999e-02, 2.0000e-04, -4.1615e-01, 9.9980e-01, 1.0000e00], [1.4112e-01, 2.9995e-02, 3.0000e-04, -9.8999e-01, 9.9955e-01, 1.0000e00], ] ).to(torch_device) self.assertTrue( torch.allclose(emb[0], desired_weights, atol=self.tolerance), msg=f"\nexp:\n{desired_weights}\ngot:\n{emb[0]}\n", ) def test_odd_embed_dim(self): # odd embedding_dim is allowed test = SinusoidalPositionalEmbedding(num_positions=4, embedding_dim=5, padding_idx=self.padding_idx).to( torch_device ) test.make_weight(*test.weight.shape, test.padding_idx) # odd num_embeddings is allowed test = SinusoidalPositionalEmbedding(num_positions=5, embedding_dim=4, padding_idx=self.padding_idx).to( torch_device ) test.make_weight(*test.weight.shape, test.padding_idx) @unittest.skip(reason="different from marian (needs more research)") def test_positional_emb_weights_against_marian(self): desired_weights = torch.tensor( [ [0, 0, 0, 0, 0], [0.84147096, 0.82177866, 0.80180490, 0.78165019, 0.76140374], [0.90929741, 0.93651021, 0.95829457, 0.97505713, 0.98720258], ] ) emb1 = SinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512, padding_idx=self.padding_idx).to( torch_device ) emb1.make_weight(*emb1.weight.shape, emb1.padding_idx) weights = emb1.weights.data[:3, :5] # XXX: only the 1st and 3rd lines match - this is testing against # verbatim copy of SinusoidalPositionalEmbedding from fairseq self.assertTrue( torch.allclose(weights, desired_weights, atol=self.tolerance), msg=f"\nexp:\n{desired_weights}\ngot:\n{weights}\n", ) # test that forward pass is just a lookup, there is no ignore padding logic input_ids = torch.tensor( [[4, 10, self.padding_idx, self.padding_idx, self.padding_idx]], dtype=torch.long, device=torch_device ) no_cache_pad_zero = emb1(input_ids)[0] # XXX: only the 1st line matches the 3rd torch.testing.assert_close( torch.tensor(desired_weights, device=torch_device), no_cache_pad_zero[:3, :5], rtol=1e-3, atol=1e-3 )
transformers/tests/models/fsmt/test_modeling_fsmt.py/0
{ "file_path": "transformers/tests/models/fsmt/test_modeling_fsmt.py", "repo_id": "transformers", "token_count": 11387 }
502
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Gemma3 model.""" import logging import tempfile import unittest import pytest from parameterized import parameterized from transformers import ( AutoModelForCausalLM, AutoTokenizer, Gemma3Config, Gemma3TextConfig, is_torch_available, ) from transformers.testing_utils import ( Expectations, cleanup, is_flash_attn_2_available, require_deterministic_for_xpu, require_flash_attn, require_read_token, require_torch, require_torch_accelerator, require_torch_large_accelerator, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...models.gemma.test_modeling_gemma import GemmaModelTester from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_torch_available(): import torch from transformers import ( Gemma3ForCausalLM, Gemma3ForConditionalGeneration, Gemma3ForSequenceClassification, Gemma3Model, Gemma3Processor, Gemma3TextModel, ) from transformers.pytorch_utils import is_torch_greater_or_equal class Gemma3ModelTester(GemmaModelTester): if is_torch_available(): config_class = Gemma3TextConfig model_class = Gemma3TextModel for_causal_lm_class = Gemma3ForCausalLM @require_torch class Gemma3ModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (Gemma3TextModel, Gemma3ForCausalLM) if is_torch_available() else () all_generative_model_classes = (Gemma3ForCausalLM,) if is_torch_available() else () test_headmasking = False test_pruning = False _is_stateful = True model_split_percents = [0.5, 0.6] def setUp(self): self.model_tester = Gemma3ModelTester(self) self.config_tester = ConfigTester(self, config_class=Gemma3Config, hidden_size=37) @unittest.skip("Failing because of unique cache (HybridCache)") def test_model_outputs_equivalence(self, **kwargs): pass @parameterized.expand([("random",), ("same",)]) @pytest.mark.generate @unittest.skip("Gemma3 has HybridCache which is not compatible with assisted decoding") def test_assisted_decoding_matches_greedy_search(self, assistant_type): pass @unittest.skip("Gemma3 has HybridCache which is not compatible with assisted decoding") def test_prompt_lookup_decoding_matches_greedy_search(self, assistant_type): pass @pytest.mark.generate @unittest.skip("Gemma3 has HybridCache which is not compatible with assisted decoding") def test_assisted_decoding_sample(self): pass @unittest.skip("Gemma3 has HybridCache which is not compatible with dola decoding") def test_dola_decoding_sample(self): pass @unittest.skip("Gemma3 has HybridCache and doesn't support continue from past kv") def test_generate_continue_from_past_key_values(self): pass @unittest.skip("Gemma3 has HybridCache and doesn't support low_memory generation") def test_beam_search_low_memory(self): pass @unittest.skip("Gemma3 has HybridCache and doesn't support contrastive generation") def test_contrastive_generate(self): pass 
@unittest.skip("Gemma3 has HybridCache and doesn't support contrastive generation") def test_contrastive_generate_dict_outputs_use_cache(self): pass @unittest.skip("Gemma3 has HybridCache and doesn't support contrastive generation") def test_contrastive_generate_low_memory(self): pass @unittest.skip("Gemma3 has HybridCache and doesn't support StaticCache. Though it could, it shouldn't support.") def test_generate_with_static_cache(self): pass @unittest.skip("Gemma3 has HybridCache and doesn't support StaticCache. Though it could, it shouldn't support.") def test_generate_from_inputs_embeds_with_static_cache(self): pass @unittest.skip("Gemma3 has HybridCache which auto-compiles. Compile and FA2 don't work together.") def test_eager_matches_fa2_generate(self): pass @unittest.skip( reason="HybridCache can't be gathered because it is not iterable. Adding a simple iter and dumping `distributed_iterator`" " as in Dynamic Cache doesn't work. NOTE: @gante all cache objects would need better compatibility with multi gpu setting" ) def test_multi_gpu_data_parallel_forward(self): pass @unittest.skip("Gemma3 applies key/query norm which doesn't work with packing") def test_eager_padding_matches_padding_free_with_position_ids(self): pass @unittest.skip("Gemma3 applies key/query norm which doesn't work with packing") def test_sdpa_padding_matches_padding_free_with_position_ids(self): pass def test_generation_beyond_sliding_window_tiny_model(self): """Test generation with a tiny randomly initialised model whose input length is larger than the `sliding_window`. The model is configured with both `full_attention` and `sliding_attention` layers to make sure the hybrid cache and mask slicing logic is covered. """ config = Gemma3TextConfig.from_pretrained("hf-internal-testing/tiny-random-Gemma3ForCausalLM") config.attn_implementation = "eager" config.layer_types = ["full_attention", "sliding_attention"] config.sliding_window = 8 config.max_position_embeddings = 128 model = AutoModelForCausalLM.from_pretrained( "hf-internal-testing/tiny-random-Gemma3ForCausalLM", config=config ).to(torch_device) input_len = 10 input_ids = torch.tensor( [ [42300, 241087, 255445, 81315, 193760, 184471, 67719, 98191, 210651, 124725], [102294, 205314, 226646, 62020, 60245, 68025, 251839, 114053, 4695, 175511], ], device=torch_device, ) attention_mask = torch.ones_like(input_ids).to(torch_device) with torch.no_grad(): _ = model.generate( input_ids, attention_mask=attention_mask, max_new_tokens=1, do_sample=False, use_cache=True, cache_implementation="hybrid", ) # 2 generations are needed to trigger https://github.com/huggingface/transformers/issues/39711 # Since it requires model._cache to have been previously initialized output = model.generate( input_ids, attention_mask=attention_mask, max_new_tokens=5, do_sample=False, use_cache=True, cache_implementation="hybrid", ) generated_sequences = output[:, input_len:].cpu() EXPECTED_OUTPUT = torch.tensor([[90109, 90109, 90109, 83191, 83191], [246901, 69832, 69832, 69832, 62288]]) torch.testing.assert_close(generated_sequences, EXPECTED_OUTPUT) class Gemma3Vision2TextModelTester: def __init__( self, parent, mm_tokens_per_image=2, image_token_index=4, boi_token_index=5, eoi_token_index=6, seq_length=25, is_training=True, vision_config={ "use_labels": True, "image_size": 20, "patch_size": 5, "num_channels": 3, "is_training": True, "hidden_size": 32, "num_key_value_heads": 1, "num_hidden_layers": 2, "num_attention_heads": 4, "intermediate_size": 37, "dropout": 0.1, "attention_dropout": 0.1, 
"initializer_range": 0.02, }, use_cache=False, ): self.parent = parent # `image_token_index` is set to 0 to pass "resize_embeddings" test, do not modify self.mm_tokens_per_image = mm_tokens_per_image self.image_token_index = image_token_index self.boi_token_index = boi_token_index self.eoi_token_index = eoi_token_index self.llm_tester = Gemma3ModelTester(self.parent) self.text_config = self.llm_tester.get_config() self.vision_config = vision_config self.seq_length = seq_length self.pad_token_id = self.text_config.pad_token_id self.num_hidden_layers = self.text_config.num_hidden_layers self.vocab_size = self.text_config.vocab_size self.hidden_size = self.text_config.hidden_size self.num_attention_heads = self.text_config.num_attention_heads self.is_training = is_training self.batch_size = 3 self.num_channels = vision_config["num_channels"] self.image_size = vision_config["image_size"] self.encoder_seq_length = seq_length self.use_cache = use_cache def get_config(self): return Gemma3Config( text_config=self.text_config, vision_config=self.vision_config, image_token_index=self.image_token_index, boi_token_index=self.boi_token_index, eoi_token_index=self.eoi_token_index, mm_tokens_per_image=self.mm_tokens_per_image, ) def prepare_config_and_inputs(self): pixel_values = floats_tensor( [ self.batch_size, self.vision_config["num_channels"], self.vision_config["image_size"], self.vision_config["image_size"], ] ) config = self.get_config() return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1 attention_mask = input_ids.ne(self.pad_token_id).to(torch_device) # set the 3 first tokens to be image, and ensure that no other tokens are image tokens # do not change this unless you modified image size or patch size input_ids[input_ids == config.image_token_index] = self.pad_token_id input_ids[:, :1] = config.image_token_index token_type_ids = torch.zeros_like(input_ids) token_type_ids[input_ids == config.image_token_index] = 1 inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } return config, inputs_dict @require_torch class Gemma3Vision2TextModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = ( ( Gemma3Model, Gemma3ForConditionalGeneration, Gemma3ForSequenceClassification, ) if is_torch_available() else () ) all_generative_model_classes = (Gemma3ForConditionalGeneration,) if is_torch_available() else () test_headmasking = False test_pruning = False test_missing_keys = False _is_stateful = True model_split_percents = [0.5, 0.6] # MP works but offload doesn't work when the SigLIP MultiheadAttention is offloaded # TODO: One potential solution would be to add to set preload_module_classes = ["SiglipMultiheadAttentionPoolingHead"] # in the dispatch_model function test_cpu_offload = False test_disk_offload_safetensors = False test_disk_offload_bin = False def setUp(self): self.model_tester = Gemma3Vision2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=Gemma3Config, hidden_size=37) def test_bidirectional_image_attention(self): """ Tests that each image can attend to itself bidirectionally. However an image cannot attend to future images, even within the same batch. 
""" config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config._attn_implementation = "eager" model = Gemma3Model(config).to(torch_device) # First let's pass inputs without change which is one image per text and manipulate # `token_type_ids` to make sure bidirectional mask is applied where it has to be inputs_dict["token_type_ids"] = torch.zeros_like(inputs_dict["token_type_ids"]) inputs_dict["token_type_ids"][:, :4] = 1 # unmask first 4 tokens with torch.no_grad(): out = model(**inputs_dict, output_attentions=True) # We expect a non-causal mask on first 4 tokens, thus no zeros for attention in out.attentions: self.assertTrue((attention[..., :4, :4] != 0).all().item()) # Now when removing `token_type_ids`, we will get simple causal mask inputs_dict["token_type_ids"][:, :4] = 0 # mask back first 4 tokens with torch.no_grad(): out = model(**inputs_dict, output_attentions=True) # We expect a causal mask on first 4 tokens, thus no zeros for attention in out.attentions: self.assertFalse((attention[..., :4, :4] != 0).all().item()) # Let's add two "images" per text, first one spanning 4 tokens and last one 3 tokens inputs_dict["token_type_ids"][:, :4] = 1 inputs_dict["token_type_ids"][:, 7:10] = 1 with torch.no_grad(): out = model(**inputs_dict, output_attentions=True) for attention in out.attentions: self.assertTrue((attention[..., :4, :4] != 0).all().item()) self.assertTrue((attention[..., 7:10, 7:10] != 0).all().item()) # We expect a non-causal mask only within same image and no looking ahead to the future self.assertTrue((attention[..., :4, 7:10] == 0).all().item()) @unittest.skip(reason="SiglipVisionModel (vision backbone) does not support standalone training") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="SiglipVisionModel (vision backbone) does not support standalone training") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="SiglipVisionModel (vision backbone) does not support standalone training") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip( reason="HybridCache can't be gathered because it is not iterable. Adding a simple iter and dumping `distributed_iterator`" " as in Dynamic Cache doesn't work. 
NOTE: @gante all cache objects would need better compatibility with multi gpu setting" ) def test_multi_gpu_data_parallel_forward(self): pass @unittest.skip("Failing because of unique cache (HybridCache)") def test_model_outputs_equivalence(self, **kwargs): pass @parameterized.expand([("random",), ("same",)]) @pytest.mark.generate @unittest.skip("Gemma3 has HybridCache which is not compatible with assisted decoding") def test_assisted_decoding_matches_greedy_search(self, assistant_type): pass @unittest.skip("Gemma3 has HybridCache which is not compatible with assisted decoding") def test_prompt_lookup_decoding_matches_greedy_search(self, assistant_type): pass @pytest.mark.generate @unittest.skip("Gemma3 has HybridCache which is not compatible with assisted decoding") def test_assisted_decoding_sample(self): pass @unittest.skip("Gemma3 has HybridCache which is not compatible with dola decoding") def test_dola_decoding_sample(self): pass @unittest.skip("Gemma3 has HybridCache and doesn't support continue from past kv") def test_generate_continue_from_past_key_values(self): pass @unittest.skip("Gemma3 has HybridCache and doesn't support low_memory generation") def test_beam_search_low_memory(self): pass @unittest.skip("Gemma3 has HybridCache and doesn't support contrastive generation") def test_contrastive_generate(self): pass @unittest.skip("Gemma3 has HybridCache and doesn't support contrastive generation") def test_contrastive_generate_dict_outputs_use_cache(self): pass @unittest.skip("Gemma3 has HybridCache and doesn't support contrastive generation") def test_contrastive_generate_low_memory(self): pass @unittest.skip("Gemma3 has HybridCache and doesn't support StaticCache. Though it could, it shouldn't support.") def test_generate_with_static_cache(self): pass @unittest.skip("Gemma3 has HybridCache and doesn't support StaticCache. Though it could, it shouldn't support.") def test_generate_from_inputs_embeds_with_static_cache(self): pass @unittest.skip("Gemma3 has HybridCache which auto-compiles. Compile and FA2 don't work together.") def test_eager_matches_fa2_generate(self): pass @unittest.skip( reason="Siglip (vision backbone) uses the same initialization scheme as the Flax original implementation" ) def test_initialization(self): pass @unittest.skip("Loading nested configs with overwritten `kwargs` isn't supported yet, FIXME @raushan.") def test_load_with_mismatched_shapes(self): pass @unittest.skip("Loading nested configs with overwritten `kwargs` isn't supported yet, FIXME @raushan.") def test_mismatched_shapes_have_properly_initialized_weights(self): pass def test_automodelforcausallm(self): """ Regression test for #36741/#36917 -- make sure `AutoModelForCausalLM` works with a Gemma3 config, i.e. 
that `AutoModelForCausalLM.from_pretrained` pulls the text config before loading the model """ config = self.model_tester.get_config() model = Gemma3ForConditionalGeneration(config) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) for_causal_lm = AutoModelForCausalLM.from_pretrained(tmp_dir) self.assertIsInstance(for_causal_lm, Gemma3ForConditionalGeneration) @slow @require_torch_accelerator @require_read_token class Gemma3IntegrationTest(unittest.TestCase): def setUp(self): self.processor = Gemma3Processor.from_pretrained("google/gemma-3-4b-it", padding_side="left") url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png" self.messages = [ {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]}, { "role": "user", "content": [ {"type": "image", "url": url}, {"type": "text", "text": "What is shown in this image?"}, ], }, ] def tearDown(self): cleanup(torch_device, gc_collect=True) @require_deterministic_for_xpu def test_model_4b_bf16(self): model_id = "google/gemma-3-4b-it" model = Gemma3ForConditionalGeneration.from_pretrained(model_id, dtype=torch.bfloat16).to(torch_device) inputs = self.processor.apply_chat_template( self.messages, tokenize=True, return_dict=True, return_tensors="pt", add_generation_prompt=True, ).to(torch_device) # cache_implementation="hybrid" an in the original transformers implementation output = model.generate(**inputs, max_new_tokens=30, do_sample=False, cache_implementation="hybrid") output_text = self.processor.batch_decode(output, skip_special_tokens=True) EXPECTED_TEXTS = Expectations( { ("xpu", 3): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nCertainly! \n\nThe image shows a brown and white cow standing on a sandy beach with turquoise water in the background. It looks like a lovely,'], ("cuda", 8): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nCertainly! \n\nThe image shows a brown cow standing on a sandy beach with clear turquoise water and a blue sky in the background. It looks like'], ("rocm", (9, 4)): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nCertainly! \n\nThe image shows a brown cow standing on a sandy beach with clear blue water and a blue sky in the background. It looks like'], ("rocm", (9, 5)): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nCertainly! \n\nThe image shows a brown and white cow standing on a sandy beach with turquoise water and a distant coastline in the background. 
It looks'], } ) # fmt: skip EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation() self.assertEqual(output_text, EXPECTED_TEXT) @require_torch_large_accelerator @require_deterministic_for_xpu def test_model_4b_batch(self): model_id = "google/gemma-3-4b-it" model = Gemma3ForConditionalGeneration.from_pretrained(model_id, dtype=torch.bfloat16).to(torch_device) messages_2 = [ {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]}, { "role": "user", "content": [ { "type": "image", "url": "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png", }, {"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"}, {"type": "text", "text": "Are these images identical?"}, ], }, ] inputs = self.processor.apply_chat_template( [self.messages, messages_2], tokenize=True, return_dict=True, return_tensors="pt", padding=True, add_generation_prompt=True, ).to(torch_device) # cache_implementation="hybrid" an in the original transformers implementation output = model.generate(**inputs, max_new_tokens=30, do_sample=False, cache_implementation="hybrid") output_text = self.processor.batch_decode(output, skip_special_tokens=True) EXPECTED_TEXTS = Expectations( { ("xpu", 3): [ 'user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nCertainly! \n\nThe image shows a brown and white cow standing on a sandy beach next to a turquoise ocean. It looks like a very sunny and', 'user\nYou are a helpful assistant.\n\n\n\n\n\n\n\n\n\nAre these images identical?\nmodel\nNo, these images are not identical. They depict very different scenes:\n\n* **Image 1** shows a cow standing on a beach.', ], ("cuda", 8): [ 'user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nCertainly! \n\nThe image shows a brown cow standing on a sandy beach with clear blue water and a blue sky in the background. It looks like', "user\nYou are a helpful assistant.\n\n\n\n\n\n\n\n\n\nAre these images identical?\nmodel\nNo, these images are not identical. \n\nHere's a breakdown of the differences:\n\n* **Image 1:** Shows a brown" ], ("rocm", (9, 4)): [ 'user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nCertainly! \n\nThe image shows a brown cow standing on a sandy beach with clear turquoise water and a blue sky in the background. It looks like', "user\nYou are a helpful assistant.\n\n\n\n\n\n\n\n\n\nAre these images identical?\nmodel\nNo, these images are not identical. \n\nHere's a breakdown of the differences:\n\n* **Image 1:** Shows a cow" ], ("rocm", (9, 5)): [ 'user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nCertainly! \n\nThe image shows a brown and white cow standing on a sandy beach next to a turquoise ocean. There are some clouds in the blue', 'user\nYou are a helpful assistant.\n\n\n\n\n\n\n\n\n\nAre these images identical?\nmodel\nNo, these images are not identical. They depict very different scenes. 
\n\n* **Image 1** shows a cow standing on a beach', ], } ) # fmt: skip EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation() self.assertEqual(output_text, EXPECTED_TEXT) @require_torch_large_accelerator def test_model_4b_crops(self): model_id = "google/gemma-3-4b-it" model = Gemma3ForConditionalGeneration.from_pretrained(model_id, dtype=torch.bfloat16).to(torch_device) crop_config = { "images_kwargs": { "do_pan_and_scan": True, "pan_and_scan_max_num_crops": 448, "pan_and_scan_min_crop_size": 32, "pan_and_scan_min_ratio_to_activate": 0.3, } } inputs = self.processor.apply_chat_template( self.messages, tokenize=True, return_dict=True, return_tensors="pt", add_generation_prompt=True, **crop_config, ).to(torch_device) # cache_implementation="hybrid" an in the original transformers implementation output = model.generate(**inputs, max_new_tokens=30, do_sample=False, cache_implementation="hybrid") output_text = self.processor.batch_decode(output, skip_special_tokens=True) EXPECTED_NUM_IMAGES = 3 # one for the origin image and two crops of images EXPECTED_TEXTS = Expectations( { ("xpu", 3): ['user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown cow standing on a sandy beach next to a turquoise ocean. There are clouds in the blue sky above.'], ("cuda", 7): [], ("cuda", 8): ["user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown cow standing on a sandy beach next to a turquoise ocean. There's a bright blue sky with some white clouds in the"], ("rocm", (9, 4)): ["user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown cow standing on a sandy beach next to a turquoise ocean. 
There's a blue sky with some white clouds in the background"] } ) # fmt: skip EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation() self.assertEqual(len(inputs["pixel_values"]), EXPECTED_NUM_IMAGES) self.assertEqual(output_text, EXPECTED_TEXT) @require_torch_large_accelerator @require_deterministic_for_xpu def test_model_4b_batch_crops(self): model_id = "google/gemma-3-4b-it" model = Gemma3ForConditionalGeneration.from_pretrained(model_id, dtype=torch.bfloat16).to(torch_device) crop_config = { "images_kwargs": { "do_pan_and_scan": True, "pan_and_scan_max_num_crops": 448, "pan_and_scan_min_crop_size": 32, "pan_and_scan_min_ratio_to_activate": 0.3, } } messages_2 = [ {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]}, { "role": "user", "content": [ { "type": "image", "url": "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png", }, {"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"}, {"type": "text", "text": "Are these images identical?"}, ], }, ] inputs = self.processor.apply_chat_template( [self.messages, messages_2], tokenize=True, return_dict=True, return_tensors="pt", padding=True, add_generation_prompt=True, **crop_config, ).to(torch_device) # cache_implementation="hybrid" an in the original transformers implementation output = model.generate(**inputs, max_new_tokens=30, do_sample=False, cache_implementation="hybrid") output_text = self.processor.batch_decode(output, skip_special_tokens=True) EXPECTED_NUM_IMAGES = 9 # 3 * (one for the origin image and two crops of images) = 9 EXPECTED_TEXTS = Expectations( { ("xpu", 3): [ 'user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown cow standing on a sandy beach next to a turquoise ocean. There are clouds in the blue sky above.', 'user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nAre these images identical?\nmodel\nNo, the images are not identical. \n\nThe first image shows a cow on a beach, while the second image shows a street scene with a', ], ("cuda", 7): [], ("cuda", 8): [ "user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown cow standing on a sandy beach next to a turquoise ocean. There's a bright blue sky with some white clouds in the", 'user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nAre these images identical?\nmodel\nNo, the images are not identical. \n\nThe first image shows a cow on a beach, while the second image shows a street scene with a' ], ("rocm", (9, 4)) : [ "user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown cow standing on a sandy beach next to a turquoise ocean. 
There's a blue sky with some white clouds in the background", 'user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nAre these images identical?\nmodel\nNo, the images are not identical. \n\nThe first image shows a cow on a beach, while the second image shows a street scene with a' ], ("rocm", (9, 5)) : [ 'user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown cow standing on a sandy beach next to a turquoise ocean. There are clouds in the blue sky above.', 'user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nAre these images identical?\nmodel\nNo, the images are not identical. \n\nThe first image shows a cow on a beach, while the second image shows a street scene with a', ], } ) # fmt: skip EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation() self.assertEqual(len(inputs["pixel_values"]), EXPECTED_NUM_IMAGES) self.assertEqual(output_text, EXPECTED_TEXT) @require_torch_large_accelerator def test_model_4b_multiimage(self): model_id = "google/gemma-3-4b-it" model = Gemma3ForConditionalGeneration.from_pretrained(model_id, dtype=torch.bfloat16).to(torch_device) messages = [ {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]}, { "role": "user", "content": [ {"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"}, {"type": "text", "text": "What do you see here?"}, ], }, ] inputs = self.processor.apply_chat_template( messages, tokenize=True, return_dict=True, return_tensors="pt", padding=True, add_generation_prompt=True, ).to(torch_device) # cache_implementation="hybrid" an in the original transformers implementation output = model.generate(**inputs, max_new_tokens=30, do_sample=False, cache_implementation="hybrid") output_text = self.processor.batch_decode(output, skip_special_tokens=True) EXPECTED_TEXTS = Expectations( { ("xpu", 3): ["user\nYou are a helpful assistant.\n\n\n\n\n\nWhat do you see here?\nmodel\nOkay, let's break down what I see in this image!\n\nHere's a description of the scene:\n\n* **Chinese Arch"], ("cuda", 7): [], ("cuda", 8): ["user\nYou are a helpful assistant.\n\n\n\n\n\nWhat do you see here?\nmodel\nOkay, let's break down what I see in this image:\n\n**Overall Scene:**\n\nIt looks like a street scene in a vibrant,"], ("rocm", (9, 4)): ["user\nYou are a helpful assistant.\n\n\n\n\n\nWhat do you see here?\nmodel\nOkay, let's break down what I see in this image:\n\n**Main Features:**\n\n* **Chinese Archway:** The most prominent"], } ) # fmt: skip EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation() self.assertEqual(output_text, EXPECTED_TEXT) @require_deterministic_for_xpu def test_model_1b_text_only(self): model_id = "google/gemma-3-1b-it" model = Gemma3ForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16).to(torch_device) tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left") inputs = tokenizer("Write a poem about Machine Learning.", return_tensors="pt").to(torch_device) # cache_implementation="hybrid" an in the original transformers implementation output = model.generate(**inputs, 
max_new_tokens=30, do_sample=False, cache_implementation="hybrid") output_text = tokenizer.batch_decode(output, skip_special_tokens=True) EXPECTED_TEXTS = Expectations( { ("xpu", 3): ['Write a poem about Machine Learning.\n\n---\n\nThe data flows, a river deep,\nWith patterns hidden, secrets sleep.\nA neural net, a watchful eye,\nLearning'], ("cuda", 7): ['Write a poem about Machine Learning.\n\n---\n\nThe data flows, a silent stream,\nInto the neural net, a waking dream.\nAlgorithms hum, a coded grace,\n'], ("cuda", 8): ['Write a poem about Machine Learning.\n\n---\n\nThe data flows, a silent stream,\nInto the neural net, a waking dream.\nAlgorithms hum, a coded grace,\n'], ("rocm", (9, 4)): ['Write a poem about Machine Learning.\n\n---\n\nThe data flows, a silent stream,\nInto the neural net, a waking dream.\nAlgorithms hum, a coded grace,\n'], ("rocm", (9, 5)): ['Write a poem about Machine Learning.\n\n---\n\nThe data flows, a river deep,\nWith patterns hidden, secrets sleep.\nA neural net, a watchful eye,\nLearning'], } ) # fmt: skip EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation() self.assertEqual(output_text, EXPECTED_TEXT) # TODO: raushan FA2 generates gibberish for no reason, check later @require_flash_attn @require_torch_large_accelerator @pytest.mark.flash_attn_test def test_model_4b_flash_attn(self): model_id = "google/gemma-3-4b-it" model = Gemma3ForConditionalGeneration.from_pretrained( model_id, dtype=torch.bfloat16, attn_implementation="flash_attention_2" ).to(torch_device) inputs = self.processor.apply_chat_template( self.messages, tokenize=True, return_dict=True, return_tensors="pt", add_generation_prompt=True, ).to(torch_device) # cache_implementation="hybrid" an in the original transformers implementation output = model.generate(**inputs, max_new_tokens=30, do_sample=False, cache_implementation="hybrid") output_text = self.processor.batch_decode(output, skip_special_tokens=True) EXPECTED_TEXTS = Expectations( { ("xpu", 3): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown and white cow standing on a sandy beach with turquoise water and a distant island in the background. It looks like a sunny day'], ("cuda", 7): [], ("cuda", 8): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown and white cow standing on a sandy beach with turquoise water and a distant island in the background. It looks like a sunny day'], ("rocm", (9, 5)): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown and white cow standing on a sandy beach with a turquoise ocean and a distant island in the background. It looks like a sunny'], } ) # fmt: skip EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation() self.assertEqual(output_text, EXPECTED_TEXT) @parameterized.expand([("flash_attention_2",), ("sdpa",), ("eager",)]) def test_generation_beyond_sliding_window(self, attn_implementation: str): """Test that we can correctly generate beyond the sliding window. This is non trivial as we need to correctly slice the attention mask in all cases (because we use a HybridCache). Outputs for every attention functions should be coherent and identical. """ model_id = "google/gemma-3-1b-it" if attn_implementation == "flash_attention_2" and not is_flash_attn_2_available(): self.skipTest("FlashAttention2 is required for this test.") input_text = [ "This is a nice place. 
" * 800 + "I really enjoy the scenery,", # This is larger than 4096 tokens "A list of colors: red, blue", # This will almost all be padding tokens ] tokenizer = AutoTokenizer.from_pretrained(model_id, padding="left") inputs = tokenizer(input_text, padding=True, return_tensors="pt").to(torch_device) model = AutoModelForCausalLM.from_pretrained( model_id, attn_implementation=attn_implementation, dtype=torch.float16 ).to(torch_device) # Make sure prefill is larger than sliding window input_size = inputs.input_ids.shape[-1] self.assertTrue(input_size > model.config.sliding_window) # cache_implementation="hybrid" an in the original transformers implementation out = model.generate(**inputs, max_new_tokens=20, do_sample=False, cache_implementation="hybrid")[ :, input_size: ] output_text = tokenizer.batch_decode(out) EXPECTED_COMPLETIONS = [" and I'm going to take a walk.\n\nI really enjoy the scenery, and I'", ", green, yellow, orange, purple, brown, black, white, gray.\n\nI'"] # fmt: skip self.assertEqual(output_text, EXPECTED_COMPLETIONS) @pytest.mark.torch_export_test def test_export_text_only_with_hybrid_cache(self): if not is_torch_greater_or_equal("2.6.0"): self.skipTest(reason="This test requires torch >= 2.6 to run.") from transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM model_id = "google/gemma-3-1b-it" model = AutoModelForCausalLM.from_pretrained(model_id) self.assertEqual(model.config.cache_implementation, "hybrid") # Export + HybridCache model.eval() exportable_module = TorchExportableModuleForDecoderOnlyLM(model) exported_program = exportable_module.export( input_ids=torch.tensor([[1]], dtype=torch.long, device=model.device), cache_position=torch.tensor([0], dtype=torch.long, device=model.device), ) logging.info(f"\nExported program: {exported_program}") # Test generation with the exported model prompt = "What is the capital of France?" max_new_tokens_to_generate = 20 # Generate text with the exported model tokenizer = AutoTokenizer.from_pretrained(model_id) export_generated_text = TorchExportableModuleForDecoderOnlyLM.generate( exported_program, tokenizer, prompt, max_new_tokens=max_new_tokens_to_generate ) logging.info(f"\nExport generated texts: '{export_generated_text}'") input_text = tokenizer(prompt, return_tensors="pt") with torch.no_grad(): eager_outputs = model.generate( **input_text, max_new_tokens=max_new_tokens_to_generate, do_sample=False, # Use greedy decoding to match the exported model cache_implementation="hybrid", ) eager_generated_text = tokenizer.decode(eager_outputs[0], skip_special_tokens=True) logging.info(f"\nEager generated texts: '{eager_generated_text}'") self.assertEqual(export_generated_text, eager_generated_text) def test_dynamic_sliding_window_is_default(self): """ Test that the dynamic sliding window cache (added in #40039) is the default cache implementation for Gemma3 models, despite the fact that Hub checkpoints may have `cache_implementation="hybrid"` (static sliding window). """ model_id = "google/gemma-3-1b-it" model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto") # the default cache is static sliding window self.assertEqual(model.config.cache_implementation, "hybrid") self.assertEqual(model.generation_config.cache_implementation, "hybrid") tokenizer = AutoTokenizer.from_pretrained(model_id) prompt = "What is the capital of France?" 
        model_inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

        forward_outputs = model(**model_inputs)
        self.assertIn("DynamicSlidingWindowLayer", str(forward_outputs.past_key_values))

        generate_outputs = model.generate(
            **model_inputs, max_new_tokens=2, do_sample=False, return_dict_in_generate=True
        )
        self.assertIn("DynamicSlidingWindowLayer", str(generate_outputs.past_key_values))

        # If we manually specify cache_implementation="hybrid", the static sliding window cache is used instead
        generate_outputs = model.generate(
            **model_inputs,
            max_new_tokens=2,
            do_sample=False,
            return_dict_in_generate=True,
            cache_implementation="hybrid",
        )
        self.assertNotIn("DynamicSlidingWindowLayer", str(generate_outputs.past_key_values))
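A note on the sliding-window tests above: generating beyond the window only works if each new query is masked to the most recent `sliding_window` keys. The snippet below is a minimal, self-contained illustration of that masking rule, not Gemma3's actual mask-slicing code; the function name and toy sizes are made up.

import torch

def sliding_window_causal_mask(seq_len: int, window: int) -> torch.Tensor:
    # True where query position i may attend to key position j:
    # causal (j <= i) and within the window (i - j < window).
    i = torch.arange(seq_len).unsqueeze(-1)
    j = torch.arange(seq_len).unsqueeze(0)
    return (j <= i) & (i - j < window)

# With window=3, position 5 attends to positions {3, 4, 5} only, which is why a prompt
# longer than the window can still decode correctly as long as the mask is sliced this way.
mask = sliding_window_causal_mask(seq_len=6, window=3)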
transformers/tests/models/gemma3/test_modeling_gemma3.py/0
{ "file_path": "transformers/tests/models/gemma3/test_modeling_gemma3.py", "repo_id": "transformers", "token_count": 19077 }
503
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import unittest from parameterized import parameterized from transformers import GPTBigCodeConfig, is_torch_available from transformers.testing_utils import cleanup, require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( AutoTokenizer, GPT2TokenizerFast, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, ) from transformers.models.gpt_bigcode.modeling_gpt_bigcode import GPTBigCodeAttention class GPTBigCodeModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, multi_query=True, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = None self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 2 self.pad_token_id = vocab_size - 3 self.multi_query = multi_query def get_large_model_config(self): return GPTBigCodeConfig.from_pretrained("bigcode/gpt_bigcode-santacoder") def prepare_config_and_inputs( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = 
ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config( gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) return ( config, input_ids, input_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def get_config( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): return GPTBigCodeConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_inner=self.intermediate_size, activation_function=self.hidden_act, resid_pdrop=self.hidden_dropout_prob, attn_pdrop=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, attention_softmax_in_fp32=False, scale_attention_softmax_in_fp32=False, multi_query=self.multi_query, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def create_and_check_gpt_bigcode_model(self, config, input_ids, input_mask, token_type_ids, *args): model = GPTBigCodeModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.past_key_values), config.n_layer) def create_and_check_gpt_bigcode_model_past(self, config, input_ids, input_mask, token_type_ids, *args): model = GPTBigCodeModel(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True) outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids) outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size) # append to next input_ids and token_type_ids next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1) output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)["last_hidden_state"] output_from_past = model(next_tokens, token_type_ids=next_token_types, past_key_values=past)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, 
random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_gpt_bigcode_model_attention_mask_past( self, config, input_ids, input_mask, token_type_ids, *args ): model = GPTBigCodeModel(config=config) model.to(torch_device) model.eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = self.seq_length // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass output, past = model(input_ids, attention_mask=attn_mask).to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_gpt_bigcode_model_past_large_inputs( self, config, input_ids, input_mask, token_type_ids, *args ): model = GPTBigCodeModel(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model(input_ids, token_type_ids=token_type_ids, attention_mask=input_mask, use_cache=True) output, past = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_token_types = ids_tensor([self.batch_size, 3], self.type_vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and token_type_ids next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, token_type_ids=next_token_type_ids, attention_mask=next_attention_mask )["last_hidden_state"] output_from_past = model( next_tokens, token_type_ids=next_token_types, attention_mask=next_attention_mask, past_key_values=past )["last_hidden_state"] self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1]) # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, 
output_from_no_past_slice, atol=1e-3)) def create_and_check_lm_head_model(self, config, input_ids, input_mask, token_type_ids, *args): model = GPTBigCodeForCausalLM(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_forward_and_backwards( self, config, input_ids, input_mask, token_type_ids, *args, gradient_checkpointing=False ): model = GPTBigCodeForCausalLM(config) model.to(torch_device) if gradient_checkpointing: model.gradient_checkpointing_enable() result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) result.loss.backward() def create_and_check_gpt_bigcode_for_sequence_classification( self, config, input_ids, input_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels model = GPTBigCodeForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_gpt_bigcode_for_token_classification( self, config, input_ids, input_mask, token_type_ids, mc_token_ids, sequence_labels, *args ): config.num_labels = self.num_labels model = GPTBigCodeForTokenClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_gpt_bigcode_weight_initialization(self, config, *args): model = GPTBigCodeModel(config) model_std = model.config.initializer_range / math.sqrt(2 * model.config.n_layer) for key in model.state_dict(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, } return config, inputs_dict @require_torch class GPTBigCodeModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): # TODO: Update the tests to use valid pretrained models. 
all_model_classes = ( ( GPTBigCodeModel, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": GPTBigCodeModel, "text-classification": GPTBigCodeForSequenceClassification, "text-generation": GPTBigCodeForCausalLM, "token-classification": GPTBigCodeForTokenClassification, "zero-shot": GPTBigCodeForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = False test_missing_keys = False test_pruning = False test_torchscript = False multi_query = True # special case for DoubleHeads model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) return inputs_dict def setUp(self): self.model_tester = GPTBigCodeModelTester(self, multi_query=self.multi_query) self.config_tester = ConfigTester(self, config_class=GPTBigCodeConfig, n_embd=37) def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch cleanup(torch_device) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="MQA models does not support retain_grad") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Contrastive search not supported due to non-standard caching mechanism") def test_contrastive_generate(self): pass @unittest.skip(reason="Contrastive search not supported due to non-standard caching mechanism") def test_contrastive_generate_dict_outputs_use_cache(self): pass @unittest.skip(reason="CPU offload seems to be broken for some reason - tiny models keep hitting corner cases") def test_cpu_offload(self): pass @unittest.skip(reason="Disk offload seems to be broken for some reason - tiny models keep hitting corner cases") def test_disk_offload(self): pass @unittest.skip(reason="BigCodeGPT has a non-standard KV cache format.") def test_past_key_values_format(self): pass @unittest.skip(reason="BigCodeGPT has a non-standard KV cache format and breaks this test.") def test_generate_continue_from_inputs_embeds(self): pass def test_gpt_bigcode_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt_bigcode_model(*config_and_inputs) def test_gpt_bigcode_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt_bigcode_model_past(*config_and_inputs) def test_gpt_bigcode_model_att_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt_bigcode_model_attention_mask_past(*config_and_inputs) def test_gpt_bigcode_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt_bigcode_model_past_large_inputs(*config_and_inputs) def test_gpt_bigcode_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) def test_gpt_bigcode_sequence_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt_bigcode_for_sequence_classification(*config_and_inputs) def test_gpt_bigcode_token_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt_bigcode_for_token_classification(*config_and_inputs) def 
test_gpt_bigcode_gradient_checkpointing(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True) def test_gpt_bigcode_scale_attn_by_inverse_layer_idx(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(scale_attn_by_inverse_layer_idx=True) self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs) def test_gpt_bigcode_reorder_and_upcast_attn(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(reorder_and_upcast_attn=True) self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs) def test_gpt_bigcode_weight_initialization(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt_bigcode_weight_initialization(*config_and_inputs) @require_torch class GPTBigCodeMHAModelTest(GPTBigCodeModelTest): # `parameterized_class` breaks with mixins, so we use inheritance instead multi_query = False @slow @require_torch class GPTBigCodeModelLanguageGenerationTest(unittest.TestCase): def test_generate_simple(self): model = GPTBigCodeForCausalLM.from_pretrained("bigcode/gpt_bigcode-santacoder").to(torch_device) tokenizer = GPT2TokenizerFast.from_pretrained("bigcode/gpt_bigcode-santacoder") input_ids = tokenizer("def print_hello_world():", return_tensors="pt").input_ids.to(torch_device) output_sequence = model.generate(input_ids) output_sentence = tokenizer.decode(output_sequence[0], skip_special_tokens=True) expected_output = 'def print_hello_world():\n print("Hello World!")\n\n\ndef print_hello_world_with_args(name' # fmt: skip self.assertEqual(output_sentence, expected_output) def test_generate_batched(self): tokenizer = GPT2TokenizerFast.from_pretrained("bigcode/gpt_bigcode-santacoder") tokenizer.pad_token = tokenizer.eos_token tokenizer.padding_side = "left" model = GPTBigCodeForCausalLM.from_pretrained("bigcode/gpt_bigcode-santacoder").to(torch_device) inputs = tokenizer(["def print_hello_world():", "def say_hello():"], return_tensors="pt", padding=True).to( torch_device ) outputs = model.generate(**inputs) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) expected_output = [ 'def print_hello_world():\n print("Hello World!")\n\n\ndef print_hello_world_with_args(name', 'def say_hello():\n print("Hello, World!")\n\n\nsay_hello()\n', ] self.assertListEqual(outputs, expected_output) def test_newline_regression(self): """Added to prevent regressions regarding attention (scaling) indicated by excessive newlines""" tokenizer = AutoTokenizer.from_pretrained("bigcode/tiny_starcoder_py") model = GPTBigCodeForCausalLM.from_pretrained("bigcode/tiny_starcoder_py").to(torch_device) input_ids = tokenizer( "Analyze the impact of the COVID-19 pandemic on global economic structures and future business models.\n", return_tensors="pt", ).input_ids.to(torch_device) output_sequence = model.generate(input_ids, max_new_tokens=20, do_sample=False) output_sentence = tokenizer.decode(output_sequence[0], skip_special_tokens=True) expected_output = 'Analyze the impact of the COVID-19 pandemic on global economic structures and future business models.\n\nThe impact of the COVID-19 pandemic on global economic structures and future business' # fmt: skip self.assertEqual(output_sentence, expected_output) @require_torch class GPTBigCodeMQATest(unittest.TestCase): def get_attention(self, multi_query): config = GPTBigCodeConfig.from_pretrained( 
"bigcode/gpt_bigcode-santacoder", multi_query=multi_query, attn_pdrop=0, resid_pdrop=0, ) # We need to set it here as it's normally set by the Model's __init__ config._attn_implementation = "sdpa" return GPTBigCodeAttention(config) @parameterized.expand([(seed, is_train_mode) for seed in range(5) for is_train_mode in [True, False]]) def test_mqa_reduces_to_mha(self, seed, is_train_mode=True): torch.manual_seed(seed) # CREATE MQA AND MHA ATTENTIONS attention_mqa = self.get_attention(True) attention_mha = self.get_attention(False) # ENFORCE MATCHING WEIGHTS num_heads = attention_mqa.num_heads embed_dim = attention_mqa.embed_dim head_dim = attention_mqa.head_dim with torch.no_grad(): mqa_q_weight = attention_mqa.c_attn.weight[:embed_dim, :].view(num_heads, 1, head_dim, embed_dim) mqa_kv_weight = attention_mqa.c_attn.weight[embed_dim:, :].view(1, 2, head_dim, embed_dim) mha_c_weight = torch.cat( [mqa_q_weight, mqa_kv_weight.expand(num_heads, 2, head_dim, embed_dim)], dim=1 ).view(3 * num_heads * head_dim, embed_dim) mqa_q_bias = attention_mqa.c_attn.bias[:embed_dim].view(num_heads, 1, head_dim) mqa_kv_bias = attention_mqa.c_attn.bias[embed_dim:].view(1, 2, head_dim) mha_c_bias = torch.cat([mqa_q_bias, mqa_kv_bias.expand(num_heads, 2, head_dim)], dim=1).view( 3 * num_heads * head_dim ) attention_mha.c_attn.weight.copy_(mha_c_weight) attention_mha.c_attn.bias.copy_(mha_c_bias) attention_mha.c_proj.weight.copy_(attention_mqa.c_proj.weight) attention_mha.c_proj.bias.copy_(attention_mqa.c_proj.bias) # PUT THE MODEL INTO THE CORRECT MODE attention_mha.train(is_train_mode) attention_mqa.train(is_train_mode) # RUN AN INPUT THROUGH THE MODELS num_tokens = 5 hidden_states = torch.randn(1, num_tokens, embed_dim) attention_mha_result = attention_mha(hidden_states)[0] attention_mqa_result = attention_mqa(hidden_states)[0] # CHECK THAT ALL OUTPUTS ARE THE SAME torch.testing.assert_close(attention_mha_result, attention_mqa_result, rtol=1e-5, atol=1e-5)
transformers/tests/models/gpt_bigcode/test_modeling_gpt_bigcode.py/0
{ "file_path": "transformers/tests/models/gpt_bigcode/test_modeling_gpt_bigcode.py", "repo_id": "transformers", "token_count": 11543 }
504
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Helium model."""

import unittest

from transformers import AutoModelForCausalLM, AutoTokenizer, HeliumConfig, is_torch_available
from transformers.testing_utils import (
    Expectations,
    require_read_token,
    require_torch,
    slow,
    torch_device,
)

from ...test_configuration_common import ConfigTester
from ..gemma.test_modeling_gemma import GemmaModelTest, GemmaModelTester


if is_torch_available():
    import torch

    from transformers import (
        HeliumForCausalLM,
        HeliumForSequenceClassification,
        HeliumForTokenClassification,
        HeliumModel,
    )


class HeliumModelTester(GemmaModelTester):
    if is_torch_available():
        config_class = HeliumConfig
        model_class = HeliumModel
        for_causal_lm_class = HeliumForCausalLM
        for_sequence_class = HeliumForSequenceClassification
        for_token_class = HeliumForTokenClassification


@require_torch
class HeliumModelTest(GemmaModelTest, unittest.TestCase):
    all_model_classes = (
        (HeliumModel, HeliumForCausalLM, HeliumForSequenceClassification, HeliumForTokenClassification)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": HeliumModel,
            "text-classification": HeliumForSequenceClassification,
            "token-classification": HeliumForTokenClassification,
            "text-generation": HeliumForCausalLM,
            "zero-shot": HeliumForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    _is_stateful = True
    model_split_percents = [0.5, 0.6]

    def setUp(self):
        self.model_tester = HeliumModelTester(self)
        self.config_tester = ConfigTester(self, config_class=HeliumConfig, hidden_size=37)


@slow
# @require_torch_gpu
class HeliumIntegrationTest(unittest.TestCase):
    input_text = ["Hello, today is a great day to"]

    @require_read_token
    def test_model_2b(self):
        model_id = "kyutai/helium-1-preview"
        expected_texts = Expectations(
            {
                ("rocm", (9, 5)): ["Hello, today is a great day to start a new project. I have been working on a new project for a while now, and I"],
                (None, None): ["Hello, today is a great day to start a new project. I have been working on a new project for a while now and I have"],
                ("cuda", 8): ['Hello, today is a great day to start a new project. I have been working on a new project for a while now, and I'],
            }
        )  # fmt: skip
        EXPECTED_TEXTS = expected_texts.get_expectation()

        model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16, revision="refs/pr/1").to(
            torch_device
        )
        tokenizer = AutoTokenizer.from_pretrained(model_id, revision="refs/pr/1")

        inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)

        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        output_text = tokenizer.batch_decode(output, skip_special_tokens=True)

        self.assertEqual(output_text, EXPECTED_TEXTS)
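The `Expectations` mapping used above keys reference outputs by accelerator type and version, with `(None, None)` as a catch-all. The helper below is a simplified stand-in for that lookup, written from scratch rather than taken from `transformers.testing_utils`; the exact resolution order shown is an assumption.

def resolve_expectation(expectations, device=None, version=None):
    # Try the most specific key first, then fall back to the catch-all entry.
    for key in ((device, version), (device, None), (None, None)):
        if key in expectations:
            return expectations[key]
    raise KeyError(f"no expectation matches device={device!r}, version={version!r}")

# Hypothetical usage mirroring the test above:
# resolve_expectation({("cuda", 8): ["a"], (None, None): ["b"]}, "cuda", 8) returns ["a"]
# resolve_expectation({("cuda", 8): ["a"], (None, None): ["b"]}, "rocm", 9) returns ["b"]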
transformers/tests/models/helium/test_modeling_helium.py/0
{ "file_path": "transformers/tests/models/helium/test_modeling_helium.py", "repo_id": "transformers", "token_count": 1487 }
505
# Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_torchvision, require_vision from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_torchvision_available(): from torchvision import transforms if is_vision_available(): from PIL import Image from transformers import IdeficsImageProcessor class IdeficsImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, size=None, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], ): size = size if size is not None else {"shortest_edge": 30} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution # self.size = size self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "image_size": self.image_size, } def get_expected_values(self, image_inputs, batched=False): """ This function computes the expected height and width when providing images to IdeficsImageProcessor, assuming do_resize is set to True with a scalar size and size_divisor. 
""" if not batched: size = self.image_size image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size elif isinstance(image, np.ndarray): h, w = image.shape[0], image.shape[1] else: h, w = image.shape[1], image.shape[2] scale = size / min(w, h) if h < w: newh, neww = size, scale * w else: newh, neww = scale * h, size max_size = int((1333 / 800) * size) if max(newh, neww) > max_size: scale = max_size / max(newh, neww) newh = newh * scale neww = neww * scale newh, neww = int(newh + 0.5), int(neww + 0.5) expected_height, expected_width = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def expected_output_image_shape(self, images): height, width = self.get_expected_values(images, batched=True) return (self.num_channels, height, width) def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class IdeficsImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = IdeficsImageProcessor if is_vision_available() else None def setUp(self): super().setUp() self.image_processor_tester = IdeficsImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "image_size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertNotEqual(image_processor.image_size, 30) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, image_size=42) self.assertEqual(image_processor.image_size, 42) @require_torchvision def test_torchvision_numpy_transforms_equivalency(self): # as we had to reimplement the torchvision transforms using transformers utils we must check # they both do the same image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) image_processor = self.image_processing_class(**self.image_processor_dict, return_tensors="pt") print(image_inputs) def convert_to_rgb(image): # `image.convert("RGB")` would only work for .jpg images, as it creates a wrong background # for transparent images. 
The call to `alpha_composite` handles this case if image.mode == "RGB": return image image_rgba = image.convert("RGBA") background = Image.new("RGBA", image_rgba.size, (255, 255, 255)) alpha_composite = Image.alpha_composite(background, image_rgba) alpha_composite = alpha_composite.convert("RGB") return alpha_composite image_size = image_processor.image_size image_mean = image_processor.image_mean image_std = image_processor.image_std transform = transforms.Compose( [ convert_to_rgb, transforms.Resize((image_size, image_size), interpolation=transforms.InterpolationMode.BICUBIC), transforms.ToTensor(), transforms.Normalize(mean=image_mean, std=image_std), ] ) pixel_values_transform_implied = image_processor(image_inputs, transform=None, return_tensors="pt") pixel_values_transform_supplied = image_processor(image_inputs, transform=transform, return_tensors="pt") torch.testing.assert_close(pixel_values_transform_implied, pixel_values_transform_supplied, rtol=0.0, atol=0.0) @unittest.skip(reason="not supported") def test_call_numpy(self): pass @unittest.skip(reason="not supported") def test_call_numpy_4_channels(self): pass @unittest.skip(reason="not supported") def test_call_pil(self): pass @unittest.skip(reason="not supported") def test_call_pytorch(self): pass
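A worked example of the resize rule that `get_expected_values` above encodes: scale so the shortest edge matches `size`, cap the longest edge at `(1333 / 800) * size`, round, and snap both dimensions down to a multiple of `size_divisor`. The helper below is a from-scratch restatement on toy numbers, not the processor's implementation.

def expected_hw(h, w, size=30, size_divisor=1):
    # Scale the shorter side up/down to `size`.
    scale = size / min(h, w)
    newh, neww = h * scale, w * scale
    # Cap the longer side at the (1333 / 800) aspect limit used above.
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

# e.g. a 60x200 input with size=30 first becomes 30x100, then is capped by the
# 49-pixel long-side limit (int(1333 / 800 * 30) == 49): expected_hw(60, 200) == (15, 49)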
transformers/tests/models/idefics/test_image_processing_idefics.py/0
{ "file_path": "transformers/tests/models/idefics/test_image_processing_idefics.py", "repo_id": "transformers", "token_count": 3273 }
506
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import JanusImageProcessor if is_torchvision_available(): from transformers import JanusImageProcessorFast class JanusImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, image_size=384, min_resolution=30, max_resolution=200, do_resize=True, size=None, do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True, ): size = size if size is not None else {"height": 384, "width": 384} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTester.prepare_image_inputs def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class JanusImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = JanusImageProcessor if is_vision_available() else None fast_image_processing_class = JanusImageProcessorFast if is_torchvision_available() else None # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.setUp with CLIP->Janus def setUp(self): super().setUp() self.image_processor_tester = JanusImageProcessingTester(self) @property # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.image_processor_dict def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_normalize")) 
self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 384, "width": 384}) self.assertEqual(image_processor.image_mean, [0.48145466, 0.4578275, 0.40821073]) image_processor = image_processing_class.from_dict( self.image_processor_dict, size=42, image_mean=[1.0, 2.0, 1.0] ) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) self.assertEqual(image_processor.image_mean, [1.0, 2.0, 1.0]) def test_call_pil(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test Non batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = (1, 3, 384, 384) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 3, 384, 384) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_call_numpy(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = (1, 3, 384, 384) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 3, 384, 384) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_call_pytorch(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = (1, 3, 384, 384) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 3, 384, 384) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_nested_input(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) # Test batched as a list of images. 
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 3, 384, 384) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched as a nested list of images, where each sublist is one batch. image_inputs_nested = [image_inputs[:3], image_inputs[3:]] encoded_images_nested = image_processing(image_inputs_nested, return_tensors="pt").pixel_values expected_output_image_shape = (7, 3, 384, 384) self.assertEqual(tuple(encoded_images_nested.shape), expected_output_image_shape) # Image processor should return same pixel values, independently of input format. self.assertTrue((encoded_images_nested == encoded_images).all()) @require_vision @require_torch def test_slow_fast_equivalence_batched(self): if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop: self.skipTest( reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors" ) dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) encoding_slow = image_processor_slow(dummy_images, return_tensors=None) encoding_fast = image_processor_fast(dummy_images, return_tensors=None) # Overwrite as the outputs are not always all of the same shape (kept for BC) for i in range(len(encoding_slow.pixel_values)): self._assert_slow_fast_tensors_equivalence( torch.from_numpy(encoding_slow.pixel_values[i]), encoding_fast.pixel_values[i] ) @require_vision @require_torch def test_slow_fast_equivalence_postprocess(self): dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) dummy_images = [image / 255.0 for image in dummy_images] image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) encoding_slow = image_processor_slow.postprocess(dummy_images, return_tensors=None) encoding_fast = image_processor_fast.postprocess(dummy_images, return_tensors=None) # Overwrite as the outputs are not always all of the same shape (kept for BC) for i in range(len(encoding_slow.pixel_values)): self._assert_slow_fast_tensors_equivalence( torch.from_numpy(encoding_slow.pixel_values[i]).float(), encoding_fast.pixel_values[i].float() ) @unittest.skip(reason="Not supported") def test_call_numpy_4_channels(self): pass
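`test_nested_input` above checks that a flat list and a nested list of the same images produce identical pixel values. The sketch below spells out the flattening invariant that relies on; the helper name is made up and this is not the processor's own code.

def flatten_nested_images(images):
    # Accept either a flat list of images or a list of sub-batches and return one flat list.
    if images and isinstance(images[0], (list, tuple)):
        return [image for batch in images for image in batch]
    return list(images)

# Both calls below therefore describe the same 7-image batch:
# flatten_nested_images(images) == flatten_nested_images([images[:3], images[3:]])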
transformers/tests/models/janus/test_image_processing_janus.py/0
{ "file_path": "transformers/tests/models/janus/test_image_processing_janus.py", "repo_id": "transformers", "token_count": 4634 }
507
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Llama4 model."""

import unittest

from transformers import is_torch_available
from transformers.testing_utils import (
    Expectations,
    cleanup,
    require_read_token,
    require_torch_large_accelerator,
    slow,
    torch_device,
)


if is_torch_available():
    import torch

    from transformers import (
        Llama4ForConditionalGeneration,
        Llama4Processor,
    )


@slow
@require_torch_large_accelerator
@require_read_token
class Llama4IntegrationTest(unittest.TestCase):
    model_id = "meta-llama/Llama-4-Scout-17B-16E"

    @classmethod
    def setUpClass(cls):
        cls.model = Llama4ForConditionalGeneration.from_pretrained(
            "meta-llama/Llama-4-Scout-17B-16E",
            device_map="auto",
            dtype=torch.float32,
            attn_implementation="eager",
        )

    def setUp(self):
        self.processor = Llama4Processor.from_pretrained("meta-llama/Llama-4-Scout-17B-16E", padding_side="left")
        url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png"
        self.messages_1 = [
            {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
            {
                "role": "user",
                "content": [
                    {"type": "image", "url": url},
                    {"type": "text", "text": "What is shown in this image?"},
                ],
            },
        ]

        self.messages_2 = [
            {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "url": "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png",
                    },
                    {"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
                    {"type": "text", "text": "Are these images identical?"},
                ],
            },
        ]

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    def test_model_17b_16e_fp32(self):
        EXPECTED_TEXTS = Expectations(
            {
                ("xpu", 3): ['system\n\nYou are a helpful assistant.user\n\nWhat is shown in this image?assistant\n\nThe image shows a cow standing on a beach with a blue sky and a body of water in the background. The cow is brown with a white face'],
                ("cuda", None): ['system\n\nYou are a helpful assistant.user\n\nWhat is shown in this image?assistant\n\nThe image shows a cow standing on a beach, with a blue sky and a body of water in the background. The cow is brown with a white'],
            }
        )  # fmt: skip
        EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()

        inputs = self.processor.apply_chat_template(
            self.messages_1, tokenize=True, add_generation_prompt=True, return_tensors="pt", return_dict=True
        ).to(device=torch_device, dtype=self.model.dtype)

        output = self.model.generate(**inputs, max_new_tokens=30, do_sample=False)
        output_text = self.processor.batch_decode(output, skip_special_tokens=True)
        print(output_text)
        self.assertEqual(output_text, EXPECTED_TEXT)

    def test_model_17b_16e_batch(self):
        inputs = self.processor.apply_chat_template(
            [self.messages_1, self.messages_2],
            tokenize=True,
            return_dict=True,
            return_tensors="pt",
            padding=True,
            add_generation_prompt=True,
        ).to(device=torch_device, dtype=torch.float32)

        output = self.model.generate(**inputs, max_new_tokens=30, do_sample=False)
        output_text = self.processor.batch_decode(output, skip_special_tokens=True)

        EXPECTED_TEXTS = [
            'system\n\nYou are a helpful assistant.user\n\nWhat is shown in this image?assistant\n\nThe image shows a cow standing on a beach, with a blue sky and a body of water in the background. The cow is brown with a white',
            'system\n\nYou are a helpful assistant.user\n\nAre these images identical?assistant\n\nNo, these images are not identical. The first image shows a cow standing on a beach with a blue sky and a white cloud in the background.',
        ]  # fmt: skip
        self.assertEqual(output_text, EXPECTED_TEXTS)
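Why the processor above is loaded with `padding_side="left"`: for decoder-only generation, new tokens are appended on the right, so shorter prompts must be padded on the left to keep every sequence's final position a real token. A toy illustration, with a hypothetical pad id of 0:

short, long = [5, 6], [7, 8, 9, 10]
left_padded = [[0, 0] + short, long]            # [[0, 0, 5, 6], [7, 8, 9, 10]]
attention_mask = [[0, 0, 1, 1], [1, 1, 1, 1]]   # pads masked out, last column all real
# With right padding the shorter row would end in pad tokens, and greedy decoding would
# condition on pad positions when producing the next token.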
transformers/tests/models/llama4/test_modeling_llama4.py/0
{ "file_path": "transformers/tests/models/llama4/test_modeling_llama4.py", "repo_id": "transformers", "token_count": 2207 }
508
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch M2M100 model."""

import copy
import tempfile
import unittest

import pytest

from transformers import M2M100Config, is_torch_available
from transformers.testing_utils import (
    require_flash_attn,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    require_torch_fp16,
    require_torch_gpu,
    slow,
    torch_device,
)
from transformers.utils import cached_property

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import M2M100ForConditionalGeneration, M2M100Model, M2M100Tokenizer
    from transformers.models.m2m_100.modeling_m2m_100 import M2M100Decoder, M2M100Encoder


def prepare_m2m_100_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }


class M2M100ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=50,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_m2m_100_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return M2M100Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = M2M100Model(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and attention masks
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = M2M100Model(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = M2M100Encoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder =
M2M100Decoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class M2M100ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( M2M100Model, M2M100ForConditionalGeneration, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": M2M100Model, "summarization": M2M100ForConditionalGeneration, "text2text-generation": M2M100ForConditionalGeneration, "translation": M2M100ForConditionalGeneration, } if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = True test_pruning = False test_missing_keys = False # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if pipeline_test_case_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. return True return False def setUp(self): self.model_tester = M2M100ModelTester(self) self.config_tester = ConfigTester(self, config_class=M2M100Config) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (M2M100Model, M2M100ForConditionalGeneration): model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = M2M100ForConditionalGeneration(config).eval().to(torch_device) model.half() model.generate(input_ids, 
attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) @unittest.skip( reason="This architecture has tied weights by default and there is no way to remove it, check: https://github.com/huggingface/transformers/pull/31771#issuecomment-2210915245" ) def test_load_save_without_tied_weights(self): pass def _long_tensor(tok_lst): return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) TOLERANCE = 1e-4 @require_torch @require_sentencepiece @require_tokenizers @slow class M2M100ModelIntegrationTests(unittest.TestCase): @cached_property def default_tokenizer(self): return M2M100Tokenizer.from_pretrained("facebook/m2m100_418M") def test_inference_no_head(self): model = M2M100Model.from_pretrained("facebook/m2m100_418M").to(torch_device) input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]]) decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]]) inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids) with torch.no_grad(): output = model(**inputs_dict)[0] expected_shape = torch.Size((1, 11, 1024)) self.assertEqual(output.shape, expected_shape) # change to expected output here expected_slice = torch.tensor( [[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]]], device=torch_device, ) torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE) def test_inference_head(self): model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device) # change to intended input input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]]) decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]]) inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids) with torch.no_grad(): output = model(**inputs_dict)[0] expected_shape = torch.Size((1, 11, model.config.vocab_size)) self.assertEqual(output.shape, expected_shape) # change to expected output here expected_slice = torch.tensor( [[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]]], device=torch_device, ) torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE) def test_seq_to_seq_generation(self): model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device) tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en") src_fr = [ "L'affaire NSA souligne l'absence totale de débat sur le renseignement", "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.", "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent" " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de" " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.", ] # The below article tests that we don't add any hypotheses outside of the top n_beams dct = tokenizer(src_fr, padding=True, return_tensors="pt") hypotheses_batch = model.generate( input_ids=dct["input_ids"].to(torch_device), attention_mask=dct["attention_mask"].to(torch_device), num_beams=5, forced_bos_token_id=tokenizer.get_lang_id("en"), ) expected_en = [ "</s> __en__ " "The NSA case highlights the total absence of intelligence debate" 
"</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "</s> __en__ " "I think there are two levels of response from the French government." "</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "</s> __en__ " "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S." " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all" " communications in France." "</s>", ] generated = tokenizer.batch_decode(hypotheses_batch) assert generated == expected_en @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_seq_to_seq_generation(self): """ Overwriting the common test as the test is flaky on tiny models """ model = M2M100ForConditionalGeneration.from_pretrained( "facebook/m2m100_418M", attn_implementation="flash_attention_2", dtype=torch.float16 ).to(torch_device) tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en") src_fr = [ "L'affaire NSA souligne l'absence totale de débat sur le renseignement", "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.", "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent" " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de" " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.", ] # The below article tests that we don't add any hypotheses outside of the top n_beams dct = tokenizer(src_fr, padding=True, return_tensors="pt") hypotheses_batch = model.generate( input_ids=dct["input_ids"].to(torch_device), attention_mask=dct["attention_mask"].to(torch_device), num_beams=5, forced_bos_token_id=tokenizer.get_lang_id("en"), ) expected_en = [ "The NSA case highlights the total absence of intelligence debate", "I think there are two levels of response from the French government.", "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S." " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all" " communications in France.", ] generated = tokenizer.batch_decode( hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True ) assert generated == expected_en
transformers/tests/models/m2m_100/test_modeling_m2m_100.py/0
{ "file_path": "transformers/tests/models/m2m_100/test_modeling_m2m_100.py", "repo_id": "transformers", "token_count": 8724 }
509
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Mask2Former model.""" import unittest import numpy as np import pytest from tests.test_modeling_common import floats_tensor from transformers import AutoModelForImageClassification, Mask2FormerConfig, is_torch_available, is_vision_available from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_4 from transformers.testing_utils import ( Expectations, require_timm, require_torch, require_torch_accelerator, require_torch_fp16, require_torch_multi_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel if is_vision_available(): from transformers import Mask2FormerImageProcessor if is_vision_available(): from PIL import Image class Mask2FormerModelTester: def __init__( self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64, num_attention_heads=4, num_hidden_layers=2, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_auxiliary_loss = use_auxiliary_loss self.num_queries = num_queries self.num_channels = num_channels self.min_size = min_size self.max_size = max_size self.num_labels = num_labels self.hidden_dim = hidden_dim self.mask_feature_size = hidden_dim self.num_attention_heads = num_attention_heads self.num_hidden_layers = num_hidden_layers def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to( torch_device ) pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device) mask_labels = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5 ).float() class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long() config = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def get_config(self): config = Mask2FormerConfig( hidden_size=self.hidden_dim, num_attention_heads=self.num_attention_heads, num_hidden_layers=self.num_hidden_layers, encoder_feedforward_dim=16, dim_feedforward=32, num_queries=self.num_queries, num_labels=self.num_labels, decoder_layers=2, encoder_layers=2, feature_size=16, ) config.num_queries = self.num_queries config.num_labels = self.num_labels config.backbone_config.embed_dim = 16 config.backbone_config.depths = [1, 1, 1, 1] config.backbone_config.hidden_size = 16 config.backbone_config.num_channels = self.num_channels config.backbone_config.num_heads = [1, 1, 2, 2] config.backbone = None config.hidden_dim = 
self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertEqual(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

            self.parent.assertTrue(result.loss is not None)
            self.parent.assertEqual(result.loss.shape, torch.Size([]))


@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"image-feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torch_exportable = True

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
self.config_tester.run_common_tests() def test_mask2former_model(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=False) def test_mask2former_instance_segmentation_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs) @unittest.skip(reason="Mask2Former does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="Mask2Former is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="Mask2Former does not use token embeddings") def test_resize_tokens_embeddings(self): pass @require_torch_multi_gpu @unittest.skip( reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def test_multi_gpu_data_parallel_forward(self): pass @slow def test_model_from_pretrained(self): for model_name in ["facebook/mask2former-swin-small-coco-instance"]: model = Mask2FormerModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_model_with_labels(self): size = (self.model_tester.min_size,) * 2 inputs = { "pixel_values": torch.randn((2, 3, *size), device=torch_device), "mask_labels": torch.randn((2, 10, *size), device=torch_device), "class_labels": torch.zeros(2, 10, device=torch_device).long(), } config = self.model_tester.get_config() model = Mask2FormerForUniversalSegmentation(config).to(torch_device) outputs = model(**inputs) self.assertTrue(outputs.loss is not None) def test_hidden_states_output(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=True) def test_attention_outputs(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config).to(torch_device) outputs = model(**inputs, output_attentions=True) self.assertTrue(outputs.attentions is not None) def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") model_class = self.all_model_classes[1] config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs() model = model_class(config) model.to(torch_device) model.train() loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss loss.backward() def test_retain_grad_hidden_states_attentions(self): model_class = self.all_model_classes[1] config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs() config.output_hidden_states = True config.output_attentions = True model = model_class(config).to(torch_device) model.train() outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels) encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() attentions = outputs.attentions[0] attentions.retain_grad() 
outputs.loss.backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(pixel_decoder_hidden_states.grad) self.assertIsNotNone(transformer_decoder_hidden_states.grad) self.assertIsNotNone(attentions.grad) @require_timm def test_backbone_selection(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() config.backbone_config = None config.backbone_kwargs = {"out_indices": [1, 2, 3]} config.use_pretrained_backbone = True # Load a timm backbone # We can't load transformer checkpoint with timm backbone, as we can't specify features_only and out_indices config.backbone = "resnet18" config.use_timm_backbone = True for model_class in self.all_model_classes: model = model_class(config).to(torch_device).eval() if model.__class__.__name__ == "Mask2FormerModel": self.assertEqual(model.pixel_level_module.encoder.out_indices, [1, 2, 3]) elif model.__class__.__name__ == "Mask2FormerForUniversalSegmentation": self.assertEqual(model.model.pixel_level_module.encoder.out_indices, [1, 2, 3]) # Load a HF backbone config.backbone = "microsoft/resnet-18" config.use_timm_backbone = False for model_class in self.all_model_classes: model = model_class(config).to(torch_device).eval() if model.__class__.__name__ == "Mask2FormerModel": self.assertEqual(model.pixel_level_module.encoder.out_indices, [1, 2, 3]) elif model.__class__.__name__ == "Mask2FormerForUniversalSegmentation": self.assertEqual(model.model.pixel_level_module.encoder.out_indices, [1, 2, 3]) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if ( "self_attn.sampling_offsets.bias" in name or "self_attn.value_proj.weight" in name or "self_attn.output_proj.weight" in name ): continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_initialization_pretrained_backbone(self): backbone_name = "microsoft/resnet-18" # load Mask2Former config with a pretrained backbone config = Mask2FormerConfig( backbone=backbone_name, use_pretrained_backbone=True, ) # load pretrained backbone backbone_model = AutoModelForImageClassification.from_pretrained(backbone_name, device_map=torch_device) def params_match(params1, params2): return all((p1 == p2).all() for p1, p2 in zip(params1, params2)) for model_class in self.all_model_classes: model = model_class(config).to(torch_device).eval() if model.__class__.__name__ == "Mask2FormerModel": self.assertTrue( params_match( backbone_model.base_model.encoder.parameters(), model.pixel_level_module.encoder.encoder.parameters(), ) ) elif model.__class__.__name__ == "Mask2FormerForUniversalSegmentation": self.assertTrue( params_match( backbone_model.base_model.encoder.parameters(), model.model.pixel_level_module.encoder.encoder.parameters(), ) ) TOLERANCE = 2e-4 # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_vision @slow class Mask2FormerModelIntegrationTest(unittest.TestCase): @cached_property def model_checkpoints(self): return "facebook/mask2former-swin-small-coco-instance" @cached_property def default_image_processor(self): return 
Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None def test_inference_no_head(self): model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(image, return_tensors="pt").to(torch_device) inputs_shape = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(inputs_shape, (1, 3, 384, 384)) with torch.no_grad(): outputs = model(**inputs) expected_slice_hidden_state = torch.tensor( [ [-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197], ] ).to(torch_device) torch.testing.assert_close( outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE, rtol=TOLERANCE, ) expectations = Expectations( { (None, None): [ [0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951], ], ("cuda", 8): [ [0.8974, 1.1848, 1.1777], [1.1933, 1.5041, 1.5128], [1.1154, 1.4487, 1.4950], ], } ) expected_slice_hidden_state = torch.tensor(expectations.get_expectation()).to(torch_device) torch.testing.assert_close(outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE,rtol=TOLERANCE) # fmt: skip expectations = Expectations( { (None, None): [ [2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999], ], ("cuda", 8): [ [2.1153, 1.7004, -0.8604], [1.5807, 1.8007, -0.9354], [1.6040, 1.7498, -0.6001], ], } ) expected_slice_hidden_state = torch.tensor(expectations.get_expectation()).to(torch_device) torch.testing.assert_close(outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE, rtol=TOLERANCE) # fmt: skip def test_inference_universal_segmentation_head(self): model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval() image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(image, return_tensors="pt").to(torch_device) inputs_shape = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(inputs_shape, (1, 3, 384, 384)) with torch.no_grad(): outputs = model(**inputs) # masks_queries_logits masks_queries_logits = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) expectations = Expectations( { (None, None): [ [-8.7839, -9.0056, -8.8121], [-7.4104, -7.0313, -6.5401], [-6.6105, -6.3427, -6.4675], ], ("cuda", 8): [ [-8.7839, -9.0056, -8.8122], [-7.4104, -7.0313, -6.5401], [-6.6105, -6.3428, -6.4675], ], } ) expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device) torch.testing.assert_close(masks_queries_logits[0, 0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE) # class_queries_logits class_queries_logits = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1)) expectations = Expectations( { (None, None): [ [1.8324, -8.0835, -4.1922], [0.8450, -9.0050, -3.6053], [0.3045, -7.7293, -3.0275], ], ("cuda", 8): [ [1.8324, -8.0835, -4.1922], [0.8450, -9.0050, -3.6053], [0.3045, -7.7293, -3.0275], ], } ) expected_slice = 
torch.tensor(expectations.get_expectation()).to(torch_device) torch.testing.assert_close( outputs.class_queries_logits[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE ) @require_torch_accelerator @require_torch_fp16 def test_inference_fp16(self): model = ( Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints) .to(torch_device, dtype=torch.float16) .eval() ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(image, return_tensors="pt").to(torch_device, dtype=torch.float16) with torch.no_grad(): _ = model(**inputs) def test_with_segmentation_maps_and_loss(self): model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval() image_processor = self.default_image_processor inputs = image_processor( [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))], segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)], return_tensors="pt", ) inputs["pixel_values"] = inputs["pixel_values"].to(torch_device) inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]] inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]] with torch.no_grad(): outputs = model(**inputs) self.assertTrue(outputs.loss is not None) @pytest.mark.torch_export_test def test_export(self): if not is_torch_greater_or_equal_than_2_4: self.skipTest(reason="This test requires torch >= 2.4 to run.") model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval() image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(image, return_tensors="pt").to(torch_device) exported_program = torch.export.export( model, args=(inputs["pixel_values"], inputs["pixel_mask"]), strict=True, ) with torch.no_grad(): eager_outputs = model(**inputs) exported_outputs = exported_program.module().forward(inputs["pixel_values"], inputs["pixel_mask"]) self.assertEqual(eager_outputs.masks_queries_logits.shape, exported_outputs.masks_queries_logits.shape) torch.testing.assert_close( eager_outputs.masks_queries_logits, exported_outputs.masks_queries_logits, rtol=TOLERANCE, atol=TOLERANCE ) self.assertEqual(eager_outputs.class_queries_logits.shape, exported_outputs.class_queries_logits.shape) torch.testing.assert_close( eager_outputs.class_queries_logits, exported_outputs.class_queries_logits, rtol=TOLERANCE, atol=TOLERANCE )
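# A hypothetical helper (not part of the original tests) condensing the
# single-image inference flow used by the integration tests above; the name
# is illustrative only.
def _run_universal_segmentation(model, image_processor, image):
    inputs = image_processor(image, return_tensors="pt").to(torch_device)
    with torch.no_grad():
        outputs = model(**inputs)
    # Per the shape checks above: masks are (1, num_queries, H // 4, W // 4)
    # and class logits are (1, num_queries, num_labels + 1).
    return outputs.masks_queries_logits, outputs.class_queries_logits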
transformers/tests/models/mask2former/test_modeling_mask2former.py/0
{ "file_path": "transformers/tests/models/mask2former/test_modeling_mask2former.py", "repo_id": "transformers", "token_count": 11395 }
510
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch MLCD model.""" import unittest import requests from PIL import Image from transformers import ( AutoProcessor, MLCDVisionConfig, MLCDVisionModel, is_torch_available, ) from transformers.testing_utils import ( require_torch, slow, torch_device, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor if is_torch_available(): import torch class MLCDVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in MLCD, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return MLCDVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = MLCDVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class MLCDVisionModelTest(ModelTesterMixin, unittest.TestCase): """ Model tester for 
`MLCDVisionModel`. """ all_model_classes = (MLCDVisionModel,) if is_torch_available() else () test_pruning = False test_head_masking = False test_torchscript = False test_resize_embeddings = False test_torch_exportable = True def setUp(self): self.model_tester = MLCDVisionModelTester(self) self.config_tester = ConfigTester(self, config_class=MLCDVisionConfig, has_text_modality=False) def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (torch.nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, torch.nn.Linear)) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad and "class_pos_emb" not in name: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @require_torch class MLCDVisionModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "DeepGlint-AI/mlcd-vit-bigG-patch14-448" model = MLCDVisionModel.from_pretrained(model_name).to(torch_device) processor = AutoProcessor.from_pretrained(model_name) # process single image url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(images=image, return_tensors="pt") # move inputs to the same device as the model inputs = {k: v.to(torch_device) for k, v in inputs.items()} # forward pass with torch.no_grad(): outputs = model(**inputs, output_attentions=True) last_hidden_state = outputs.last_hidden_state last_attention = outputs.attentions[-1] # verify the shapes of last_hidden_state and last_attention self.assertEqual( last_hidden_state.shape, torch.Size([1, 1025, 1664]), ) self.assertEqual( last_attention.shape, torch.Size([1, 16, 1025, 1025]), ) # verify the values of last_hidden_state and last_attention # fmt: off expected_partial_5x5_last_hidden_state = torch.tensor( [ [-0.8978, -0.1181, 0.4769, 0.4761, -0.5779], [ 0.2640, -2.6150, 0.4853, 0.5743, -1.1003], [ 0.3314, -0.3328, -0.4305, -0.1874, -0.7701], [-1.5174, -1.0238, -1.1854, 0.1749, -0.8786], [ 0.2323, -0.8346, -0.9680, -0.2951, 0.0867], ] ).to(torch_device) expected_partial_5x5_last_attention = torch.tensor( [ [2.0930e-01, 6.3073e-05, 1.4717e-03, 2.6881e-05, 3.0513e-03], [1.5828e-04, 2.1056e-03, 4.6784e-04, 1.8276e-03, 5.3233e-04], [5.7824e-04, 1.1446e-03, 1.3854e-03, 1.1775e-03, 1.2750e-03], [9.6343e-05, 1.6365e-03, 2.9066e-04, 3.1089e-03, 2.0607e-04], [6.2688e-04, 1.1656e-03, 1.5030e-03, 8.2819e-04, 2.6992e-03], ] ).to(torch_device) # fmt: on torch.testing.assert_close( last_hidden_state[0, :5, :5], expected_partial_5x5_last_hidden_state, rtol=1e-3, atol=1e-3 ) torch.testing.assert_close( last_attention[0, 0, :5, :5], expected_partial_5x5_last_attention, rtol=1e-4, atol=1e-4 )
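# A hypothetical sketch (not in the original file) of the attention-extraction
# flow verified by the integration test above; the helper name is illustrative.
def _extract_last_attention(model, processor, image):
    inputs = processor(images=image, return_tensors="pt")
    inputs = {k: v.to(torch_device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs, output_attentions=True)
    # The last-layer attention has shape (batch, num_heads, seq_len, seq_len).
    return outputs.last_hidden_state, outputs.attentions[-1]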
transformers/tests/models/mlcd/test_modeling_mlcd.py/0
{ "file_path": "transformers/tests/models/mlcd/test_modeling_mlcd.py", "repo_id": "transformers", "token_count": 3818 }
511
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest from transformers import GemmaTokenizer, PaliGemmaProcessor from transformers.testing_utils import get_tests_dir, require_torch, require_vision from transformers.utils import is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from transformers import SiglipImageProcessor SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") @require_vision class PaliGemmaProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = PaliGemmaProcessor @classmethod def setUpClass(cls): cls.tmpdirname = tempfile.mkdtemp() image_processor = SiglipImageProcessor.from_pretrained("google/siglip-so400m-patch14-384") image_processor.image_seq_length = 0 # TODO: raushan fix me in #37342 tokenizer = GemmaTokenizer(SAMPLE_VOCAB, keep_accents=True) tokenizer.add_special_tokens({"additional_special_tokens": ["<image>"]}) processor = PaliGemmaProcessor(image_processor=image_processor, tokenizer=tokenizer) processor.save_pretrained(cls.tmpdirname) cls.image_token = processor.image_token @classmethod def tearDownClass(cls): shutil.rmtree(cls.tmpdirname, ignore_errors=True) # Copied from tests.models.llava.test_processing_llava.LlavaProcessorTest.test_get_num_vision_tokens def test_get_num_vision_tokens(self): "Tests general functionality of the helper used internally in vLLM" processor = self.get_processor() output = processor._get_num_multimodal_tokens(image_sizes=[(100, 100), (300, 100), (500, 30)]) self.assertTrue("num_image_tokens" in output) self.assertEqual(len(output["num_image_tokens"]), 3) self.assertTrue("num_image_patches" in output) self.assertEqual(len(output["num_image_patches"]), 3) @require_torch @require_vision def test_image_seq_length(self): input_str = "lower newer" image_input = self.prepare_image_inputs() image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer", max_length=112, padding="max_length") image_processor.image_seq_length = 14 processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) inputs = processor( text=input_str, images=image_input, return_tensors="pt", max_length=112, padding="max_length" ) self.assertEqual(len(inputs["input_ids"][0]), 112) @require_torch def test_call_with_suffix(self): input_str = "lower newer" suffix = "upper older longer string" image_input = self.prepare_image_inputs() processor = self.get_processor() inputs = processor(text=input_str, images=image_input, suffix=suffix) self.assertTrue("labels" in inputs) self.assertEqual(len(inputs["labels"][0]), len(inputs["input_ids"][0])) inputs = processor(text=input_str, images=image_input, suffix=suffix, return_tensors="pt") self.assertTrue("labels" in inputs) self.assertEqual(len(inputs["labels"][0]), len(inputs["input_ids"][0])) def test_text_with_image_tokens(self): image_processor = self.get_component("image_processor") tokenizer = 
self.get_component("tokenizer")
        processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)

        text_multi_images = "<image><image>Dummy text!"
        text_single_image = "<image>Dummy text!"
        text_no_image = "Dummy text!"
        image = self.prepare_image_inputs()

        out_noimage = processor(text=text_no_image, images=image, return_tensors="np")
        out_single_image = processor(text=text_single_image, images=image, return_tensors="np")
        for k in out_noimage:
            self.assertTrue(out_noimage[k].tolist() == out_single_image[k].tolist())

        out_multiimages = processor(text=text_multi_images, images=[image, image], return_tensors="np")
        out_noimage = processor(text=text_no_image, images=[[image, image]], return_tensors="np")

        # We can't be sure of the user's intention: "one text + two images" may be deliberate,
        # or the user may have forgotten to add the second text
        with self.assertRaises(ValueError):
            out_noimage = processor(text=text_no_image, images=[image, image], return_tensors="np")

        for k in out_noimage:
            self.assertTrue(out_noimage[k].tolist() == out_multiimages[k].tolist())

        text_batched = ["Dummy text!", "Dummy text!"]
        text_batched_with_image = ["<image>Dummy text!", "<image>Dummy text!"]

        out_images = processor(text=text_batched_with_image, images=[image, image], return_tensors="np")
        out_noimage_nested = processor(text=text_batched, images=[[image], [image]], return_tensors="np")
        out_noimage = processor(text=text_batched, images=[image, image], return_tensors="np")
        for k in out_noimage:
            self.assertTrue(out_noimage[k].tolist() == out_images[k].tolist() == out_noimage_nested[k].tolist())
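# A hypothetical snippet (not part of the original tests) showing the
# prompt + suffix encoding exercised by `test_call_with_suffix` above; the
# helper name is illustrative only.
def _encode_with_labels(processor, text, image, suffix):
    out = processor(text=text, images=image, suffix=suffix, return_tensors="pt")
    # As asserted above, `labels` is aligned one-to-one with `input_ids`.
    assert out["labels"].shape[-1] == out["input_ids"].shape[-1]
    return out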
transformers/tests/models/paligemma/test_processing_paligemma.py/0
{ "file_path": "transformers/tests/models/paligemma/test_processing_paligemma.py", "repo_id": "transformers", "token_count": 2193 }
512
# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch PhiMoE model.""" import unittest from transformers import PhimoeConfig, StaticCache, is_torch_available from transformers.testing_utils import ( require_torch, slow, torch_device, ) from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester if is_torch_available(): import torch from transformers import ( AutoTokenizer, PhimoeForCausalLM, PhimoeForSequenceClassification, PhimoeModel, ) end_of_text_token = 32000 class PhimoeMiniWithStaticCache(torch.nn.Module): def __init__(self, model: PhimoeForCausalLM, batch_size: int, max_seq_len: int): super().__init__() self.model = model self.cache = StaticCache(config=model.config, max_cache_len=max_seq_len) def forward( self, input_ids: torch.LongTensor = None, ) -> torch.FloatTensor: return self.model.forward( input_ids=input_ids, use_cache=True, return_dict=True, past_key_values=self.cache, ).logits @staticmethod def generate(model: PhimoeForCausalLM, prompt_tokens: torch.LongTensor, max_seq_len: int) -> list[int]: model = PhimoeMiniWithStaticCache(model, 1, max_seq_len + prompt_tokens.shape[-1]) response_tokens = [] for input_pos in range(prompt_tokens.shape[-1]): result = model.forward( input_ids=prompt_tokens[:, input_pos : input_pos + 1], ) response_tokens.append(prompt_tokens[0][input_pos].item()) current_token = torch.argmax(result[:, -1, :], dim=-1).item() response_tokens.append(current_token) while current_token != end_of_text_token and len(response_tokens) < max_seq_len: result = model.forward( input_ids=torch.tensor([[current_token]], dtype=torch.long), ) current_token = torch.argmax(result[:, -1, :], dim=-1).item() response_tokens.append(current_token) return response_tokens class PhimoeModelTester(CausalLMModelTester): if is_torch_available(): config_class = PhimoeConfig base_model_class = PhimoeModel causal_lm_class = PhimoeForCausalLM sequence_class = PhimoeForSequenceClassification @require_torch class PhimoeModelTest(CausalLMModelTest, unittest.TestCase): all_model_classes = ( (PhimoeModel, PhimoeForCausalLM, PhimoeForSequenceClassification) if is_torch_available() else () ) test_headmasking = False test_pruning = False test_all_params_have_gradient = False model_tester_class = PhimoeModelTester pipeline_model_mapping = ( { "feature-extraction": PhimoeModel, "text-classification": PhimoeForSequenceClassification, "text-generation": PhimoeForCausalLM, "zero-shot": PhimoeForSequenceClassification, } if is_torch_available() else {} ) # TODO (ydshieh): Check this. 
See https://app.circleci.com/pipelines/github/huggingface/transformers/79292/workflows/fa2ba644-8953-44a6-8f67-ccd69ca6a476/jobs/1012905 def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True @slow @require_torch class PhimoeIntegrationTest(unittest.TestCase): def test_model_phimoe_instruct_logits(self): input_ids = { "input_ids": torch.tensor( [[1212, 318, 281, 1672, 2643, 290, 428, 318, 257, 1332]], dtype=torch.long, device=torch_device ) } model = PhimoeForCausalLM.from_pretrained("microsoft/Phi-3.5-MoE-instruct").to(torch_device) model.eval() output = model(**input_ids).logits EXPECTED_OUTPUT = torch.tensor([[-3.5312, -2.5000, -1.2734, 0.3555, -0.7578, -0.4727, 0.5977, -0.4316, 0.2256, -1.2188, -1.6797, 0.9961, 3.7656, 11.3125, -1.3828, -4.8438, -5.7500, -1.9375, 0.7227, -0.3438, -0.2100, -0.4277, -0.0444, -0.5352, -0.6406, -0.1016, -0.4258, -1.0234, 0.4297, -0.6250], [-0.9883, 0.1455, -0.4902, 2.3594, 0.7031, 3.1406, 0.4375, 0.2559, 0.6172, -2.1094, -1.3359, 2.5938, 4.9062, 10.8125, -0.1094, 1.5781, -4.9375, 0.7148, -0.0972, 1.7656, -0.0801, 0.2217, 0.1875, -0.4629, 1.5781, 0.3535, 0.0874, 0.6836, -0.0518, -1.2969]]).to(torch_device) # fmt: skip torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-4, atol=1e-4) def test_phimoe_instruct_generation(self): model = PhimoeForCausalLM.from_pretrained("microsoft/Phi-3.5-MoE-instruct") tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-MoE-instruct") messages = [ { "role": "system", "content": "You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.", }, {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"}, ] inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt") outputs = model.generate(inputs, max_new_tokens=32) output_text = tokenizer.batch_decode(outputs) EXPECTED_OUTPUT = [ "<|system|> You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.<|end|><|user|> Can you provide ways to eat combinations of bananas and dragonfruits?<|end|><|assistant|> Certainly! Bananas and dragonfruits are both delicious and nutritious fruits that can be combined in various ways to create tast" ] self.assertListEqual(output_text, EXPECTED_OUTPUT) def test_phimoe_instruct_with_static_cache(self): model = PhimoeForCausalLM.from_pretrained("microsoft/Phi-3.5-MoE-instruct") tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-MoE-instruct") messages = [ { "role": "system", "content": "You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.", }, {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"}, ] inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt") response_tokens = PhimoeMiniWithStaticCache.generate(model, inputs, 64) output_text = tokenizer.batch_decode(torch.tensor([response_tokens], dtype=torch.long, device=torch_device)) EXPECTED_OUTPUT = [ "<|system|> You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.<|end|><|user|> Can you provide ways to eat combinations of bananas and dragonfruits?<|end|><|assistant|> Certainly! Bananas and dragonfruits are both delicious and nutritious fruits that can" ] self.assertListEqual(output_text, EXPECTED_OUTPUT)
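# A hypothetical sketch (not in the original file) of the chat-template
# generation flow shared by the two instruct tests above; the helper name is
# illustrative only.
def _chat_generate(model, tokenizer, messages, max_new_tokens=32):
    inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
    outputs = model.generate(inputs, max_new_tokens=max_new_tokens)
    return tokenizer.batch_decode(outputs)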
transformers/tests/models/phimoe/test_modeling_phimoe.py/0
{ "file_path": "transformers/tests/models/phimoe/test_modeling_phimoe.py", "repo_id": "transformers", "token_count": 3481 }
513
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch PvtV2 model.""" import inspect import tempfile import unittest from transformers import PvtV2Backbone, PvtV2Config, is_torch_available, is_vision_available from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_accelerator, require_torch_fp16, slow, torch_device, ) from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoImageProcessor, PvtV2ForImageClassification, PvtV2Model if is_vision_available(): from PIL import Image class PvtV2ConfigTester(ConfigTester): def run_common_tests(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_encoder_blocks")) class PvtV2ModelTester(ModelTesterMixin): def __init__( self, parent, batch_size=13, image_size=None, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128], downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], out_indices=[0, 1, 2, 3], is_training=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = 64 if image_size is None else image_size self.num_channels = num_channels self.num_encoder_blocks = num_encoder_blocks self.sr_ratios = sr_ratios self.depths = depths self.hidden_sizes = hidden_sizes self.downsampling_rates = downsampling_rates self.num_attention_heads = num_attention_heads self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.out_indices = out_indices self.num_labels = num_labels self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return PvtV2Config( image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, sr_ratios=self.sr_ratios, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, 
initializer_range=self.initializer_range, out_indices=self.out_indices, ) def create_and_check_model(self, config, pixel_values, labels): model = PvtV2Model(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertIsNotNone(result.last_hidden_state) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch class PvtV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (PvtV2Model, PvtV2ForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": PvtV2Model, "image-classification": PvtV2ForImageClassification} if is_torch_available() else {} ) test_head_masking = False test_pruning = False test_resize_embeddings = False test_torchscript = False has_attentions = False test_torch_exportable = True def setUp(self): self.model_tester = PvtV2ModelTester(self) self.config_tester = PvtV2ConfigTester(self, config_class=PvtV2Config) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_batching_equivalence(self, atol=5e-4, rtol=5e-4): super().test_batching_equivalence(atol=atol, rtol=rtol) @unittest.skip(reason="Pvt-V2 does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Pvt-V2 does not have get_input_embeddings method and get_output_embeddings methods") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="This architecture does not work with using reentrant.") def test_training_gradient_checkpointing(self): # Scenario - 1 default behaviour self.check_training_gradient_checkpointing() @unittest.skip(reason="This architecture does not work with using reentrant.") def test_training_gradient_checkpointing_use_reentrant(self): # Scenario - 2 with `use_reentrant=True` - this is the default value that is used in pytorch's # torch.utils.checkpoint.checkpoint self.check_training_gradient_checkpointing(gradient_checkpointing_kwargs={"use_reentrant": True}) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config=config) for name, param in model.named_parameters(): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = len(self.model_tester.depths) self.assertEqual(len(hidden_states), expected_num_layers) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:]), [ self.model_tester.hidden_sizes[self.model_tester.out_indices[0]], self.model_tester.image_size // 2 ** (2 + self.model_tester.out_indices[0]), self.model_tester.image_size // 2 ** (2 + 
self.model_tester.out_indices[0]), ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if model_class.__name__ in MODEL_MAPPING_NAMES.values(): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) @slow def test_model_from_pretrained(self): model_name = "OpenGVLab/pvt_v2_b0" model = PvtV2Model.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class PvtV2ModelIntegrationTest(unittest.TestCase): @slow def test_inference_image_classification(self): # only resize + normalize image_processor = AutoImageProcessor.from_pretrained("OpenGVLab/pvt_v2_b0") model = PvtV2ForImageClassification.from_pretrained("OpenGVLab/pvt_v2_b0").to(torch_device).eval() image = prepare_img() encoded_inputs = image_processor(images=image, return_tensors="pt") pixel_values = encoded_inputs.pixel_values.to(torch_device) with torch.no_grad(): outputs = model(pixel_values) expected_shape = torch.Size((1, model.config.num_labels)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-1.4192, -1.9158, -0.9702]).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4) @slow def test_inference_model(self): model = PvtV2Model.from_pretrained("OpenGVLab/pvt_v2_b0").to(torch_device).eval() image_processor = AutoImageProcessor.from_pretrained("OpenGVLab/pvt_v2_b0") image = prepare_img() inputs = image_processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) # forward pass with torch.no_grad(): outputs = model(pixel_values) # verify the logits expected_shape = torch.Size((1, 50, 512)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[-0.3086, 1.0402, 1.1816], [-0.2880, 0.5781, 0.6124], [0.1480, 0.6129, -0.0590]] ).to(torch_device) torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4) @slow @require_accelerate @require_torch_accelerator @require_torch_fp16 def test_inference_fp16(self): r""" A small test to make sure that inference work in half precision without any problem. 
""" model = PvtV2ForImageClassification.from_pretrained("OpenGVLab/pvt_v2_b0", dtype=torch.float16) model.to(torch_device) image_processor = AutoImageProcessor.from_pretrained("OpenGVLab/pvt_v2_b0") image = prepare_img() inputs = image_processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device, dtype=torch.float16) # forward pass to make sure inference works in fp16 with torch.no_grad(): _ = model(pixel_values) @require_torch class PvtV2BackboneTest(BackboneTesterMixin, unittest.TestCase): all_model_classes = (PvtV2Backbone,) if is_torch_available() else () has_attentions = False config_class = PvtV2Config def test_config(self): config_class = self.config_class # test default config config = config_class() self.assertIsNotNone(config) num_stages = len(config.depths) if hasattr(config, "depths") else config.num_hidden_layers expected_stage_names = [f"stage{idx}" for idx in range(1, num_stages + 1)] self.assertEqual(config.stage_names, expected_stage_names) self.assertTrue(set(config.out_features).issubset(set(config.stage_names))) # Test out_features and out_indices are correctly set # out_features and out_indices both None config = config_class(out_features=None, out_indices=None) self.assertEqual(config.out_features, [config.stage_names[-1]]) self.assertEqual(config.out_indices, [len(config.stage_names) - 1]) # out_features and out_indices both set config = config_class(out_features=["stage1", "stage2"], out_indices=[0, 1]) self.assertEqual(config.out_features, ["stage1", "stage2"]) self.assertEqual(config.out_indices, [0, 1]) # Only out_features set config = config_class(out_features=["stage2", "stage4"]) self.assertEqual(config.out_features, ["stage2", "stage4"]) self.assertEqual(config.out_indices, [1, 3]) # Only out_indices set config = config_class(out_indices=[0, 2]) self.assertEqual(config.out_features, [config.stage_names[0], config.stage_names[2]]) self.assertEqual(config.out_indices, [0, 2]) # Error raised when out_indices do not correspond to out_features with self.assertRaises(ValueError): config = config_class(out_features=["stage1", "stage2"], out_indices=[0, 2]) def test_config_save_pretrained(self): config_class = self.config_class config_first = config_class(out_indices=[0, 1, 2, 3]) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(tmpdirname) config_second = self.config_class.from_pretrained(tmpdirname) # Fix issue where type switches in the saving process if isinstance(config_second.image_size, list): config_second.image_size = tuple(config_second.image_size) self.assertEqual(config_second.to_dict(), config_first.to_dict()) def setUp(self): self.model_tester = PvtV2ModelTester(self)
transformers/tests/models/pvt_v2/test_modeling_pvt_v2.py/0
{ "file_path": "transformers/tests/models/pvt_v2/test_modeling_pvt_v2.py", "repo_id": "transformers", "token_count": 6677 }
514
# Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import tempfile import unittest import numpy as np import requests from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from transformers.models.qwen2_vl.image_processing_qwen2_vl import smart_resize from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import Qwen2VLImageProcessor if is_torchvision_available(): from transformers import Qwen2VLImageProcessorFast class Qwen2VLImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, num_frames=10, min_resolution=56, max_resolution=1024, min_pixels=56 * 56, max_pixels=28 * 28 * 1280, do_normalize=True, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD, do_resize=True, patch_size=14, temporal_patch_size=2, merge_size=2, do_convert_rgb=True, ): self.parent = parent self.batch_size = batch_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.num_channels = num_channels self.num_frames = num_frames self.image_mean = OPENAI_CLIP_MEAN self.image_std = OPENAI_CLIP_STD self.min_pixels = min_pixels self.max_pixels = max_pixels self.patch_size = patch_size self.temporal_patch_size = temporal_patch_size self.merge_size = merge_size self.do_resize = do_resize self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "image_mean": self.image_mean, "image_std": self.image_std, "min_pixels": self.min_pixels, "max_pixels": self.max_pixels, "patch_size": self.patch_size, "temporal_patch_size": self.temporal_patch_size, "merge_size": self.merge_size, } def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): images = prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) return [[image] for image in images] def prepare_video_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_video_inputs( batch_size=self.batch_size, num_channels=self.num_channels, num_frames=self.num_frames, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class Qwen2VLImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = Qwen2VLImageProcessor if is_vision_available() else None fast_image_processing_class = Qwen2VLImageProcessorFast if is_torchvision_available() else None def 
setUp(self): super().setUp() self.image_processor_tester = Qwen2VLImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "min_pixels")) self.assertTrue(hasattr(image_processing, "max_pixels")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) self.assertTrue(hasattr(image_processing, "patch_size")) self.assertTrue(hasattr(image_processing, "temporal_patch_size")) self.assertTrue(hasattr(image_processing, "merge_size")) def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.min_pixels, 56 * 56) self.assertEqual(image_processor.max_pixels, 28 * 28 * 1280) image_processor = image_processing_class.from_dict( self.image_processor_dict, min_pixels=256 * 256, max_pixels=640 * 640 ) self.assertEqual(image_processor.min_pixels, 256 * 256) self.assertEqual(image_processor.max_pixels, 640 * 640) def test_select_best_resolution(self): # Test with a final resize resolution best_resolution = smart_resize(561, 278, factor=28) self.assertEqual(best_resolution, (560, 280)) def test_call_pil(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) for image in image_inputs: self.assertIsInstance(image[0], Image.Image) # Test not batched input process_out = image_processing(image_inputs[0], return_tensors="pt") encoded_images = process_out.pixel_values image_grid_thws = process_out.image_grid_thw expected_output_image_shape = (4900, 1176) expected_image_grid_thws = torch.Tensor([[1, 70, 70]]) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) # Test batched process_out = image_processing(image_inputs, return_tensors="pt") encoded_images = process_out.pixel_values image_grid_thws = process_out.image_grid_thw expected_output_image_shape = (34300, 1176) expected_image_grid_thws = torch.Tensor([[1, 70, 70]] * 7) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) def test_call_numpy(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True) for image in image_inputs: self.assertIsInstance(image[0], np.ndarray) # Test not batched input process_out = image_processing(image_inputs[0], return_tensors="pt") encoded_images = process_out.pixel_values image_grid_thws = process_out.image_grid_thw expected_output_image_shape = (4900, 1176) expected_image_grid_thws = torch.Tensor([[1, 70, 70]]) 
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) # Test batched process_out = image_processing(image_inputs, return_tensors="pt") encoded_images = process_out.pixel_values image_grid_thws = process_out.image_grid_thw expected_output_image_shape = (34300, 1176) expected_image_grid_thws = torch.Tensor([[1, 70, 70]] * 7) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) def test_call_pytorch(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True) for image in image_inputs: self.assertIsInstance(image[0], torch.Tensor) # Test not batched input process_out = image_processing(image_inputs[0], return_tensors="pt") encoded_images = process_out.pixel_values image_grid_thws = process_out.image_grid_thw expected_output_image_shape = (4900, 1176) expected_image_grid_thws = torch.Tensor([[1, 70, 70]]) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) # Test batched process_out = image_processing(image_inputs, return_tensors="pt") encoded_images = process_out.pixel_values image_grid_thws = process_out.image_grid_thw expected_output_image_shape = (34300, 1176) expected_image_grid_thws = torch.Tensor([[1, 70, 70]] * 7) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) @unittest.skip(reason="Qwen2VLImageProcessor doesn't treat 4 channel PIL and numpy consistently yet") def test_call_numpy_4_channels(self): pass def test_nested_input(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) # Test batched as a list of images process_out = image_processing(image_inputs, return_tensors="pt") encoded_images = process_out.pixel_values image_grid_thws = process_out.image_grid_thw expected_output_image_shape = (34300, 1176) expected_image_grid_thws = torch.Tensor([[1, 70, 70]] * 7) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) # Test batched as a nested list of images, where each sublist is one batch image_inputs_nested = image_inputs[:3] + image_inputs[3:] process_out = image_processing(image_inputs_nested, return_tensors="pt") encoded_images_nested = process_out.pixel_values image_grid_thws_nested = process_out.image_grid_thw expected_output_image_shape = (34300, 1176) expected_image_grid_thws = torch.Tensor([[1, 70, 70]] * 7) self.assertEqual(tuple(encoded_images_nested.shape), expected_output_image_shape) self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) # Image processor should return same pixel values, independently of input format self.assertTrue((encoded_images_nested == encoded_images).all()) self.assertTrue((image_grid_thws_nested == expected_image_grid_thws).all()) def test_video_inputs(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) 
expected_dims_by_frames = {1: 34300, 2: 34300, 3: 68600, 4: 68600, 5: 102900, 6: 102900} for num_frames, expected_dims in expected_dims_by_frames.items(): image_processor_tester = Qwen2VLImageProcessingTester(self, num_frames=num_frames) video_inputs = image_processor_tester.prepare_video_inputs(equal_resolution=True) process_out = image_processing(None, videos=video_inputs, return_tensors="pt") encoded_video = process_out.pixel_values_videos expected_output_video_shape = (expected_dims, 1176) self.assertEqual(tuple(encoded_video.shape), expected_output_video_shape) def test_custom_patch_size(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) for patch_size in (1, 3, 5, 7): image_processor_tester = Qwen2VLImageProcessingTester(self, patch_size=patch_size) video_inputs = image_processor_tester.prepare_video_inputs(equal_resolution=True) process_out = image_processing(None, videos=video_inputs, return_tensors="pt") encoded_video = process_out.pixel_values_videos expected_output_video_shape = (171500, 1176) self.assertEqual(tuple(encoded_video.shape), expected_output_video_shape) def test_custom_image_size(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) with tempfile.TemporaryDirectory() as tmpdirname: image_processing.save_pretrained(tmpdirname) image_processor_loaded = image_processing_class.from_pretrained( tmpdirname, max_pixels=56 * 56, min_pixels=28 * 28 ) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) process_out = image_processor_loaded(image_inputs, return_tensors="pt") expected_output_video_shape = [112, 1176] self.assertListEqual(list(process_out.pixel_values.shape), expected_output_video_shape) def test_custom_pixels(self): pixel_choices = frozenset(itertools.product((100, 150, 200, 20000), (100, 150, 200, 20000))) for image_processing_class in self.image_processor_list: image_processor_dict = self.image_processor_dict.copy() for a_pixels, b_pixels in pixel_choices: image_processor_dict["min_pixels"] = min(a_pixels, b_pixels) image_processor_dict["max_pixels"] = max(a_pixels, b_pixels) image_processor = image_processing_class(**image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs() # Just checking that it doesn't raise an error image_processor(image_inputs, return_tensors="pt") def test_temporal_padding(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # Create random video inputs with a number of frames not divisible by temporal_patch_size image_processor_tester = Qwen2VLImageProcessingTester(self, num_frames=5, temporal_patch_size=4) video_inputs = image_processor_tester.prepare_video_inputs(equal_resolution=True) # Process the video inputs process_out = image_processing(None, videos=video_inputs, return_tensors="pt") encoded_video = process_out.pixel_values_videos # Check the shape after padding expected_output_video_shape = (102900, 1176) # Adjusted based on padding self.assertEqual(tuple(encoded_video.shape), expected_output_video_shape) # Check divisibility by temporal_patch_size self.assertEqual(encoded_video.shape[0] % 4, 0) @require_vision @require_torch def test_slow_fast_equivalence(self): dummy_image = Image.open( requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw ) if not 
self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) encoding_slow = image_processor_slow(dummy_image, return_tensors="pt") encoding_fast = image_processor_fast(dummy_image, return_tensors="pt") self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values) self.assertEqual(encoding_slow.image_grid_thw.dtype, encoding_fast.image_grid_thw.dtype) self._assert_slow_fast_tensors_equivalence( encoding_slow.image_grid_thw.float(), encoding_fast.image_grid_thw.float() ) @require_vision @require_torch def test_slow_fast_equivalence_batched(self): if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop: self.skipTest( reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors" ) dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) encoding_slow = image_processor_slow(dummy_images, return_tensors="pt") encoding_fast = image_processor_fast(dummy_images, return_tensors="pt") self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values) self.assertEqual(encoding_slow.image_grid_thw.dtype, encoding_fast.image_grid_thw.dtype) self._assert_slow_fast_tensors_equivalence( encoding_slow.image_grid_thw.float(), encoding_fast.image_grid_thw.float() ) def test_get_num_patches_without_images(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) num_patches = image_processing.get_number_of_image_patches(height=100, width=100, images_kwargs={}) self.assertEqual(num_patches, 64) num_patches = image_processing.get_number_of_image_patches(height=200, width=50, images_kwargs={}) self.assertEqual(num_patches, 56) num_patches = image_processing.get_number_of_image_patches( height=100, width=100, images_kwargs={"patch_size": 28} ) self.assertEqual(num_patches, 16)
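# --- Illustrative sketch (added for clarity; not part of the original test file) ---
# A rough sketch of where the expected shapes above come from, assuming the
# Qwen2-VL patching scheme implied by the processor config: each flattened
# patch holds temporal_patch_size * num_channels * patch_size ** 2 features,
# and an equal-resolution input that resizes to a 70 x 70 patch grid yields
# 70 * 70 = 4900 patches per image (34300 for the batch of 7).
patch_size, temporal_patch_size, num_channels = 14, 2, 3
patch_dim = temporal_patch_size * num_channels * patch_size**2
assert patch_dim == 1176  # second dim of pixel_values in the tests above
grid_t, grid_h, grid_w = 1, 70, 70
assert grid_t * grid_h * grid_w == 4900  # single image
assert 7 * grid_t * grid_h * grid_w == 34300  # batch of 7 images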
transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py/0
{ "file_path": "transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py", "repo_id": "transformers", "token_count": 8700 }
515
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "google/reformer-crime-and-punishment" tokenizer_class = ReformerTokenizer rust_tokenizer_class = ReformerTokenizerFast test_rust_tokenizer = True test_seq2seq = False test_sentencepiece = True @classmethod def setUpClass(cls): super().setUpClass() tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True) tokenizer.save_pretrained(cls.tmpdirname) def test_convert_token_and_id(self): """Test ``_convert_token_to_id`` and ``_convert_id_to_token``.""" token = "<s>" token_id = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<unk>") self.assertEqual(vocab_keys[1], "<s>") self.assertEqual(vocab_keys[-1], "j") self.assertEqual(len(vocab_keys), 1_000) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 1_000) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: self.skipTest(reason="test_rust_tokenizer is set to False") tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "I was born in 92000, and this is falsé." 
tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) def test_padding(self, max_length=15): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs) # Simple input s = "This is a simple input" s2 = ["This is a simple input 1", "This is a simple input 2"] p = ("This is a simple input", "This is a pair") p2 = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length") # Simple input self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length") # Simple input self.assertRaises( ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", ) # Pair input self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length") # Pair input self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length") # Pair input self.assertRaises( ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", ) @unittest.skip(reason="Tokenizer has no padding token") def test_padding_different_model_input_name(self): pass def test_full_tokenizer(self): tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) @cached_property def big_tokenizer(self): return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment") @slow def test_tokenization_base_easy_symbols(self): symbols = "Hello World!" original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287] self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols)) @slow def test_tokenization_base_hard_symbols(self): symbols = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' " add words that should not exist and be tokenized to <unk>, such as saoneuhaoesuth" ) original_tokenizer_encodings = [ 108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265, ] self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols)) @require_torch @slow def test_torch_encode_plus_sent_to_model(self): import torch from transformers import ReformerConfig, ReformerModel # Build sequence first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10] sequence = " ".join(first_ten_tokens) encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt") batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt") config = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) config.axial_pos_shape = encoded_sequence["input_ids"].shape model = ReformerModel(config) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**encoded_sequence) model(**batch_encoded_sequence) @slow def test_tokenizer_integration(self): expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # fmt: skip # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 sequences = [ "This is a very simple sentence.", "The quick brown fox jumps over the lazy dog.", ] self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="google/reformer-crime-and-punishment", revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a", padding=False, sequences=sequences, )
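# --- Illustrative sketch (added for clarity; not part of the original test file) ---
# A small sketch of the axial-position constraint that
# test_torch_encode_plus_sent_to_model adjusts for above, assuming Reformer
# requires prod(config.axial_pos_shape) to equal the (padded) sequence length;
# the shape and length below are hypothetical values for illustration only.
import math

axial_pos_shape = (2, 8)  # hypothetical factorization for a length-16 input
sequence_length = 16
assert math.prod(axial_pos_shape) == sequence_length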
transformers/tests/models/reformer/test_tokenization_reformer.py/0
{ "file_path": "transformers/tests/models/reformer/test_tokenization_reformer.py", "repo_id": "transformers", "token_count": 6429 }
516
# Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from datasets import load_dataset from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torchvision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available() and is_torchvision_available(): from transformers import Sam2ImageProcessorFast class Sam2ImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, mask_size=None, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ): size = size if size is not None else {"height": 20, "width": 20} mask_size = mask_size if mask_size is not None else {"height": 12, "width": 12} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.mask_size = mask_size self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "mask_size": self.mask_size, } def expected_output_image_shape(self, images): return self.num_channels, self.size["height"], self.size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) # Copied from transformers.tests.models.beit.test_image_processing_beit.prepare_semantic_single_inputs def prepare_semantic_single_inputs(): ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") example = ds[0] return example["image"], example["map"] # Copied from transformers.tests.models.beit.test_image_processing_beit.prepare_semantic_batch_inputs def prepare_semantic_batch_inputs(): ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") return list(ds["image"][:2]), list(ds["map"][:2]) @require_torch @require_vision class SamImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): fast_image_processing_class = Sam2ImageProcessorFast if is_torchvision_available() else None test_slow_image_processor = False def setUp(self): super().setUp() self.image_processor_tester = Sam2ImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing 
= image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "rescale_factor")) self.assertTrue(hasattr(image_processing, "mask_size")) def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processing_class = image_processing_class(**self.image_processor_dict) image_processor = image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 20, "width": 20}) image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_call_segmentation_maps(self): for image_processing_class in self.image_processor_list: # Initialize image_processor image_processor = image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) maps = [] for image in image_inputs: self.assertIsInstance(image, torch.Tensor) maps.append(torch.zeros(image.shape[-2:]).long()) # Test not batched input encoding = image_processor(image_inputs[0], maps[0], return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 1, self.image_processor_tester.mask_size["height"], self.image_processor_tester.mask_size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) # Test batched encoding = image_processor(image_inputs, maps, return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.mask_size["height"], self.image_processor_tester.mask_size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) # Test not batched input (PIL images) image, segmentation_map = prepare_semantic_single_inputs() encoding = image_processor(image, segmentation_map, return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 1, self.image_processor_tester.mask_size["height"], self.image_processor_tester.mask_size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) # Test batched input (PIL images) images, segmentation_maps = prepare_semantic_batch_inputs() encoding = image_processor(images, segmentation_maps, 
return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 2, self.image_processor_tester.mask_size["height"], self.image_processor_tester.mask_size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255)
transformers/tests/models/sam2/test_image_processing_sam2.py/0
{ "file_path": "transformers/tests/models/sam2/test_image_processing_sam2.py", "repo_id": "transformers", "token_count": 4588 }
517
# Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import os import random import tempfile import unittest import numpy as np from transformers import Speech2TextFeatureExtractor from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin global_rng = random.Random() # Copied from tests.models.whisper.test_feature_extraction_whisper.floats_list def floats_list(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values @require_torch @require_torchaudio class Speech2TextFeatureExtractionTester: def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=24, num_mel_bins=24, padding_value=0.0, sampling_rate=16_000, return_attention_mask=True, do_normalize=True, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.feature_size = feature_size self.num_mel_bins = num_mel_bins self.padding_value = padding_value self.sampling_rate = sampling_rate self.return_attention_mask = return_attention_mask self.do_normalize = do_normalize def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)] else: # make sure that inputs increase in size speech_inputs = [ floats_list((x, self.feature_size)) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = Speech2TextFeatureExtractor def setUp(self): self.feat_extract_tester = Speech2TextFeatureExtractionTester(self) def _check_zero_mean_unit_variance(self, input_vector): self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3)) self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3)) def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 speech_inputs 
= [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] # Test feature size input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features self.assertTrue(input_features.ndim == 3) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size) # Test not batched input encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) # Test batched encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Test 2-D numpy arrays are batched. speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] np_speech_inputs = np.asarray(speech_inputs) encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_dither(self): np.random.seed(42) # seed the dithering randn() # Tests that features with and without little dithering are similar, but not the same dict_no_dither = self.feat_extract_tester.prepare_feat_extract_dict() dict_no_dither["dither"] = 0.0 dict_dither = self.feat_extract_tester.prepare_feat_extract_dict() dict_dither["dither"] = 1.0 feature_extractor_no_dither = self.feature_extraction_class(**dict_no_dither) feature_extractor_dither = self.feature_extraction_class(**dict_dither) # create three inputs of length 800, 1000, and 1200 speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] # compute features input_features_no_dither = feature_extractor_no_dither( np_speech_inputs, padding=True, return_tensors="np", sampling_rate=dict_no_dither["sampling_rate"] ).input_features input_features_dither = feature_extractor_dither( np_speech_inputs, padding=True, return_tensors="np", sampling_rate=dict_dither["sampling_rate"] ).input_features # test there is a difference between features (there's added noise to input signal) diff = input_features_dither - input_features_no_dither # features are not identical self.assertTrue(np.abs(diff).mean() > 1e-5) # features are not too different self.assertTrue(np.abs(diff).mean() <= 1e-3) self.assertTrue(np.abs(diff).max() <= 5e-2) def test_cepstral_mean_and_variance_normalization(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] paddings = ["longest", "max_length", "do_not_pad"] max_lengths = [None, 16, None] for max_length, padding in zip(max_lengths, paddings): inputs = feature_extractor( speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = [np.sum(x) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: 
fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]]) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]]) def test_cepstral_mean_and_variance_normalization_np(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] paddings = ["longest", "max_length", "do_not_pad"] max_lengths = [None, 16, None] for max_length, padding in zip(max_lengths, paddings): inputs = feature_extractor( speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = [np.sum(x) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]]) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]]) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]]) def test_cepstral_mean_and_variance_normalization_trunc_max_length(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] inputs = feature_extractor( speech_inputs, padding="max_length", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True, ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = np.sum(attention_mask == 1, axis=1) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1]) self._check_zero_mean_unit_variance(input_features[2]) def test_cepstral_mean_and_variance_normalization_trunc_longest(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] inputs = feature_extractor( speech_inputs, padding="longest", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True, ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = np.sum(attention_mask == 1, axis=1) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]]) self._check_zero_mean_unit_variance(input_features[2]) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape, (3, 4, 24)) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] inputs = feature_extractor( speech_inputs, padding="longest", max_length=16, truncation=True, return_tensors="np", return_attention_mask=True, ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = np.sum(attention_mask == 1, axis=1) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]]) self._check_zero_mean_unit_variance(input_features[2]) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape, (3, 6, 24)) def test_double_precision_pad(self): import torch feature_extractor = 
self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100, 32).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np") self.assertTrue(np_processed.input_features.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_features.dtype == torch.float32) def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id")[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_integration(self): # fmt: off expected = np.array([ -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241, -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128, -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625, ]) # fmt: on input_speech = self._load_datasamples(1) feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) input_features = feature_extractor(input_speech, return_tensors="pt").input_features self.assertEqual(input_features.shape, (1, 584, 24)) self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4)) def test_feat_extract_from_and_save_pretrained(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() self.assertDictEqual(dict_first, dict_second) def test_feat_extract_to_json_file(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "feat_extract.json") feat_extract_first.to_json_file(json_file_path) feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() self.assertEqual(dict_first, dict_second) # exact same tests than before, except that we simulate that torchaudio is not available @require_torch @unittest.mock.patch( "transformers.models.speech_to_text.feature_extraction_speech_to_text.is_speech_available", lambda: False ) class Speech2TextFeatureExtractionWithoutTorchaudioTest(Speech2TextFeatureExtractionTest): def test_using_audio_utils(self): # Tests that it uses audio_utils instead of torchaudio feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) self.assertTrue(hasattr(feat_extract, "window")) self.assertTrue(hasattr(feat_extract, "mel_filters")) from transformers.models.speech_to_text.feature_extraction_speech_to_text import is_speech_available self.assertFalse(is_speech_available())
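# --- Illustrative sketch (added for clarity; not part of the original test file) ---
# A numpy sketch of the per-utterance normalization that
# _check_zero_mean_unit_variance verifies above: subtract the per-feature mean
# and divide by the per-feature standard deviation over the time axis (the
# epsilon guards against division by zero and is an illustrative choice).
import numpy as np

feats = np.random.rand(100, 24).astype(np.float32)  # (time, feature_size)
normalized = (feats - feats.mean(axis=0)) / (feats.std(axis=0) + 1e-10)
assert np.all(np.abs(normalized.mean(axis=0)) < 1e-3)
assert np.all(np.abs(normalized.var(axis=0) - 1) < 1e-2)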
transformers/tests/models/speech_to_text/test_feature_extraction_speech_to_text.py/0
{
    "file_path": "transformers/tests/models/speech_to_text/test_feature_extraction_speech_to_text.py",
    "repo_id": "transformers",
    "token_count": 6957
}
518
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch StableLm model."""

import unittest

import pytest

from transformers import StableLmConfig, is_torch_available
from transformers.testing_utils import (
    require_bitsandbytes,
    require_flash_attn,
    require_torch,
    slow,
    torch_device,
)


if is_torch_available():
    import torch

    from transformers import (
        AutoTokenizer,
        StableLmForCausalLM,
        StableLmForSequenceClassification,
        StableLmForTokenClassification,
        StableLmModel,
    )
    from transformers.models.stablelm.modeling_stablelm import StableLmRotaryEmbedding

from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester


class StableLmModelTester(CausalLMModelTester):
    if is_torch_available():
        config_class = StableLmConfig
        base_model_class = StableLmModel
        causal_lm_class = StableLmForCausalLM
        sequence_class = StableLmForSequenceClassification
        token_class = StableLmForTokenClassification


@require_torch
class StableLmModelTest(CausalLMModelTest, unittest.TestCase):
    all_model_classes = (
        (
            StableLmModel,
            StableLmForCausalLM,
            StableLmForSequenceClassification,
            StableLmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": StableLmModel,
            "text-classification": StableLmForSequenceClassification,
            "text-generation": StableLmForCausalLM,
            "zero-shot": StableLmForSequenceClassification,
            "token-classification": StableLmForTokenClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    fx_compatible = False  # Broken by attention refactor cc @Cyrilvallez
    model_tester_class = StableLmModelTester
    rotary_embedding_layer = StableLmRotaryEmbedding  # Enables RoPE tests if set


@require_torch
class StableLmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_model_stablelm_3b_4e1t_logits(self):
        input_ids = {"input_ids": torch.tensor([[510, 8588, 310, 1900, 9386]], dtype=torch.long, device=torch_device)}

        model = StableLmForCausalLM.from_pretrained("stabilityai/stablelm-3b-4e1t").to(torch_device)
        model.eval()

        output = model(**input_ids).logits.float()

        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[2.7146, 2.4245, 1.5616, 1.4424, 2.6790]]).to(torch_device)
        torch.testing.assert_close(output.mean(dim=-1), EXPECTED_MEAN, rtol=1e-4, atol=1e-4)

        # Expected logits sliced from [0, 0, 0:30]
        EXPECTED_SLICE = torch.tensor([7.1030, -1.4195, 9.9206, 7.7008, 4.9891, 4.2169, 5.5426, 3.7878, 6.7593, 5.7360, 8.4691, 5.5448, 5.0544, 10.4129, 8.5573, 13.0405, 7.3265, 3.5868, 6.1106, 5.9406, 5.6376, 5.7490, 5.4850, 4.8124, 5.1991, 4.6419, 4.5719, 9.9588, 6.7222, 4.5070]).to(torch_device)  # fmt: skip
        torch.testing.assert_close(output[0, 0, :30], EXPECTED_SLICE, rtol=1e-4, atol=1e-4)

    @slow
    def test_model_stablelm_3b_4e1t_generation(self):
        tokenizer = AutoTokenizer.from_pretrained("stabilityai/stablelm-3b-4e1t")
        model = StableLmForCausalLM.from_pretrained("stabilityai/stablelm-3b-4e1t")
        input_ids = tokenizer.encode(
            "My favorite food has always been pizza, but lately",
            return_tensors="pt",
        )

        outputs = model.generate(input_ids, max_new_tokens=20, temperature=0)
        text = tokenizer.decode(outputs[0], skip_special_tokens=True)

        EXPECTED_TEXT_COMPLETION = """My favorite food has always been pizza, but lately I’ve been craving something different. I’ve been trying to eat healthier and I’ve"""
        self.assertEqual(text, EXPECTED_TEXT_COMPLETION)

    @slow
    def test_model_tiny_random_stablelm_2_logits(self):
        # Check parallel residual and qk layernorm forward pass
        input_ids = {"input_ids": torch.tensor([[510, 8588, 310, 1900, 9386]], dtype=torch.long, device=torch_device)}

        model = StableLmForCausalLM.from_pretrained("stabilityai/tiny-random-stablelm-2").to(torch_device)
        model.eval()

        output = model(**input_ids).logits.float()

        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.7196, -3.6099, -2.6877, -3.1973, -3.9344]]).to(torch_device)
        torch.testing.assert_close(output.mean(dim=-1), EXPECTED_MEAN, rtol=1e-4, atol=1e-4)

        # Expected logits sliced from [0, 0, 0:30]
        EXPECTED_SLICE = torch.tensor([2.8364, 5.3811, 5.1659, 7.5485, 4.3219, 6.3315, 1.3967, 6.9147, 3.9679, 6.4786, 5.9176, 3.3067, 5.2917, 0.1485, 3.9630, 7.9947, 10.6727, 9.6757, 8.8772, 8.3527, 7.8445, 6.6025, 5.5786, 7.0985, 6.1369, 3.4259, 1.9397, 4.6157, 4.8105, 3.1768]).to(torch_device)  # fmt: skip
        torch.testing.assert_close(output[0, 0, :30], EXPECTED_SLICE, rtol=1e-4, atol=1e-4)

    @slow
    def test_model_tiny_random_stablelm_2_generation(self):
        # Check parallel residual and qk layernorm generation
        tokenizer = AutoTokenizer.from_pretrained("stabilityai/tiny-random-stablelm-2")
        model = StableLmForCausalLM.from_pretrained("stabilityai/tiny-random-stablelm-2")
        input_ids = tokenizer.encode(
            "My favorite ride at the amusement park",
            return_tensors="pt",
        )

        outputs = model.generate(input_ids, max_new_tokens=20, temperature=0)
        text = tokenizer.decode(outputs[0], skip_special_tokens=True)

        EXPECTED_TEXT_COMPLETION = """My favorite ride at the amusement park is the 2000-mile roller coaster. It's a thrilling ride filled with roller coast"""
        self.assertEqual(text, EXPECTED_TEXT_COMPLETION)

    @require_bitsandbytes
    @slow
    @require_flash_attn
    @pytest.mark.flash_attn_test
    def test_model_3b_long_prompt(self):
        EXPECTED_OUTPUT_TOKEN_IDS = [3, 3, 3]
        input_ids = [306, 338] * 2047
        model = StableLmForCausalLM.from_pretrained(
            "stabilityai/stablelm-3b-4e1t",
            device_map="auto",
            dtype="auto",
            load_in_4bit=True,
            attn_implementation="flash_attention_2",
        )
        input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
        generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
        self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-3:].tolist())
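# For context, the integration tests above follow one pattern: greedy-decode a
# fixed prompt (temperature=0) and compare against a hard-coded transcript, or
# compare logits against hard-coded slices with `torch.testing.assert_close`.
# A hypothetical template for adding a new checkpoint check (names and
# tolerances illustrative, not prescriptive):
#
#     output = model(**inputs).logits.float()
#     torch.testing.assert_close(output[0, 0, :30], EXPECTED_SLICE, rtol=1e-4, atol=1e-4)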
transformers/tests/models/stablelm/test_modeling_stablelm.py/0
{
    "file_path": "transformers/tests/models/stablelm/test_modeling_stablelm.py",
    "repo_id": "transformers",
    "token_count": 3158
}
519
# Copyright 2024 the Fast authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch TextNet model."""

import unittest

import requests
from PIL import Image

from transformers import TextNetConfig
from transformers.models.textnet.image_processing_textnet import TextNetImageProcessor
from transformers.testing_utils import (
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import TextNetBackbone, TextNetForImageClassification, TextNetModel


class TextNetConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))


class TextNetModelTester:
    def __init__(
        self,
        parent,
        stem_kernel_size=3,
        stem_stride=2,
        stem_in_channels=3,
        stem_out_channels=32,
        stem_act_func="relu",
        dropout_rate=0,
        ops_order="weight_bn_act",
        conv_layer_kernel_sizes=[
            [[3, 3]],
            [[3, 3]],
            [[3, 3]],
            [[3, 3]],
        ],
        conv_layer_strides=[
            [2],
            [2],
            [2],
            [2],
        ],
        out_features=["stage1", "stage2", "stage3", "stage4"],
        out_indices=[1, 2, 3, 4],
        batch_size=3,
        num_channels=3,
        image_size=[32, 32],
        is_training=True,
        use_labels=True,
        num_labels=3,
        hidden_sizes=[32, 32, 32, 32, 32],
    ):
        self.parent = parent
        self.stem_kernel_size = stem_kernel_size
        self.stem_stride = stem_stride
        self.stem_in_channels = stem_in_channels
        self.stem_out_channels = stem_out_channels
        self.act_func = stem_act_func
        self.dropout_rate = dropout_rate
        self.ops_order = ops_order
        self.conv_layer_kernel_sizes = conv_layer_kernel_sizes
        self.conv_layer_strides = conv_layer_strides
        self.out_features = out_features
        self.out_indices = out_indices
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.hidden_sizes = hidden_sizes
        self.num_stages = 5

    def get_config(self):
        return TextNetConfig(
            stem_kernel_size=self.stem_kernel_size,
            stem_stride=self.stem_stride,
            stem_num_channels=self.stem_in_channels,
            stem_out_channels=self.stem_out_channels,
            act_func=self.act_func,
            dropout_rate=self.dropout_rate,
            ops_order=self.ops_order,
            conv_layer_kernel_sizes=self.conv_layer_kernel_sizes,
            conv_layer_strides=self.conv_layer_strides,
            out_features=self.out_features,
            out_indices=self.out_indices,
            hidden_sizes=self.hidden_sizes,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TextNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        scale_h = self.image_size[0] // 32
        scale_w = self.image_size[1] // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], scale_h, scale_w),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TextNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = TextNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        scale_h = self.image_size[0] // 32
        scale_w = self.image_size[1] // 32
        self.parent.assertListEqual(
            list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 8 * scale_h, 8 * scale_w]
        )

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = TextNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        scale_h = self.image_size[0] // 32
        scale_w = self.image_size[1] // 32
        self.parent.assertListEqual(
            list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[0], scale_h, scale_w]
        )

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class TextNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some tests of test_modeling_common.py, as TextNet does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (TextNetModel, TextNetForImageClassification, TextNetBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TextNetModel, "image-classification": TextNetForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torch_exportable = True
    has_attentions = False

    def setUp(self):
        self.model_tester = TextNetModelTester(self)
        self.config_tester = TextNetConfigTester(self, config_class=TextNetConfig, has_text_modality=False)

    @unittest.skip(reason="TextNet does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="TextNet does not have input/output embeddings")
    def test_model_get_set_embeddings(self):
        pass

    @unittest.skip(reason="TextNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="TextNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            self.assertEqual(len(hidden_states), self.model_tester.num_stages)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size[0] // 2, self.model_tester.image_size[1] // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="TextNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model_name = "czczup/textnet-base"
        model = TextNetModel.from_pretrained(model_name)
        self.assertIsNotNone(model)


@require_torch
@require_vision
class TextNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        processor = TextNetImageProcessor.from_pretrained("czczup/textnet-base")
        model = TextNetModel.from_pretrained("czczup/textnet-base").to(torch_device)

        # prepare image
        url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        image = Image.open(requests.get(url, stream=True).raw)
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            output = model(**inputs)

        # verify output
        self.assertEqual(output.last_hidden_state.shape, torch.Size([1, 512, 20, 27]))
        expected_slice_backbone = torch.tensor(
            [
                [0.0000, 1.7415, 1.2660],
                [0.0000, 1.0084, 1.9692],
                [0.0000, 1.7464, 1.7892],
            ],
            device=torch_device,
        )
        torch.testing.assert_close(
            output.last_hidden_state[0, 12, :3, :3], expected_slice_backbone, rtol=1e-2, atol=1e-2
        )


@require_torch
# Copied from tests.models.bit.test_modeling_bit.BitBackboneTest with Bit->TextNet
class TextNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (TextNetBackbone,) if is_torch_available() else ()
    config_class = TextNetConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = TextNetModelTester(self)
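# Spatial arithmetic behind the shape checks above: the stem (stride 2) and the
# four conv stages (stride 2 each) downsample by 2**5 = 32 in total, so the
# final feature map is `image_size // 32` per side (1x1 for the 32x32 test
# inputs), while the first returned backbone stage sits at 8 * scale, i.e. 1/4
# of the input resolution. A quick check of that arithmetic (illustrative only):
#
#     side = 32
#     for stride in (2, 2, 2, 2, 2):  # stem + 4 stages
#         side //= stride
#     assert side == 32 // 32 == 1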
transformers/tests/models/textnet/test_modeling_textnet.py/0
{
    "file_path": "transformers/tests/models/textnet/test_modeling_textnet.py",
    "repo_id": "transformers",
    "token_count": 5701
}
520
# Copyright 2023 The Intel Team Authors, The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
from typing import Optional, Union

import numpy as np

from transformers.image_transforms import PaddingMode
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available

from ...test_image_processing_common import ImageProcessingTestMixin, prepare_video_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import TvpImageProcessor, TvpImageProcessorFast


class TvpImageProcessingTester:
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: dict[str, int] = {"longest_edge": 40},
        do_center_crop: bool = False,
        crop_size: Optional[dict[str, int]] = None,
        do_rescale: bool = False,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: dict[str, int] = {"height": 80, "width": 80},
        fill: Optional[int] = None,
        pad_mode: PaddingMode = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, list[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, list[float]]] = [0.26862954, 0.26130258, 0.27577711],
        batch_size=2,
        min_resolution=40,
        max_resolution=80,
        num_channels=3,
        num_frames=2,
    ):
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
        self.fill = fill
        self.pad_mode = pad_mode
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.num_frames = num_frames

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "do_center_crop": self.do_center_crop,
            "do_pad": self.do_pad,
            "pad_size": self.pad_size,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        This function computes the expected height and width when providing images to TvpImageProcessor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            return (int(self.pad_size["height"]), int(self.pad_size["width"]))
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width

    def prepare_video_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_video_inputs(
            batch_size=self.batch_size,
            num_frames=self.num_frames,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )


@require_torch
@require_vision
class TvpImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = TvpImageProcessor if is_vision_available() else None
    fast_image_processing_class = (
        TvpImageProcessorFast if is_vision_available() and is_torchvision_available() else None
    )

    def setUp(self):
        super().setUp()
        self.image_processor_tester = TvpImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            self.assertTrue(hasattr(image_processing, "image_mean"))
            self.assertTrue(hasattr(image_processing, "image_std"))
            self.assertTrue(hasattr(image_processing, "do_normalize"))
            self.assertTrue(hasattr(image_processing, "do_resize"))
            self.assertTrue(hasattr(image_processing, "do_center_crop"))
            self.assertTrue(hasattr(image_processing, "size"))
            self.assertTrue(hasattr(image_processing, "do_rescale"))
            self.assertTrue(hasattr(image_processing, "do_pad"))
            self.assertTrue(hasattr(image_processing, "pad_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class.from_dict(self.image_processor_dict)
            self.assertEqual(image_processor.size, {"longest_edge": 40})

            image_processor = image_processing_class.from_dict(self.image_processor_dict, size={"longest_edge": 12})
            self.assertEqual(image_processor.size, {"longest_edge": 12})

    def test_call_pil(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random PIL videos
            video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False)
            for video in video_inputs:
                self.assertIsInstance(video, list)
                self.assertIsInstance(video[0], Image.Image)

            # Test not batched input
            expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs)
            encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
            self.assertEqual(
                encoded_videos.shape,
                (
                    1,
                    self.image_processor_tester.num_frames,
                    self.image_processor_tester.num_channels,
                    expected_height,
                    expected_width,
                ),
            )

            # Test batched
            expected_height, expected_width = self.image_processor_tester.get_expected_values(
                video_inputs, batched=True
            )
            encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
            self.assertEqual(
                encoded_videos.shape,
                (
                    self.image_processor_tester.batch_size,
                    self.image_processor_tester.num_frames,
                    self.image_processor_tester.num_channels,
                    expected_height,
                    expected_width,
                ),
            )

    def test_call_numpy(self):
        # Test numpy with both processors
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random numpy tensors
            video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True)
            for video in video_inputs:
                self.assertIsInstance(video, list)
                self.assertIsInstance(video[0], np.ndarray)

            # For fast processor, convert numpy to tensor
            if image_processing_class == self.fast_image_processing_class:
                # Convert numpy arrays to tensors for fast processor
                tensor_video_inputs = []
                for video in video_inputs:
                    tensor_video = [torch.from_numpy(frame) for frame in video]
                    tensor_video_inputs.append(tensor_video)
                test_inputs = tensor_video_inputs
            else:
                test_inputs = video_inputs

            # Test not batched input
            expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs)
            encoded_videos = image_processing(test_inputs[0], return_tensors="pt").pixel_values
            self.assertEqual(
                encoded_videos.shape,
                (
                    1,
                    self.image_processor_tester.num_frames,
                    self.image_processor_tester.num_channels,
                    expected_height,
                    expected_width,
                ),
            )

            # Test batched
            expected_height, expected_width = self.image_processor_tester.get_expected_values(
                video_inputs, batched=True
            )
            encoded_videos = image_processing(test_inputs, return_tensors="pt").pixel_values
            self.assertEqual(
                encoded_videos.shape,
                (
                    self.image_processor_tester.batch_size,
                    self.image_processor_tester.num_frames,
                    self.image_processor_tester.num_channels,
                    expected_height,
                    expected_width,
                ),
            )

    def test_call_numpy_4_channels(self):
        # Test numpy with both processors
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random numpy tensors
            video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True)
            for video in video_inputs:
                self.assertIsInstance(video, list)
                self.assertIsInstance(video[0], np.ndarray)

            # For fast processor, convert numpy to tensor
            if image_processing_class == self.fast_image_processing_class:
                # Convert numpy arrays to tensors for fast processor
                tensor_video_inputs = []
                for video in video_inputs:
                    tensor_video = [torch.from_numpy(frame) for frame in video]
                    tensor_video_inputs.append(tensor_video)
                test_inputs = tensor_video_inputs
            else:
                test_inputs = video_inputs

            # Test not batched input
            expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs)
            encoded_videos = image_processing(
                test_inputs[0], return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first"
            ).pixel_values
            self.assertEqual(
                encoded_videos.shape,
                (
                    1,
                    self.image_processor_tester.num_frames,
                    self.image_processor_tester.num_channels,
                    expected_height,
                    expected_width,
                ),
            )

            # Test batched
            expected_height, expected_width = self.image_processor_tester.get_expected_values(
                video_inputs, batched=True
            )
            encoded_videos = image_processing(
                test_inputs, return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first"
            ).pixel_values
            self.assertEqual(
                encoded_videos.shape,
                (
                    self.image_processor_tester.batch_size,
                    self.image_processor_tester.num_frames,
                    self.image_processor_tester.num_channels,
                    expected_height,
                    expected_width,
                ),
            )
            self.image_processor_tester.num_channels = 3

    def test_call_pytorch(self):
        # Test PyTorch tensors with both processors
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random PyTorch tensors
            video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, torchify=True)
            for video in video_inputs:
                self.assertIsInstance(video, list)
                self.assertIsInstance(video[0], torch.Tensor)

            # Test not batched input
            expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs)
            encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
            self.assertEqual(
                encoded_videos.shape,
                (
                    1,
                    self.image_processor_tester.num_frames,
                    self.image_processor_tester.num_channels,
                    expected_height,
                    expected_width,
                ),
            )

            # Test batched
            expected_height, expected_width = self.image_processor_tester.get_expected_values(
                video_inputs, batched=True
            )
            encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
            self.assertEqual(
                encoded_videos.shape,
                (
                    self.image_processor_tester.batch_size,
                    self.image_processor_tester.num_frames,
                    self.image_processor_tester.num_channels,
                    expected_height,
                    expected_width,
                ),
            )

    @require_vision
    @require_torch
    def test_slow_fast_equivalence_batched(self):
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")

        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")

        if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
            self.skipTest(
                reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
            )

        dummy_images = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, torchify=True)
        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)

        encoding_slow = image_processor_slow(dummy_images, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_images, return_tensors="pt")

        # Higher max atol for video processing; mean_atol raised from 5e-3 to 1e-2
        self._assert_slow_fast_tensors_equivalence(
            encoding_slow.pixel_values, encoding_fast.pixel_values, atol=10.0, mean_atol=1e-2
        )
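# Why `get_expected_values` can ignore the input resolution above: with
# `do_pad=True` and `pad_size={"height": 80, "width": 80}`, every frame is
# first resized so its longest edge is at most 40 and then padded onto a fixed
# 80x80 canvas, so the expected output shape equals the pad size for any input.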
transformers/tests/models/tvp/test_image_processing_tvp.py/0
{
    "file_path": "transformers/tests/models/tvp/test_image_processing_tvp.py",
    "repo_id": "transformers",
    "token_count": 7497
}
521
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch UperNet framework."""

import unittest

from datasets import load_dataset

from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import (
    require_timm,
    require_torch,
    require_torch_multi_gpu,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_torch_available, is_vision_available
from transformers.utils.import_utils import get_torch_major_and_minor_version

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import UperNetForSemanticSegmentation


if is_vision_available():
    from transformers import AutoImageProcessor


class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 1, 1],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            backbone=None,
            hidden_size=64,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=32,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as UperNet does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    test_torch_exportable = True
    test_torch_exportable_strictly = get_torch_major_and_minor_version() != "2.7"

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=UperNetConfig,
            has_text_modality=False,
            hidden_size=37,
            common_properties=["hidden_size"],
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_get_set_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @require_timm
    def test_backbone_selection(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        config.backbone_config = None
        config.backbone_kwargs = {"out_indices": [1, 2, 3]}
        config.use_pretrained_backbone = True

        # Load a timm backbone
        # We can't load transformer checkpoint with timm backbone, as we can't specify features_only and out_indices
        config.backbone = "resnet18"
        config.use_timm_backbone = True
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device).eval()
            if model.__class__.__name__ == "UperNetForSemanticSegmentation":
                self.assertEqual(model.backbone.out_indices, [1, 2, 3])

        # Load a HF backbone
        config.backbone = "microsoft/resnet-18"
        config.use_timm_backbone = False
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device).eval()
            if model.__class__.__name__ == "UperNetForSemanticSegmentation":
                self.assertEqual(model.backbone.out_indices, [1, 2, 3])

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model_name = "openmmlab/upernet-convnext-tiny"
        model = UperNetForSemanticSegmentation.from_pretrained(model_name)
        self.assertIsNotNone(model)


# We will verify our results on an image of ADE20k
def prepare_img():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    return ds[0]["image"].convert("RGB")


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        torch.testing.assert_close(outputs.logits[0, 0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        torch.testing.assert_close(outputs.logits[0, 0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
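# A minimal usage sketch of the model exercised by the integration tests above;
# the checkpoint name matches those tests, the variable names are illustrative:
#
#     processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#     model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#     inputs = processor(images=image, return_tensors="pt")
#     logits = model(**inputs).logits  # (batch, num_labels, height, width)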
transformers/tests/models/upernet/test_modeling_upernet.py/0
{
    "file_path": "transformers/tests/models/upernet/test_modeling_upernet.py",
    "repo_id": "transformers",
    "token_count": 5203
}
522
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import shutil
import tempfile
import unittest

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torchvision_available, is_vision_available

from ...test_processing_common import ProcessorTesterMixin


if is_vision_available():
    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor, ViTImageProcessorFast


@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    processor_class = VisionTextDualEncoderProcessor

    @classmethod
    def setUpClass(cls):
        cls.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]  # fmt: skip
        cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(cls.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        cls.image_processor_file = os.path.join(cls.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(cls.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

        tokenizer = cls.get_tokenizer()
        image_processor = cls.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(cls.tmpdirname)

    @classmethod
    def get_tokenizer(cls, **kwargs):
        return BertTokenizer.from_pretrained(cls.tmpdirname, **kwargs)

    @classmethod
    def get_image_processor(cls, **kwargs):
        if is_torchvision_available():
            return ViTImageProcessorFast.from_pretrained(cls.tmpdirname, **kwargs)
        return ViTImageProcessor.from_pretrained(cls.tmpdirname, **kwargs)

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdirname, ignore_errors=True)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        with tempfile.TemporaryDirectory() as tmpdir:
            processor.save_pretrained(tmpdir)
            processor = VisionTextDualEncoderProcessor.from_pretrained(tmpdir)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, (ViTImageProcessor, ViTImageProcessorFast))

    def test_save_load_pretrained_additional_features(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            processor = VisionTextDualEncoderProcessor(
                tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
            )
            processor.save_pretrained(tmpdir)

            tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
            image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

            processor = VisionTextDualEncoderProcessor.from_pretrained(
                tmpdir, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
            )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, (ViTImageProcessor, ViTImageProcessorFast))

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="pt")
        input_processor = processor(images=image_input, return_tensors="pt")

        for key in input_feat_extract:
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str)

        for key in encoded_tok:
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
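# The processor under test is a thin composition: text kwargs are forwarded to
# the tokenizer and image kwargs to the image processor, and the two output
# dicts are merged. That is why the equality checks above can compare the
# processor's output against each component called in isolation.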
transformers/tests/models/vision_text_dual_encoder/test_processing_vision_text_dual_encoder.py/0
{
    "file_path": "transformers/tests/models/vision_text_dual_encoder/test_processing_vision_text_dual_encoder.py",
    "repo_id": "transformers",
    "token_count": 2761
}
523
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import VitPoseImageProcessor


class VitPoseImageProcessingTester:
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_affine_transform=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_affine_transform = do_affine_transform
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_affine_transform": self.do_affine_transform,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }

    def expected_output_image_shape(self, images):
        return self.num_channels, self.size["height"], self.size["width"]

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )


@require_torch
@require_vision
class VitPoseImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = VitPoseImageProcessor if is_vision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = VitPoseImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_affine_transform"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size={"height": 42, "width": 42}
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        boxes = [[[0, 0, 1, 1], [0.5, 0.5, 0.5, 0.5]]]
        encoded_images = image_processing(image_inputs[0], boxes=boxes, return_tensors="pt").pixel_values
        expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
        self.assertEqual(tuple(encoded_images.shape), (2, *expected_output_image_shape))

        # Test batched
        boxes = [[[0, 0, 1, 1], [0.5, 0.5, 0.5, 0.5]]] * self.image_processor_tester.batch_size
        encoded_images = image_processing(image_inputs, boxes=boxes, return_tensors="pt").pixel_values
        expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
        self.assertEqual(
            tuple(encoded_images.shape), (self.image_processor_tester.batch_size * 2, *expected_output_image_shape)
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        boxes = [[[0, 0, 1, 1], [0.5, 0.5, 0.5, 0.5]]]
        encoded_images = image_processing(image_inputs[0], boxes=boxes, return_tensors="pt").pixel_values
        expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
        self.assertEqual(tuple(encoded_images.shape), (2, *expected_output_image_shape))

        # Test batched
        boxes = [[[0, 0, 1, 1], [0.5, 0.5, 0.5, 0.5]]] * self.image_processor_tester.batch_size
        encoded_images = image_processing(image_inputs, boxes=boxes, return_tensors="pt").pixel_values
        expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
        self.assertEqual(
            tuple(encoded_images.shape), (self.image_processor_tester.batch_size * 2, *expected_output_image_shape)
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        boxes = [[[0, 0, 1, 1], [0.5, 0.5, 0.5, 0.5]]]
        encoded_images = image_processing(image_inputs[0], boxes=boxes, return_tensors="pt").pixel_values
        expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
        self.assertEqual(tuple(encoded_images.shape), (2, *expected_output_image_shape))

        # Test batched
        boxes = [[[0, 0, 1, 1], [0.5, 0.5, 0.5, 0.5]]] * self.image_processor_tester.batch_size
        encoded_images = image_processing(image_inputs, boxes=boxes, return_tensors="pt").pixel_values
        expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
        self.assertEqual(
            tuple(encoded_images.shape), (self.image_processor_tester.batch_size * 2, *expected_output_image_shape)
        )

    def test_call_numpy_4_channels(self):
        # Test that can process images which have an arbitrary number of channels
        # Initialize image_processing
        image_processor = self.image_processing_class(**self.image_processor_dict)

        # create random numpy tensors
        self.image_processor_tester.num_channels = 4
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)

        # Test not batched input
        boxes = [[[0, 0, 1, 1], [0.5, 0.5, 0.5, 0.5]]]
        encoded_images = image_processor(
            image_inputs[0],
            boxes=boxes,
            return_tensors="pt",
            input_data_format="channels_last",
            image_mean=0,
            image_std=1,
        ).pixel_values
        expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
        self.assertEqual(tuple(encoded_images.shape), (len(boxes[0]), *expected_output_image_shape))

        # Test batched
        boxes = [[[0, 0, 1, 1], [0.5, 0.5, 0.5, 0.5]]] * self.image_processor_tester.batch_size
        encoded_images = image_processor(
            image_inputs,
            boxes=boxes,
            return_tensors="pt",
            input_data_format="channels_last",
            image_mean=0,
            image_std=1,
        ).pixel_values
        expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
        self.assertEqual(
            tuple(encoded_images.shape),
            (self.image_processor_tester.batch_size * len(boxes[0]), *expected_output_image_shape),
        )
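# Shape bookkeeping for the assertions above: VitPose crops one sub-image per
# bounding box via an affine transform, so a single image with N boxes yields N
# processed crops, and a batch of B images yields B * N entries in
# `pixel_values` (here N == 2 boxes per image).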
transformers/tests/models/vitpose/test_image_processing_vitpose.py/0
{ "file_path": "transformers/tests/models/vitpose/test_image_processing_vitpose.py", "repo_id": "transformers", "token_count": 4123 }
524
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Wav2Vec2 model.""" import math import multiprocessing import os import pickle import tempfile import traceback import unittest import numpy as np from datasets import load_dataset from pytest import mark from transformers import Wav2Vec2Config, is_torch_available from transformers.testing_utils import ( CaptureLogger, cleanup, is_flaky, is_pyctcdecode_available, is_torchaudio_available, require_flash_attn, require_pyctcdecode, require_torch, require_torch_gpu, require_torchaudio, require_torchcodec, run_test_in_subprocess, slow, torch_device, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from safetensors.torch import save_file as safe_save_file from transformers import ( Wav2Vec2FeatureExtractor, Wav2Vec2ForAudioFrameClassification, Wav2Vec2ForCTC, Wav2Vec2ForMaskedLM, Wav2Vec2ForPreTraining, Wav2Vec2ForSequenceClassification, Wav2Vec2ForXVector, Wav2Vec2Model, Wav2Vec2Processor, ) from transformers.models.wav2vec2.modeling_wav2vec2 import ( WAV2VEC2_ADAPTER_PT_FILE, WAV2VEC2_ADAPTER_SAFE_FILE, Wav2Vec2GumbelVectorQuantizer, _compute_mask_indices, _sample_negative_indices, ) if is_torchaudio_available(): import torchaudio if is_pyctcdecode_available(): import pyctcdecode.decoder from transformers import Wav2Vec2ProcessorWithLM from transformers.models.wav2vec2_with_lm import processing_wav2vec2_with_lm from transformers.utils.fx import symbolic_trace def _test_wav2vec2_with_lm_invalid_pool(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) ds = load_dataset("fixie-ai/common_voice_17_0", "es", split="test", streaming=True) sample = next(iter(ds)) resampled_audio = torchaudio.functional.resample( torch.tensor(sample["audio"]["array"]), 48_000, 16_000 ).numpy() model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm").to( torch_device ) processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(resampled_audio, return_tensors="pt").input_values with torch.no_grad(): logits = model(input_values.to(torch_device)).logits # use a spawn pool, which should trigger a warning if different than fork with CaptureLogger(pyctcdecode.decoder.logger) as cl, multiprocessing.get_context("spawn").Pool(1) as pool: transcription = processor.batch_decode(logits.cpu().numpy(), pool).text expected_transcription = "el resto de los equipos se mantienen en su sede" unittest.TestCase().assertIn("Falling back to sequential decoding.", cl.out) unittest.TestCase().assertEqual(transcription[0], expected_transcription) # force batch_decode to internally create a spawn pool, which should trigger a warning if different than 
        multiprocessing.set_start_method("spawn", force=True)
        with CaptureLogger(processing_wav2vec2_with_lm.logger) as cl:
            transcription = processor.batch_decode(logits.cpu().numpy()).text

        unittest.TestCase().assertIn("Falling back to sequential decoding.", cl.out)
        unittest.TestCase().assertEqual(transcription[0], expected_transcription)
    except Exception:
        error = f"{traceback.format_exc()}"

    results = {"error": error}
    out_queue.put(results, timeout=timeout)
    out_queue.join()


class Wav2Vec2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=1024,  # speech is longer
        is_training=False,
        hidden_size=16,
        feat_extract_norm="group",
        feat_extract_dropout=0.0,
        feat_extract_activation="gelu",
        conv_dim=(32, 32, 32),
        conv_stride=(4, 4, 4),
        conv_kernel=(8, 8, 8),
        conv_bias=False,
        num_conv_pos_embeddings=16,
        num_conv_pos_embedding_groups=2,
        num_hidden_layers=2,
        num_attention_heads=2,
        hidden_dropout_prob=0.1,  # this is most likely not correctly set yet
        intermediate_size=20,
        layer_norm_eps=1e-5,
        hidden_act="gelu",
        initializer_range=0.02,
        mask_time_prob=0.5,
        mask_time_length=2,
        vocab_size=32,
        do_stable_layer_norm=False,
        num_adapter_layers=1,
        adapter_stride=2,
        tdnn_dim=(32, 32),
        tdnn_kernel=(5, 3),
        tdnn_dilation=(1, 2),
        xvector_output_dim=32,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_dropout = feat_extract_dropout
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = conv_dim
        self.conv_stride = conv_stride
        self.conv_kernel = conv_kernel
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.intermediate_size = intermediate_size
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.num_adapter_layers = num_adapter_layers
        self.adapter_stride = adapter_stride
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.scope = scope
        self.tdnn_dim = tdnn_dim
        self.tdnn_kernel = tdnn_kernel
        self.tdnn_dilation = tdnn_dilation
        self.xvector_output_dim = xvector_output_dim

        output_seq_length = self.seq_length
        for kernel, stride in zip(self.conv_kernel, self.conv_stride):
            output_seq_length = (output_seq_length - (kernel - 1)) / stride
        self.output_seq_length = int(math.ceil(output_seq_length))
        self.encoder_seq_length = self.output_seq_length
        self.adapter_output_seq_length = (self.output_seq_length - 1) // adapter_stride + 1
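    # Each conv layer maps length L -> (L - (kernel - 1)) / stride. With the defaults above
    # (seq_length=1024 and three layers of kernel=8, stride=4) this gives ceil(13.70...) = 14
    # encoder frames, and the adapter reduces that to (14 - 1) // 2 + 1 = 7 frames.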

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0)
        attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        return config, input_values, attention_mask

    def get_config(self):
        return Wav2Vec2Config(
            hidden_size=self.hidden_size,
            feat_extract_norm=self.feat_extract_norm,
            feat_extract_dropout=self.feat_extract_dropout,
            feat_extract_activation=self.feat_extract_activation,
            conv_dim=self.conv_dim,
            conv_stride=self.conv_stride,
            conv_kernel=self.conv_kernel,
            conv_bias=self.conv_bias,
            mask_time_prob=self.mask_time_prob,
            mask_time_length=self.mask_time_length,
            num_conv_pos_embeddings=self.num_conv_pos_embeddings,
            num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            hidden_dropout_prob=self.hidden_dropout_prob,
            intermediate_size=self.intermediate_size,
            layer_norm_eps=self.layer_norm_eps,
            do_stable_layer_norm=self.do_stable_layer_norm,
            hidden_act=self.hidden_act,
            initializer_range=self.initializer_range,
            vocab_size=self.vocab_size,
            num_adapter_layers=self.num_adapter_layers,
            adapter_stride=self.adapter_stride,
            tdnn_dim=self.tdnn_dim,
            tdnn_kernel=self.tdnn_kernel,
            tdnn_dilation=self.tdnn_dilation,
            xvector_output_dim=self.xvector_output_dim,
        )

    def create_and_check_model(self, config, input_values, attention_mask):
        model = Wav2Vec2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values, attention_mask=attention_mask)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size)
        )

    def create_and_check_model_with_adapter(self, config, input_values, attention_mask):
        config.add_adapter = True
        model = Wav2Vec2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values, attention_mask=attention_mask)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, self.hidden_size)
        )

    def create_and_check_model_with_adapter_for_ctc(self, config, input_values, attention_mask):
        config.add_adapter = True
        config.output_hidden_size = 2 * config.hidden_size
        model = Wav2Vec2ForCTC(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values, attention_mask=attention_mask)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.adapter_output_seq_length, self.vocab_size)
        )

    def create_and_check_model_with_adapter_proj_dim(self, config, input_values, attention_mask):
        config.add_adapter = True
        config.output_hidden_size = 8
        model = Wav2Vec2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values, attention_mask=attention_mask)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.adapter_output_seq_length, config.output_hidden_size),
        )

    def create_and_check_model_with_attn_adapter(self, config, input_values, attention_mask):
        config.adapter_attn_dim = 16
        model = Wav2Vec2ForCTC(config=config)

        self.parent.assertIsNotNone(model._get_adapters())

        model.to(torch_device)
        model.eval()
        result = model(input_values, attention_mask=attention_mask)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.output_seq_length, self.vocab_size))

    def create_and_check_batch_inference(self, config, input_values, *args):
        # test does not pass for models making use of `group_norm`
        # check: https://github.com/pytorch/fairseq/issues/3227
        model = Wav2Vec2Model(config=config)
        model.to(torch_device)
        model.eval()

        input_values = input_values[:3]
        attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool)

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0
            attention_mask[i, input_lengths[i] :] = 0.0

        batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state

        for i in range(input_values.shape[0]):
            input_slice = input_values[i : i + 1, : input_lengths[i]]
            output = model(input_slice).last_hidden_state

            batch_output = batch_outputs[i : i + 1, : output.shape[1]]
            self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3))
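    # The loss checks below only assert that a Python float comes out: CTC under both "sum"
    # and "mean" reduction, and sequence classification with and without an attention mask.
    # Exact values are not pinned since the inputs are random.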

    def check_ctc_loss(self, config, input_values, *args):
        model = Wav2Vec2ForCTC(config=config)
        model.to(torch_device)

        # make sure that dropout is disabled
        model.eval()

        input_values = input_values[:3]
        attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
        labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size)

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0
            attention_mask[i, input_lengths[i] :] = 0

        model.config.ctc_loss_reduction = "sum"
        sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()

        model.config.ctc_loss_reduction = "mean"
        mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()

        self.parent.assertTrue(isinstance(sum_loss, float))
        self.parent.assertTrue(isinstance(mean_loss, float))

    def check_seq_classifier_loss(self, config, input_values, *args):
        model = Wav2Vec2ForSequenceClassification(config=config)
        model.to(torch_device)

        # make sure that dropout is disabled
        model.eval()

        input_values = input_values[:3]
        attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0
            attention_mask[i, input_lengths[i] :] = 0

        masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
        unmasked_loss = model(input_values, labels=labels).loss.item()

        self.parent.assertTrue(isinstance(masked_loss, float))
        self.parent.assertTrue(isinstance(unmasked_loss, float))
        self.parent.assertTrue(masked_loss != unmasked_loss)

    def check_ctc_training(self, config, input_values, *args):
        config.ctc_zero_infinity = True
        model = Wav2Vec2ForCTC(config=config)
        model.to(torch_device)
        model.train()

        # freeze feature encoder
        model.freeze_feature_encoder()

        input_values = input_values[:3]

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
        labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size)

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0

            if max_length_labels[i] < labels.shape[-1]:
                # it's important that we make sure that target lengths are at least
                # one shorter than logit lengths to prevent -inf
                labels[i, max_length_labels[i] - 1 :] = -100

        loss = model(input_values, labels=labels).loss
        self.parent.assertFalse(torch.isinf(loss).item())

        loss.backward()

    def check_seq_classifier_training(self, config, input_values, *args):
        config.ctc_zero_infinity = True
        model = Wav2Vec2ForSequenceClassification(config=config)
        model.to(torch_device)
        model.train()

        # freeze everything but the classification head
        model.freeze_base_model()

        input_values = input_values[:3]

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0

        loss = model(input_values, labels=labels).loss
        self.parent.assertFalse(torch.isinf(loss).item())

        loss.backward()

    def check_xvector_training(self, config, input_values, *args):
        config.ctc_zero_infinity = True
        model = Wav2Vec2ForXVector(config=config)
        model.to(torch_device)
        model.train()

        # freeze everything but the classification head
        model.freeze_base_model()

        input_values = input_values[:3]

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))

        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0

        loss = model(input_values, labels=labels).loss
        self.parent.assertFalse(torch.isinf(loss).item())

        loss.backward()

    def check_labels_out_of_vocab(self, config, input_values, *args):
        model = Wav2Vec2ForCTC(config)
        model.to(torch_device)
        model.train()

        input_values = input_values[:3]

        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
        labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100)

        with self.parent.assertRaises(ValueError):
            model(input_values, labels=labels)

    def prepare_config_and_inputs_for_common(self):
        config, input_values, attention_mask = self.prepare_config_and_inputs()
        inputs_dict = {"input_values": input_values, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class Wav2Vec2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Wav2Vec2ForCTC, Wav2Vec2Model, Wav2Vec2ForMaskedLM, Wav2Vec2ForSequenceClassification, Wav2Vec2ForPreTraining)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "audio-classification": Wav2Vec2ForSequenceClassification,
            "automatic-speech-recognition": Wav2Vec2ForCTC,
            "feature-extraction": Wav2Vec2Model,
            "fill-mask": Wav2Vec2ForMaskedLM,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = Wav2Vec2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Wav2Vec2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_with_adapter(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_with_adapter(*config_and_inputs)

    def test_model_with_adapter_for_ctc(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_with_adapter_for_ctc(*config_and_inputs)

    def test_model_with_adapter_proj_dim(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_with_adapter_proj_dim(*config_and_inputs)

    def test_ctc_loss_inference(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_ctc_loss(*config_and_inputs)

    def test_seq_classifier_loss_inference(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_seq_classifier_loss(*config_and_inputs)

    def test_ctc_train(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_ctc_training(*config_and_inputs)

    def test_seq_classifier_train(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_seq_classifier_training(*config_and_inputs)

    def test_xvector_train(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_xvector_training(*config_and_inputs)

    def test_labels_out_of_vocab(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_labels_out_of_vocab(*config_and_inputs)

    @unittest.skip(reason="Model has no inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Model has input_values instead of input_ids")
    def test_forward_signature(self):
        pass

    @unittest.skip(reason="Model has no tokens embeds")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip(reason="Model has no inputs_embeds")
    def test_model_get_set_embeddings(self):
        pass

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        # force eager attention to support output attentions
        config._attn_implementation = "eager"

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        # set layer drop to 0
        model.config.layerdrop = 0.0

        input_values = inputs_dict["input_values"]

        input_lengths = torch.tensor(
            [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device
        )
        output_lengths = model._get_feat_extract_output_lengths(input_lengths)

        labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size)
        inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"])
        inputs_dict["labels"] = labels

        outputs = model(**inputs_dict)

        output = outputs[0]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        attentions = outputs.attentions[0]

        hidden_states.retain_grad()
        attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)
        self.assertIsNotNone(attentions.grad)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                uniform_init_parms = [
                    "conv.weight",
                    "conv.parametrizations.weight",
                    "masked_spec_embed",
                    "codevectors",
                    "quantizer.weight_proj.weight",
                    "project_hid.weight",
                    "project_hid.bias",
                    "project_q.weight",
                    "project_q.bias",
                    "feature_projection.projection.weight",
                    "feature_projection.projection.bias",
                    "objective.weight",
                ]
                if param.requires_grad:
                    if any(x in name for x in uniform_init_parms):
                        self.assertTrue(
                            -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
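    # (mean * 1e9).round() / 1e9 above keeps ~9 decimal digits of the parameter mean, so
    # zero/one-initialized weights compare exactly against 0.0 or 1.0 while uniformly
    # initialized ones only need to land inside [-1.0, 1.0].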

    # overwrite from test_modeling_common
    def _mock_init_weights(self, module):
        if hasattr(module, "weight") and module.weight is not None:
            module.weight.data.fill_(3)
        if hasattr(module, "weight_g") and module.weight_g is not None:
            module.weight_g.data.fill_(3)
        if hasattr(module, "weight_v") and module.weight_v is not None:
            module.weight_v.data.fill_(3)
        if hasattr(module, "bias") and module.bias is not None:
            module.bias.data.fill_(3)
        if hasattr(module, "codevectors") and module.codevectors is not None:
            module.codevectors.data.fill_(3)
        if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None:
            module.masked_spec_embed.data.fill_(3)

    def test_mask_feature_prob_ctc(self):
        model = Wav2Vec2ForCTC.from_pretrained(
            "hf-internal-testing/tiny-random-wav2vec2", mask_feature_prob=0.2, mask_feature_length=2
        )
        model.to(torch_device).train()
        processor = Wav2Vec2Processor.from_pretrained(
            "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True
        )

        batch_duration_in_seconds = [1, 3, 2, 6]
        input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds]

        batch = processor(
            input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt"
        )

        logits = model(
            input_values=batch["input_values"].to(torch_device),
            attention_mask=batch["attention_mask"].to(torch_device),
        ).logits

        self.assertEqual(logits.shape, (4, 1498, 32))

    def test_mask_time_prob_ctc(self):
        model = Wav2Vec2ForCTC.from_pretrained(
            "hf-internal-testing/tiny-random-wav2vec2", mask_time_prob=0.2, mask_time_length=2
        )
        model.to(torch_device).train()
        processor = Wav2Vec2Processor.from_pretrained(
            "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True
        )

        batch_duration_in_seconds = [1, 3, 2, 6]
        input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds]

        batch = processor(
            input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt"
        )

        logits = model(
            input_values=batch["input_values"].to(torch_device),
            attention_mask=batch["attention_mask"].to(torch_device),
        ).logits

        self.assertEqual(logits.shape, (4, 1498, 32))

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsNotNone(model)

    # Wav2Vec2 cannot be torchscripted because of group norm.
    def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False):
        # TODO: fix it
        self.skipTest(reason="torch 2.1 breaks torch fx tests for wav2vec2/hubert.")

        if not self.fx_compatible:
            self.skipTest(reason="torch fx is not compatible with this model")

        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        configs_no_init.return_dict = False

        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss)

            try:
                input_names = [
                    "attention_mask",
                    "bbox",
                    "input_features",
                    "input_ids",
                    "input_values",
                    "pixel_values",
                    "token_type_ids",
                    "visual_feats",
                    "visual_pos",
                ]

                labels = inputs.get("labels", None)
                start_positions = inputs.get("start_positions", None)
                end_positions = inputs.get("end_positions", None)
                if labels is not None:
                    input_names.append("labels")
                if start_positions is not None:
                    input_names.append("start_positions")
                if end_positions is not None:
                    input_names.append("end_positions")

                filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names}
                input_names = list(filtered_inputs.keys())

                model_output = model(**filtered_inputs)

                if (
                    isinstance(model, Wav2Vec2ForSequenceClassification)
                    and not hasattr(model.config, "problem_type")
                    or model.config.problem_type is None
                ):
                    model.config.problem_type = "single_label_classification"

                traced_model = symbolic_trace(model, input_names)
                traced_output = traced_model(**filtered_inputs)

            except Exception as e:
                self.fail(f"Couldn't trace module: {e}")

            def flatten_output(output):
                flatten = []
                for x in output:
                    if isinstance(x, (tuple, list)):
                        flatten += flatten_output(x)
                    elif not isinstance(x, torch.Tensor):
                        continue
                    else:
                        flatten.append(x)
                return flatten

            model_output = flatten_output(model_output)
            traced_output = flatten_output(traced_output)
            num_outputs = len(model_output)

            for i in range(num_outputs):
                self.assertTrue(
                    torch.allclose(model_output[i], traced_output[i]),
                    f"traced {i}th output doesn't match model {i}th output for {model_class}",
                )

            # Test that the model can be serialized and restored properly
            with tempfile.TemporaryDirectory() as tmp_dir_name:
                pkl_file_name = os.path.join(tmp_dir_name, "model.pkl")
                try:
                    with open(pkl_file_name, "wb") as f:
                        pickle.dump(traced_model, f)
                    with open(pkl_file_name, "rb") as f:
                        loaded = pickle.load(f)
                except Exception as e:
                    self.fail(f"Couldn't serialize / deserialize the traced model: {e}")

                loaded_output = loaded(**filtered_inputs)
                loaded_output = flatten_output(loaded_output)

                for i in range(num_outputs):
                    self.assertTrue(
                        torch.allclose(model_output[i], loaded_output[i]),
                        f"serialized model {i}th output doesn't match model {i}th output for {model_class}",
                    )

            # Avoid memory leak. Without this, each call increase RAM usage by ~20MB.
            # (Even with this call, there are still memory leak by ~0.04MB)
            self.clear_torch_jit_class_registry()
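

# The "robust" variant below re-runs the common checks with feat_extract_norm="layer" and
# do_stable_layer_norm=True, i.e. the layer-norm flavor of the architecture.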
@require_torch
class Wav2Vec2RobustModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            Wav2Vec2ForCTC,
            Wav2Vec2Model,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForXVector,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = Wav2Vec2ModelTester(
            self, conv_stride=(3, 3, 3), feat_extract_norm="layer", do_stable_layer_norm=True
        )
        self.config_tester = ConfigTester(self, config_class=Wav2Vec2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @is_flaky(
        description="The `codevector_idx` computed with `argmax()` in `Wav2Vec2GumbelVectorQuantizer.forward` is not stable."
    )
    def test_batching_equivalence(self):
        super().test_batching_equivalence()

    def test_model_with_adapter(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_with_adapter(*config_and_inputs)

    def test_model_with_adapter_proj_dim(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_with_adapter_proj_dim(*config_and_inputs)

    def test_model_with_attn_adapter(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_with_attn_adapter(*config_and_inputs)

    def test_batched_inference(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_batch_inference(*config_and_inputs)

    def test_ctc_loss_inference(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_ctc_loss(*config_and_inputs)

    def test_seq_classifier_loss_inference(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_seq_classifier_loss(*config_and_inputs)

    def test_ctc_train(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_ctc_training(*config_and_inputs)

    def test_seq_classifier_train(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_seq_classifier_training(*config_and_inputs)

    def test_xvector_train(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_xvector_training(*config_and_inputs)

    def test_labels_out_of_vocab(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_labels_out_of_vocab(*config_and_inputs)

    @unittest.skip(reason="Model has no input_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Model has input_values instead of input_ids")
    def test_forward_signature(self):
        pass

    @unittest.skip(reason="Model has no token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip(reason="Model has no input_embeds")
    def test_model_get_set_embeddings(self):
        pass

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        # force eager attention to support output attentions
        config._attn_implementation = "eager"

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        # set layer drop to 0
        model.config.layerdrop = 0.0

        input_values = inputs_dict["input_values"]

        input_lengths = torch.tensor(
            [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device
        )
        output_lengths = model._get_feat_extract_output_lengths(input_lengths)

        labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size)
        inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"])
        inputs_dict["labels"] = labels

        outputs = model(**inputs_dict)

        output = outputs[0]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        attentions = outputs.attentions[0]

        hidden_states.retain_grad()
        attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)
        self.assertIsNotNone(attentions.grad)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                uniform_init_parms = [
                    "conv.weight",
                    "conv.parametrizations.weight",
                    "masked_spec_embed",
                    "codevectors",
                    "quantizer.weight_proj.weight",
                    "project_hid.weight",
                    "project_hid.bias",
                    "project_q.weight",
                    "project_q.bias",
                    "feature_projection.projection.weight",
                    "feature_projection.projection.bias",
                    "objective.weight",
                ]
                if param.requires_grad:
                    if any(x in name for x in uniform_init_parms):
                        self.assertTrue(
                            -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

    # overwrite from test_modeling_common
    def _mock_init_weights(self, module):
        if hasattr(module, "weight") and module.weight is not None:
            module.weight.data.fill_(3)
        if hasattr(module, "weight_g") and module.weight_g is not None:
            module.weight_g.data.fill_(3)
        if hasattr(module, "weight_v") and module.weight_v is not None:
            module.weight_v.data.fill_(3)
        if hasattr(module, "bias") and module.bias is not None:
            module.bias.data.fill_(3)
        if hasattr(module, "codevectors") and module.codevectors is not None:
            module.codevectors.data.fill_(3)
        if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None:
            module.masked_spec_embed.data.fill_(3)

    def test_model_for_pretraining(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        model = Wav2Vec2ForPreTraining(config).to(torch_device)

        batch_size = inputs_dict["input_values"].shape[0]
        feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1]))

        features_shape = (batch_size, feature_seq_length)

        mask_time_indices = _compute_mask_indices(
            features_shape,
            model.config.mask_time_prob,
            model.config.mask_time_length,
            min_masks=2,
        )
        sampled_negative_indices = _sample_negative_indices(features_shape, 10, mask_time_indices)

        mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device)
        sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device)

        loss = model(
            inputs_dict["input_values"],
            attention_mask=inputs_dict["attention_mask"],
            mask_time_indices=mask_time_indices,
            sampled_negative_indices=sampled_negative_indices,
        ).loss
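
        # The loss sums a contrastive term over every masked position (each one must pick the
        # true quantized latent out of the 10 sampled distractors), so masking more frames
        # below should not shrink it.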
        # more losses
        mask_time_indices[:, : mask_time_indices.shape[-1] // 2] = True

        sampled_negative_indices = _sample_negative_indices(features_shape, 10, mask_time_indices.cpu().numpy())
        sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device)
        loss_more_masked = model(
            inputs_dict["input_values"],
            attention_mask=inputs_dict["attention_mask"],
            mask_time_indices=mask_time_indices,
            sampled_negative_indices=sampled_negative_indices,
        ).loss

        # loss_more_masked has to be bigger or equal loss since more masked inputs have to be predicted
        self.assertTrue(loss.item() <= loss_more_masked.item())

    def test_mask_feature_prob_ctc(self):
        model = Wav2Vec2ForCTC.from_pretrained(
            "hf-internal-testing/tiny-random-wav2vec2", mask_feature_prob=0.2, mask_feature_length=2
        )
        model.to(torch_device).train()
        processor = Wav2Vec2Processor.from_pretrained(
            "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True
        )

        batch_duration_in_seconds = [1, 3, 2, 6]
        input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds]

        batch = processor(
            input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt"
        )

        logits = model(
            input_values=batch["input_values"].to(torch_device),
            attention_mask=batch["attention_mask"].to(torch_device),
        ).logits

        self.assertEqual(logits.shape, (4, 1498, 32))

    def test_mask_time_prob_ctc(self):
        model = Wav2Vec2ForCTC.from_pretrained(
            "hf-internal-testing/tiny-random-wav2vec2", mask_time_prob=0.2, mask_time_length=2
        )
        model.to(torch_device).train()
        processor = Wav2Vec2Processor.from_pretrained(
            "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True
        )

        batch_duration_in_seconds = [1, 3, 2, 6]
        input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds]

        batch = processor(
            input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt"
        )

        logits = model(
            input_values=batch["input_values"].to(torch_device),
            attention_mask=batch["attention_mask"].to(torch_device),
        ).logits

        self.assertEqual(logits.shape, (4, 1498, 32))

    def test_mask_time_feature_prob_ctc_single_batch(self):
        model = Wav2Vec2ForCTC.from_pretrained(
            "hf-internal-testing/tiny-random-wav2vec2",
            mask_time_prob=0.2,
            mask_feature_prob=0.2,
            mask_time_length=2,
            mask_feature_length=2,
        )
        model.to(torch_device).train()
        processor = Wav2Vec2Processor.from_pretrained(
            "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True
        )

        batch_duration_in_seconds = [6]
        input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds]

        batch = processor(
            input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt"
        )

        logits = model(
            input_values=batch["input_values"].to(torch_device),
            attention_mask=batch["attention_mask"].to(torch_device),
        ).logits

        self.assertEqual(logits.shape, (1, 1498, 32))

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    def test_load_and_set_attn_adapter(self):
        processor = Wav2Vec2Processor.from_pretrained(
            "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True
        )

        def get_logits(model, input_features):
            model = model.to(torch_device)
            batch = processor(
                input_features,
                padding=True,
                sampling_rate=processor.feature_extractor.sampling_rate,
                return_tensors="pt",
            )

            with torch.no_grad():
                logits = model(
                    input_values=batch["input_values"].to(torch_device),
                    attention_mask=batch["attention_mask"].to(torch_device),
                ).logits
            return logits

        input_features = [np.random.random(16_000 * s) for s in [1, 3, 2, 6]]

        model = Wav2Vec2ForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2-adapter", target_lang="it")

        logits = get_logits(model, input_features)

        model_2 = Wav2Vec2ForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2-adapter")
        model_2.load_adapter("it")

        logits_2 = get_logits(model_2, input_features)

        torch.testing.assert_close(logits, logits_2, rtol=1e-3, atol=1e-3)

    # test that loading adapter weights with mismatched vocab sizes can be loaded
    def test_load_target_lang_with_mismatched_size(self):
        processor = Wav2Vec2Processor.from_pretrained(
            "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True
        )

        def get_logits(model, input_features):
            model = model.to(torch_device)
            batch = processor(
                input_features,
                padding=True,
                sampling_rate=processor.feature_extractor.sampling_rate,
                return_tensors="pt",
            )

            with torch.no_grad():
                logits = model(
                    input_values=batch["input_values"].to(torch_device),
                    attention_mask=batch["attention_mask"].to(torch_device),
                ).logits
            return logits

        input_features = [np.random.random(16_000 * s) for s in [1, 3, 2, 6]]

        model = Wav2Vec2ForCTC.from_pretrained(
            "hf-internal-testing/tiny-random-wav2vec2-adapter", target_lang="fr", ignore_mismatched_sizes=True
        )

        logits = get_logits(model, input_features)

        model_2 = Wav2Vec2ForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2-adapter")
        model_2.load_adapter("fr")

        logits_2 = get_logits(model_2, input_features)

        torch.testing.assert_close(logits, logits_2, rtol=1e-3, atol=1e-3)

    def test_load_attn_adapter(self):
        processor = Wav2Vec2Processor.from_pretrained(
            "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True
        )

        def get_logits(model, input_features):
            model = model.to(torch_device)
            batch = processor(
                input_features,
                padding=True,
                sampling_rate=processor.feature_extractor.sampling_rate,
                return_tensors="pt",
            )

            with torch.no_grad():
                logits = model(
                    input_values=batch["input_values"].to(torch_device),
                    attention_mask=batch["attention_mask"].to(torch_device),
                ).logits
            return logits

        input_features = [np.random.random(16_000 * s) for s in [1, 3, 2, 6]]

        model = Wav2Vec2ForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2", adapter_attn_dim=16)

        with tempfile.TemporaryDirectory() as tempdir:
            model.save_pretrained(tempdir)
            model = Wav2Vec2ForCTC.from_pretrained(tempdir)

            logits = get_logits(model, input_features)
            adapter_weights = model._get_adapters()

            # save safe weights
            safe_filepath = os.path.join(tempdir, WAV2VEC2_ADAPTER_SAFE_FILE.format("eng"))
            safe_save_file(adapter_weights, safe_filepath, metadata={"format": "pt"})

            model.load_adapter("eng")
            model.load_adapter("eng", use_safetensors=True)

            with self.assertRaises(OSError):
                model.load_adapter("eng", use_safetensors=False)
            with self.assertRaises(Exception):
                model.load_adapter("ita", use_safetensors=True)

            logits_2 = get_logits(model, input_features)

            torch.testing.assert_close(logits, logits_2, rtol=1e-3, atol=1e-3)

        with tempfile.TemporaryDirectory() as tempdir:
            model.save_pretrained(tempdir)
            model = Wav2Vec2ForCTC.from_pretrained(tempdir)

            logits = get_logits(model, input_features)
            adapter_weights = model._get_adapters()

            # save pt weights
            pt_filepath = os.path.join(tempdir, WAV2VEC2_ADAPTER_PT_FILE.format("eng"))
            torch.save(adapter_weights, pt_filepath)

            model.load_adapter("eng")
            model.load_adapter("eng", use_safetensors=False)

            with self.assertRaises(OSError):
                model.load_adapter("eng", use_safetensors=True)

            logits_2 = get_logits(model, input_features)

            torch.testing.assert_close(logits, logits_2, rtol=1e-3, atol=1e-3)
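
        # A checkpoint that already ships adapter weights should reproduce the manual
        # save/load round-trips above, whichever serialization format is picked.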
        model = Wav2Vec2ForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2-adapter")
        logits = get_logits(model, input_features)

        model.load_adapter("eng")
        model.load_adapter("eng", use_safetensors=False)
        model.load_adapter("eng", use_safetensors=True)

        logits_2 = get_logits(model, input_features)

        torch.testing.assert_close(logits, logits_2, rtol=1e-3, atol=1e-3)

    @slow
    def test_model_from_pretrained(self):
        model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsNotNone(model)


@require_torch
class Wav2Vec2UtilsTest(unittest.TestCase):
    def test_compute_mask_indices(self):
        batch_size = 4
        sequence_length = 60
        mask_prob = 0.5
        mask_length = 1

        mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
        mask = torch.from_numpy(mask).to(torch_device)

        self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)])

    def test_compute_mask_indices_low_prob(self):
        # with these settings num_masked_spans=0.5, which means probabilistic rounding
        # ensures that in 5 out of 10 method calls, num_masked_spans=0, and in
        # the other 5 out of 10, cases num_masked_spans=1
        n_trials = 100
        batch_size = 4
        sequence_length = 100
        mask_prob = 0.05
        mask_length = 10

        count_dimensions_masked = 0
        count_dimensions_not_masked = 0

        for _ in range(n_trials):
            mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
            mask = torch.from_numpy(mask).to(torch_device)

            num_masks = torch.sum(mask).item()

            if num_masks > 0:
                count_dimensions_masked += 1
            else:
                count_dimensions_not_masked += 1

        # as we test for at least 10 masked dimension and at least
        # 10 non-masked dimension, this test could fail with probability:
        # P(100 coin flips, at most 9 heads) = 1.66e-18
        self.assertGreater(count_dimensions_masked, int(n_trials * 0.1))
        self.assertGreater(count_dimensions_not_masked, int(n_trials * 0.1))

    def test_compute_mask_indices_overlap(self):
        batch_size = 4
        sequence_length = 80
        mask_prob = 0.5
        mask_length = 4

        mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
        mask = torch.from_numpy(mask).to(torch_device)

        # because of overlap mask don't have to add up exactly to `mask_prob * sequence_length`, but have to be smaller or equal
        for batch_sum in mask.sum(axis=-1):
            self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)

    def test_compute_mask_indices_attn_mask_overlap(self):
        batch_size = 4
        sequence_length = 80
        mask_prob = 0.5
        mask_length = 4

        attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device)
        attention_mask[:2, sequence_length // 2 :] = 0

        mask = _compute_mask_indices(
            (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask
        )
        mask = torch.from_numpy(mask).to(torch_device)

        for batch_sum in mask.sum(axis=-1):
            self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)

        self.assertTrue(mask[:2, sequence_length // 2 :].sum() == 0)

    def test_compute_mask_indices_short_audio(self):
        batch_size = 4
        sequence_length = 100
        mask_prob = 0.05
        mask_length = 10

        attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device)

        # force one example to be heavily padded
        attention_mask[0, 5:] = 0

        mask = _compute_mask_indices(
            (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask, min_masks=2
        )

        # make sure that non-padded examples cannot be padded
        self.assertFalse(mask[0][attention_mask[0].to(torch.bool).cpu()].any())
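
    # Codevector perplexity below is, roughly, the exponentiated entropy of the
    # batch-averaged code distribution: it measures how many codebook entries are
    # effectively in use.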
    def test_compute_perplexity(self):
        probs = torch.arange(100, device=torch_device).reshape(2, 5, 10) / 100

        ppl = Wav2Vec2GumbelVectorQuantizer._compute_perplexity(probs)
        self.assertTrue(abs(ppl.item() - 141.4291) < 1e-3)

        # mask half of the input
        mask = torch.ones((2,), device=torch_device, dtype=torch.bool)
        mask[0] = 0

        ppl = Wav2Vec2GumbelVectorQuantizer._compute_perplexity(probs, mask)
        self.assertTrue(abs(ppl.item() - 58.6757) < 1e-3)

    def test_sample_negatives(self):
        batch_size = 2
        sequence_length = 10
        hidden_size = 4
        num_negatives = 3
        sequence = torch.div(
            torch.arange(sequence_length * hidden_size, device=torch_device), hidden_size, rounding_mode="floor"
        )
        features = sequence.view(sequence_length, hidden_size)  # each value in vector consists of same value
        features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous()

        # sample negative indices
        sampled_negative_indices = _sample_negative_indices((batch_size, sequence_length), num_negatives, None)
        sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device)
        negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)]
        negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3)
        self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size))

        # make sure no negatively sampled vector is actually a positive one
        for negative in negatives:
            self.assertTrue(((negative - features) == 0).sum() == 0.0)

        # make sure that full vectors are sampled and not values of vectors => this means that `unique()` yields a single value for `hidden_size` dim
        self.assertEqual(negatives.unique(dim=-1).shape, (num_negatives, batch_size, sequence_length, 1))

    def test_sample_negatives_with_mask(self):
        batch_size = 2
        sequence_length = 10
        hidden_size = 4
        num_negatives = 3

        # second half of last input tensor is padded
        mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device)
        mask[-1, sequence_length // 2 :] = 0

        sequence = torch.div(
            torch.arange(sequence_length * hidden_size, device=torch_device), hidden_size, rounding_mode="floor"
        )
        features = sequence.view(sequence_length, hidden_size)  # each value in vector consists of same value
        features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous()

        # replace masked feature vectors with -100 to test that those are not sampled
        features = torch.where(mask[:, :, None].expand(features.shape).bool(), features, -100)

        # sample negative indices
        sampled_negative_indices = _sample_negative_indices(
            (batch_size, sequence_length), num_negatives, mask.cpu().numpy()
        )
        sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device)
        negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)]
        negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3)

        self.assertTrue((negatives >= 0).all().item())

        self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size))

        # make sure no negatively sampled vector is actually a positive one
        for negative in negatives:
            self.assertTrue(((negative - features) == 0).sum() == 0.0)

        # make sure that full vectors are sampled and not values of vectors => this means that `unique()` yields a single value for `hidden_size` dim
        self.assertEqual(negatives.unique(dim=-1).shape, (num_negatives, batch_size, sequence_length, 1))
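

# The integration tests below download real checkpoints and datasets, hence @slow;
# the torchcodec requirement presumably reflects that `datasets` decodes the audio
# columns with it.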
@require_torch
@require_torchcodec
@slow
class Wav2Vec2ModelIntegrationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        cleanup(torch_device, gc_collect=True)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").filter(
            lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)]
        )[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def _load_superb(self, task, num_samples):
        ds = load_dataset("anton-l/superb_dummy", task, split="test")

        return ds[:num_samples]

    def test_inference_ctc_normal(self):
        model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
        model.to(torch_device)
        processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h", do_lower_case=True)
        input_speech = self._load_datasamples(1)

        input_values = processor(input_speech, return_tensors="pt").input_values.to(torch_device)

        with torch.no_grad():
            logits = model(input_values).logits

        predicted_ids = torch.argmax(logits, dim=-1)
        predicted_trans = processor.batch_decode(predicted_ids)

        EXPECTED_TRANSCRIPTIONS = ["a man said to the universe sir i exist"]
        self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)

    def test_inference_ctc_normal_batched(self):
        model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
        model.to(torch_device)
        processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h", do_lower_case=True)

        input_speech = self._load_datasamples(2)

        inputs = processor(input_speech, return_tensors="pt", padding=True)

        input_values = inputs.input_values.to(torch_device)

        with torch.no_grad():
            logits = model(input_values).logits

        predicted_ids = torch.argmax(logits, dim=-1)
        predicted_trans = processor.batch_decode(predicted_ids)

        EXPECTED_TRANSCRIPTIONS = [
            "a man said to the universe sir i exist",
            "sweat covered brion's body trickling into the tight lowing cloth that was the only garment he wore",
        ]
        self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)

    def test_inference_ctc_robust_batched(self):
        model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self").to(torch_device)
        processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", do_lower_case=True)

        input_speech = self._load_datasamples(4)

        inputs = processor(input_speech, return_tensors="pt", padding=True)

        input_values = inputs.input_values.to(torch_device)
        attention_mask = inputs.attention_mask.to(torch_device)

        with torch.no_grad():
            logits = model(input_values, attention_mask=attention_mask).logits

        predicted_ids = torch.argmax(logits, dim=-1)
        predicted_trans = processor.batch_decode(predicted_ids)

        EXPECTED_TRANSCRIPTIONS = [
            "a man said to the universe sir i exist",
            "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore",
            "the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around"
            " him with the thousands of spectators were trivialities not worth thinking about",
            "his instant panic was followed by a small sharp blow high on his chest",
        ]
        self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)

    @unittest.skipIf(torch_device != "cpu", "cannot make deterministic on GPU")
    def test_inference_integration(self):
        model = Wav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-base")
        model.to(torch_device)
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base")
        input_speech = self._load_datasamples(2)

        inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True)

        batch_size = inputs_dict["input_values"].shape[0]
        feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1]))

        features_shape = (batch_size, feature_seq_length)

        np.random.seed(4)
        mask_time_indices = _compute_mask_indices(
            features_shape,
            model.config.mask_time_prob,
            model.config.mask_time_length,
            min_masks=2,
        )
        mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device)

        with torch.no_grad():
            outputs = model(
                inputs_dict.input_values.to(torch_device),
                mask_time_indices=mask_time_indices,
            )

        # compute cosine similarity
        cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1)

        # retrieve cosine sim of masked features
        cosine_sim_masked = cosine_sim[mask_time_indices]

        # cosine similarity of model is all > 0.5 as model is
        # pre-trained on contrastive loss
        # fmt: off
        expected_cosine_sim_masked = torch.tensor([
            0.8523, 0.5860, 0.6905, 0.5557, 0.7456, 0.5249, 0.6639, 0.7654, 0.7565, 0.8167,
            0.8222, 0.7960, 0.8034, 0.8166, 0.8310, 0.8263, 0.8274, 0.8258, 0.8179, 0.8412,
            0.8536, 0.5098, 0.4728, 0.6461, 0.4498, 0.6002, 0.5774, 0.6457, 0.7123, 0.5668,
            0.6866, 0.4960, 0.6293, 0.7423, 0.7419, 0.7526, 0.7768, 0.4898, 0.5393, 0.8183
        ], device=torch_device)
        # fmt: on

        torch.testing.assert_close(cosine_sim_masked, expected_cosine_sim_masked, rtol=1e-3, atol=1e-3)

    def test_inference_pretrained(self):
        model = Wav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-base")
        model.to(torch_device)
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
            "facebook/wav2vec2-base", return_attention_mask=True
        )
        input_speech = self._load_datasamples(2)

        inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True)

        batch_size = inputs_dict["input_values"].shape[0]
        feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1]))

        features_shape = (batch_size, feature_seq_length)

        torch.manual_seed(0)
        mask_time_indices = _compute_mask_indices(
            features_shape,
            model.config.mask_time_prob,
            model.config.mask_time_length,
            min_masks=2,
        )
        mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device)

        with torch.no_grad():
            outputs = model(
                inputs_dict.input_values.to(torch_device),
                attention_mask=inputs_dict.attention_mask.to(torch_device),
                mask_time_indices=mask_time_indices,
            )

        # compute cosine similarity
        cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1)

        # retrieve cosine sim of masked features
        cosine_sim_masked = cosine_sim[mask_time_indices]

        # ... now compare to randomly initialized model

        config = Wav2Vec2Config.from_pretrained("facebook/wav2vec2-base")
        model_rand = Wav2Vec2ForPreTraining(config).to(torch_device).eval()

        with torch.no_grad():
            outputs_rand = model_rand(
                inputs_dict.input_values.to(torch_device),
                attention_mask=inputs_dict.attention_mask.to(torch_device),
                mask_time_indices=mask_time_indices,
            )

        # compute cosine similarity
        cosine_sim_rand = torch.cosine_similarity(
            outputs_rand.projected_states, outputs_rand.projected_quantized_states, dim=-1
        )

        # retrieve cosine sim of masked features
        cosine_sim_masked_rand = cosine_sim_rand[mask_time_indices]

        # a pretrained wav2vec2 model has learned to predict the quantized latent states
        # => the cosine similarity between quantized states and predicted states > 0.5
        # a random wav2vec2 model has not learned to predict the quantized latent states
        # => the cosine similarity between quantized states and predicted states is very likely < 0.1
        self.assertTrue(cosine_sim_masked.mean().item() - 5 * cosine_sim_masked_rand.mean().item() > 0)

    @unittest.skipIf(torch_device != "cpu", "cannot make deterministic on GPU")
    def test_loss_pretraining(self):
        model = Wav2Vec2ForPreTraining.from_pretrained(
            "facebook/wav2vec2-base",
            attention_dropout=0.0,
            feat_proj_dropout=0.0,
            hidden_dropout=0.0,
            layerdrop=0.0,
        )
        model.to(torch_device).train()

        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
            "facebook/wav2vec2-base", return_attention_mask=True
        )
        input_speech = self._load_datasamples(2)

        inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True)

        batch_size = inputs_dict["input_values"].shape[0]
        feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1]))

        features_shape = (batch_size, feature_seq_length)

        torch.manual_seed(0)
        np.random.seed(0)

        mask_time_indices = _compute_mask_indices(
            features_shape,
            model.config.mask_time_prob,
            model.config.mask_time_length,
            min_masks=2,
        )
        sampled_negative_indices = _sample_negative_indices(
            mask_time_indices.shape, model.config.num_negatives, mask_time_indices
        )

        mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device)
        sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device)

        with torch.no_grad():
            outputs = model(
                inputs_dict.input_values.to(torch_device),
                attention_mask=inputs_dict.attention_mask.to(torch_device),
                mask_time_indices=mask_time_indices,
                sampled_negative_indices=sampled_negative_indices,
            )
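
        # The diversity loss pushes codebook usage toward uniform: with
        # GV = num_codevector_groups * num_codevectors_per_group, it is
        # (GV - perplexity) / GV, which vanishes when every codevector is
        # used equally often.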
        # check diversity loss
        num_codevectors = model.config.num_codevectors_per_group * model.config.num_codevector_groups
        diversity_loss = (num_codevectors - outputs.codevector_perplexity) / num_codevectors
        self.assertTrue(abs(diversity_loss.item() - 0.9538) < 1e-3)

        # check overall loss (contrastive loss + diversity loss)
        expected_loss = 116.7094
        self.assertTrue(abs(outputs.loss.item() - expected_loss) < 1e-3)

    def test_inference_keyword_spotting(self):
        model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-ks").to(torch_device)
        processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-ks")
        input_data = self._load_superb("ks", 4)
        inputs = processor(input_data["speech"], return_tensors="pt", padding=True)

        input_values = inputs.input_values.to(torch_device)
        attention_mask = inputs.attention_mask.to(torch_device)
        with torch.no_grad():
            outputs = model(input_values, attention_mask=attention_mask)
        predicted_logits, predicted_ids = torch.max(outputs.logits, dim=-1)

        expected_labels = [7, 6, 10, 9]
        # s3prl logits for the same batch
        expected_logits = torch.tensor([6.1186, 11.8961, 10.2931, 6.0898], device=torch_device)

        self.assertListEqual(predicted_ids.tolist(), expected_labels)
        torch.testing.assert_close(predicted_logits, expected_logits, rtol=1e-2, atol=1e-2)

    def test_inference_intent_classification(self):
        model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-ic").to(torch_device)
        processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-ic")
        input_data = self._load_superb("ic", 4)
        inputs = processor(input_data["speech"], return_tensors="pt", padding=True)

        input_values = inputs.input_values.to(torch_device)
        attention_mask = inputs.attention_mask.to(torch_device)
        with torch.no_grad():
            outputs = model(input_values, attention_mask=attention_mask)

        predicted_logits_action, predicted_ids_action = torch.max(outputs.logits[:, :6], dim=-1)
        predicted_logits_object, predicted_ids_object = torch.max(outputs.logits[:, 6:20], dim=-1)
        predicted_logits_location, predicted_ids_location = torch.max(outputs.logits[:, 20:24], dim=-1)

        expected_labels_action = [0, 0, 2, 3]
        expected_logits_action = torch.tensor([0.4568, 11.0848, 1.6621, 9.3841], device=torch_device)
        expected_labels_object = [3, 10, 3, 4]
        expected_logits_object = torch.tensor([1.5322, 10.7094, 5.2469, 22.1318], device=torch_device)
        expected_labels_location = [0, 0, 0, 1]
        expected_logits_location = torch.tensor([1.5335, 6.5096, 10.5704, 11.0569], device=torch_device)

        self.assertListEqual(predicted_ids_action.tolist(), expected_labels_action)
        self.assertListEqual(predicted_ids_object.tolist(), expected_labels_object)
        self.assertListEqual(predicted_ids_location.tolist(), expected_labels_location)

        torch.testing.assert_close(predicted_logits_action, expected_logits_action, rtol=1e-2, atol=1e-2)
        torch.testing.assert_close(predicted_logits_object, expected_logits_object, rtol=1e-2, atol=1e-2)
        torch.testing.assert_close(predicted_logits_location, expected_logits_location, rtol=1e-2, atol=1e-2)

    def test_inference_speaker_identification(self):
        model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-sid").to(torch_device)
        processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-sid")
        input_data = self._load_superb("si", 4)

        output_logits = []
        with torch.no_grad():
            for example in input_data["speech"]:
                input = processor(example, return_tensors="pt", padding=True)
                output = model(input.input_values.to(torch_device), attention_mask=None)
                output_logits.append(output.logits[0])
        output_logits = torch.stack(output_logits)
        predicted_logits, predicted_ids = torch.max(output_logits, dim=-1)

        expected_labels = [251, 1, 1, 3]
        # s3prl logits for the same batch
        expected_logits = torch.tensor([37.5627, 71.6362, 64.2419, 31.7778], device=torch_device)

        self.assertListEqual(predicted_ids.tolist(), expected_labels)
        torch.testing.assert_close(predicted_logits, expected_logits, rtol=1e-2, atol=1e-2)

    def test_inference_emotion_recognition(self):
        model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-er").to(torch_device)
        processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-er")
        input_data = self._load_superb("er", 4)
        inputs = processor(input_data["speech"], return_tensors="pt", padding=True)

        input_values = inputs.input_values.to(torch_device)
        attention_mask = inputs.attention_mask.to(torch_device)
        with torch.no_grad():
            outputs = model(input_values, attention_mask=attention_mask)
        predicted_logits, predicted_ids = torch.max(outputs.logits, dim=-1)
torch.max(outputs.logits, dim=-1) expected_labels = [1, 1, 2, 2] # s3prl logits for the same batch expected_logits = torch.tensor([2.1722, 3.0779, 8.0287, 6.6797], device=torch_device) self.assertListEqual(predicted_ids.tolist(), expected_labels) torch.testing.assert_close(predicted_logits, expected_logits, rtol=1e-2, atol=1e-2) def test_phoneme_recognition(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft").to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") input_speech = self._load_datasamples(4) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "ɐ m æ n s ɛ d t ə ð ə j uː n ɪ v ɚ s s ɚ aɪ ɛ ɡ z ɪ s t", "s w ɛ t k ʌ v ɚ d b ɹ iː ɔ n z b ɑː d i t ɹ ɪ k l ɪ ŋ ɪ n t ə ð ə t aɪ t l oɪ n k l ɑː θ ð æ w ʌ z ð ɪ oʊ" " n l i ɡ ɑːɹ m ə n t h iː w ɔːɹ", "ð ə k aɪ t ɔ n h ɪ z tʃ ɛ s t s t ɪ l d ɹ ɪ p ɪ ŋ b l ʌ d ð ɪ eɪ k ʌ v h ɪ z oʊ v ɚ s t ɹ eɪ n d aɪ z iː" " v ə n ð ə s ɔːɹ ɹ ɪ ŋ ɐ ɹ iː n ɐ ɚ ɹ aʊ n d h ɪ m w ɪ ð ə θ aʊ z ə n d z ʌ v s p ɛ k t eɪ ɾ ɚ z w ɜː t ɹ" " ɪ v ɪ æ l ᵻ ɾ i z n ɑː t w ɜː θ θ ɪ ŋ k ɪ ŋ ɐ b aʊ t", "h ɪ z ɪ n s t ə n t v p æ n ɪ k w ʌ z f ɑː l oʊ d b aɪ ɐ s m ɔː l ʃ ɑːɹ p b l oʊ h aɪ ɔ n h ɪ z tʃ ɛ s t", ] # should correspond to =>: # [ # "a man said to the universe sir i exist", # "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore", # "the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around him with the thousands of spectators were trivialities not worth thinking about", # "his instant panic was followed by a small sharp blow high on his chest", # ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) @require_pyctcdecode @require_torchaudio def test_wav2vec2_with_lm(self): ds = load_dataset("fixie-ai/common_voice_17_0", "es", split="test", streaming=True) sample = next(iter(ds)) resampled_audio = torchaudio.functional.resample( torch.tensor(sample["audio"]["array"]), 48_000, 16_000 ).numpy() model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm").to( torch_device ) processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(resampled_audio, return_tensors="pt").input_values with torch.no_grad(): logits = model(input_values.to(torch_device)).logits transcription = processor.batch_decode(logits.cpu().numpy()).text expected_transcription = "el resto de los equipos se mantienen en su sede" self.assertEqual(transcription[0], expected_transcription) @require_pyctcdecode @require_torchaudio def test_wav2vec2_with_lm_pool(self): ds = load_dataset("fixie-ai/common_voice_17_0", "es", split="test", streaming=True) sample = next(iter(ds)) resampled_audio = torchaudio.functional.resample( torch.tensor(sample["audio"]["array"]), 48_000, 16_000 ).numpy() model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm").to( torch_device ) processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(resampled_audio, 
return_tensors="pt").input_values with torch.no_grad(): logits = model(input_values.to(torch_device)).logits # test user-managed pool with multiprocessing.get_context("fork").Pool(2) as pool: transcription = processor.batch_decode(logits.cpu().numpy(), pool).text expected_transcription = "el resto de los equipos se mantienen en su sede" self.assertEqual(transcription[0], expected_transcription) # user-managed pool + num_processes should trigger a warning with ( CaptureLogger(processing_wav2vec2_with_lm.logger) as cl, multiprocessing.get_context("fork").Pool(2) as pool, ): transcription = processor.batch_decode(logits.cpu().numpy(), pool, num_processes=2).text self.assertIn("num_process", cl.out) self.assertIn("it will be ignored", cl.out) self.assertEqual(transcription[0], expected_transcription) @require_pyctcdecode @require_torchaudio def test_wav2vec2_with_lm_invalid_pool(self): run_test_in_subprocess(test_case=self, target_func=_test_wav2vec2_with_lm_invalid_pool, inputs=None) def test_inference_diarization(self): model = Wav2Vec2ForAudioFrameClassification.from_pretrained("anton-l/wav2vec2-base-superb-sd").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("anton-l/wav2vec2-base-superb-sd") input_data = self._load_superb("sd", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True, sampling_rate=16_000) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) # labels is a one-hot array of shape (num_frames, num_speakers) labels = (outputs.logits > 0).long() # s3prl logits for the same batch expected_logits = torch.tensor( [ [[-5.2807, -5.1272], [-5.4059, -4.7757], [-5.2764, -4.9621], [-5.0117, -4.5851]], [[-1.7643, -0.5462], [-1.7369, -0.2649], [-1.5066, -0.6200], [-4.5703, -2.4863]], [[-0.8656, -0.4783], [-0.8899, -0.3289], [-0.9267, -0.5781], [-0.7817, -0.4619]], [[-4.8625, -2.5316], [-5.2339, -2.2155], [-4.9835, -2.0344], [-4.4727, -1.8421]], ], device=torch_device, ) self.assertEqual(labels[0, :, 0].sum(), 555) self.assertEqual(labels[0, :, 1].sum(), 299) torch.testing.assert_close(outputs.logits[:, :4], expected_logits, rtol=1e-2, atol=1e-2) def test_inference_speaker_verification(self): model = Wav2Vec2ForXVector.from_pretrained("anton-l/wav2vec2-base-superb-sv").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("anton-l/wav2vec2-base-superb-sv") input_data = self._load_superb("si", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True, sampling_rate=16_000) labels = torch.tensor([5, 1, 1, 3], device=torch_device).T with torch.no_grad(): input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) outputs = model(input_values, attention_mask=attention_mask, labels=labels) embeddings = torch.nn.functional.normalize(outputs.embeddings, dim=-1).cpu() cosine_sim = torch.nn.CosineSimilarity(dim=-1) # id10002 vs id10002 self.assertAlmostEqual(cosine_sim(embeddings[1], embeddings[2]).numpy(), 0.9758, 3) # id10006 vs id10002 self.assertAlmostEqual(cosine_sim(embeddings[0], embeddings[1]).numpy(), 0.7579, 3) # id10002 vs id10004 self.assertAlmostEqual(cosine_sim(embeddings[2], embeddings[3]).numpy(), 0.7594, 3) self.assertAlmostEqual(outputs.loss.item(), 17.7963, 2) @require_torchaudio def test_inference_mms_1b_all(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/mms-1b-all").to(torch_device) processor = 
Wav2Vec2Processor.from_pretrained("facebook/mms-1b-all") LANG_MAP = {"it": "ita", "es": "spa", "fr": "fra", "en": "eng"} def run_model(lang): ds = load_dataset("fixie-ai/common_voice_17_0", lang, split="test", streaming=True) sample = next(iter(ds)) wav2vec2_lang = LANG_MAP[lang] model.load_adapter(wav2vec2_lang) processor.tokenizer.set_target_lang(wav2vec2_lang) resampled_audio = torchaudio.functional.resample( torch.tensor(sample["audio"]["array"]), 48_000, 16_000 ).numpy() inputs = processor(resampled_audio, sampling_rate=16_000, return_tensors="pt") input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask).logits ids = torch.argmax(outputs, dim=-1)[0] transcription = processor.decode(ids) return transcription TRANSCRIPTIONS = { "it": "viaggiarono in italia dove lavorarono con hamilton", "es": "el resto de los equipos se mantienen en su sede", "fr": "il a obtenu son batchelor of lows", "en": "joe keton disapproved of films and buster also had reservations about the media", } for lang in LANG_MAP: assert run_model(lang) == TRANSCRIPTIONS[lang] @require_flash_attn @require_torch_gpu @mark.flash_attn_test def test_inference_ctc_fa2(self): model_fa = Wav2Vec2ForCTC.from_pretrained( "facebook/wav2vec2-base-960h", attn_implementation="flash_attention_2", dtype=torch.bfloat16 ) model_fa.to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h", do_lower_case=True) input_speech = self._load_datasamples(1) input_values = processor(input_speech, return_tensors="pt").input_values.to(torch_device) with torch.no_grad(): logits = model_fa(input_values.to(torch.bfloat16)).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = ["a man said to the universe sir i exist"] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) @require_flash_attn @require_torch_gpu @mark.flash_attn_test def test_inference_ctc_fa2_batched(self): model_fa = Wav2Vec2ForCTC.from_pretrained( "facebook/wav2vec2-base-960h", attn_implementation="flash_attention_2", dtype=torch.bfloat16 ) model_fa.to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h", do_lower_case=True) input_speech = self._load_datasamples(2) inputs = processor(input_speech, return_tensors="pt", padding=True, return_attention_mask=True) inputs = inputs.to(torch_device) with torch.no_grad(): logits = model_fa(inputs.input_values.to(torch.bfloat16), attention_mask=inputs.attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight lowing cloth that was the only garment he wore", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)
transformers/tests/models/wav2vec2/test_modeling_wav2vec2.py/0
{ "file_path": "transformers/tests/models/wav2vec2/test_modeling_wav2vec2.py", "repo_id": "transformers", "token_count": 39271 }
525
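The integration tests in the record above repeat one recipe: featurize raw audio with the processor, run the model under `torch.no_grad()`, then argmax-decode the logits. A minimal, self-contained sketch of that recipe, not part of the test file; the checkpoint name is taken from the tests above, and the random one-second waveform is a stand-in assumption for real 16 kHz speech:

# Illustrative sketch only, mirroring the inference pattern used in the tests above.
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h").eval()

waveform = torch.randn(16_000).numpy()  # pretend 1 s of 16 kHz audio (assumption)
inputs = processor(waveform, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    logits = model(inputs.input_values).logits  # shape: (batch, time, vocab)

predicted_ids = torch.argmax(logits, dim=-1)  # greedy CTC decoding
print(processor.batch_decode(predicted_ids))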
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import datasets
from huggingface_hub import ImageClassificationOutputElement

from transformers import (
    MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
    TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
    PreTrainedTokenizerBase,
    is_torch_available,
    is_vision_available,
)
from transformers.pipelines import ImageClassificationPipeline, pipeline
from transformers.testing_utils import (
    compare_pipeline_output_to_hub_spec,
    is_pipeline_test,
    nested_simplify,
    require_torch,
    require_torch_or_tf,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_torch_or_tf
@require_vision
class ImageClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

    _dataset = None

    @classmethod
    def _load_dataset(cls):
        # Lazy loading of the dataset. Because it is a class method, it will only be loaded once per pytest process.
        if cls._dataset is None:
            # we use revision="refs/pr/1" until the PR is merged
            # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
            cls._dataset = datasets.load_dataset(
                "hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1"
            )

    def get_test_pipeline(
        self,
        model,
        tokenizer=None,
        image_processor=None,
        feature_extractor=None,
        processor=None,
        dtype="float32",
    ):
        image_classifier = ImageClassificationPipeline(
            model=model,
            tokenizer=tokenizer,
            feature_extractor=feature_extractor,
            image_processor=image_processor,
            processor=processor,
            dtype=dtype,
            top_k=2,
        )
        examples = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
        ]
        return image_classifier, examples

    def run_pipeline_test(self, image_classifier, examples):
        self._load_dataset()
        outputs = image_classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")

        self.assertEqual(
            outputs,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        # Accepts URL + PIL.Image + lists
        outputs = image_classifier(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                self._dataset[0]["image"],
                # LA
                self._dataset[1]["image"],
                # L
                self._dataset[2]["image"],
            ]
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            ],
        )

        for single_output in outputs:
            for output_element in single_output:
                compare_pipeline_output_to_hub_spec(output_element, ImageClassificationOutputElement)

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-vit"
        image_classifier = pipeline("image-classification", model=small_model)

        outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}],
        )

        outputs = image_classifier(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}],
                [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}],
            ],
        )

    def test_custom_tokenizer(self):
        tokenizer = PreTrainedTokenizerBase()

        # Assert that the pipeline can be initialized with a tokenizer that is not in any mapping
        image_classifier = pipeline(
            "image-classification", model="hf-internal-testing/tiny-random-vit", tokenizer=tokenizer
        )

        self.assertIs(image_classifier.tokenizer, tokenizer)

    @require_torch
    def test_torch_float16_pipeline(self):
        image_classifier = pipeline(
            "image-classification", model="hf-internal-testing/tiny-random-vit", dtype=torch.float16
        )
        outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg")

        self.assertEqual(
            nested_simplify(outputs, decimals=3),
            [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}],
        )

    @require_torch
    def test_torch_bfloat16_pipeline(self):
        image_classifier = pipeline(
            "image-classification", model="hf-internal-testing/tiny-random-vit", dtype=torch.bfloat16
        )
        outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg")

        self.assertEqual(
            nested_simplify(outputs, decimals=3),
            [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}],
        )

    @slow
    @require_torch
    def test_perceiver(self):
        # Perceiver is not tested by `run_pipeline_test` properly.
        # That is because the type of feature_extractor and model preprocessor need to be kept
        # in sync, which is not the case in the current design
        image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-conv")
        outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4385, "label": "tabby, tabby cat"},
                {"score": 0.321, "label": "tiger cat"},
                {"score": 0.0502, "label": "Egyptian cat"},
                {"score": 0.0137, "label": "crib, cot"},
                {"score": 0.007, "label": "radiator"},
            ],
        )

        image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-fourier")
        outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.5658, "label": "tabby, tabby cat"},
                {"score": 0.1309, "label": "tiger cat"},
                {"score": 0.0722, "label": "Egyptian cat"},
                {"score": 0.0707, "label": "remote control, remote"},
                {"score": 0.0082, "label": "computer keyboard, keypad"},
            ],
        )

        image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-learned")
        outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3022, "label": "tabby, tabby cat"},
                {"score": 0.2362, "label": "Egyptian cat"},
                {"score": 0.1856, "label": "tiger cat"},
                {"score": 0.0324, "label": "remote control, remote"},
                {"score": 0.0096, "label": "quilt, comforter, comfort, puff"},
            ],
        )

    @slow
    @require_torch
    def test_multilabel_classification(self):
        small_model = "hf-internal-testing/tiny-random-vit"

        # Sigmoid is applied for multi-label classification
        image_classifier = pipeline("image-classification", model=small_model)
        image_classifier.model.config.problem_type = "multi_label_classification"

        outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}],
        )

        outputs = image_classifier(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}],
                [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}],
            ],
        )

    @slow
    @require_torch
    def test_function_to_apply(self):
        small_model = "hf-internal-testing/tiny-random-vit"

        # Sigmoid is applied for multi-label classification
        image_classifier = pipeline("image-classification", model=small_model)

        outputs = image_classifier(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            function_to_apply="sigmoid",
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}],
        )
transformers/tests/pipelines/test_pipelines_image_classification.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_image_classification.py", "repo_id": "transformers", "token_count": 5343 }
526
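For context, the user-facing path these tests exercise is just the `pipeline` factory. A minimal sketch, not part of the test file; it reuses the tiny test checkpoint and COCO image URL from the tests above and assumes network access:

# Illustrative sketch only, showing the plain usage path the tests above cover.
from transformers import pipeline

classifier = pipeline("image-classification", model="hf-internal-testing/tiny-random-vit")
preds = classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=2)
for pred in preds:
    print(f"{pred['label']}: {pred['score']:.4f}")  # each entry is {"label": str, "score": float}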
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import pytest

from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    MBart50TokenizerFast,
    MBartConfig,
    MBartForConditionalGeneration,
    TranslationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_torch, slow

from .test_pipelines_common import ANY


@is_pipeline_test
class TranslationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(
        self,
        model,
        tokenizer=None,
        image_processor=None,
        feature_extractor=None,
        processor=None,
        dtype="float32",
    ):
        if isinstance(model.config, MBartConfig):
            src_lang, tgt_lang = list(tokenizer.lang_code_to_id.keys())[:2]
            translator = TranslationPipeline(
                model=model,
                tokenizer=tokenizer,
                feature_extractor=feature_extractor,
                image_processor=image_processor,
                processor=processor,
                dtype=dtype,
                src_lang=src_lang,
                tgt_lang=tgt_lang,
                max_new_tokens=20,
            )
        else:
            translator = TranslationPipeline(
                model=model,
                tokenizer=tokenizer,
                feature_extractor=feature_extractor,
                image_processor=image_processor,
                processor=processor,
                dtype=dtype,
                max_new_tokens=20,
            )
        return translator, ["Some string", "Some other text"]

    def run_pipeline_test(self, translator, _):
        outputs = translator("Some string")
        self.assertEqual(outputs, [{"translation_text": ANY(str)}])

        outputs = translator(["Some string"])
        self.assertEqual(outputs, [{"translation_text": ANY(str)}])

        outputs = translator(["Some string", "other string"])
        self.assertEqual(outputs, [{"translation_text": ANY(str)}, {"translation_text": ANY(str)}])

    @require_torch
    def test_small_model_pt(self):
        translator = pipeline("translation_en_to_ro", model="patrickvonplaten/t5-tiny-random", framework="pt")
        outputs = translator("This is a test string", max_length=20)
        self.assertEqual(
            outputs,
            [
                {
                    "translation_text": (
                        "Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide"
                        " Beide Beide"
                    )
                }
            ],
        )

    @require_torch
    def test_en_to_de_pt(self):
        translator = pipeline("translation_en_to_de", model="patrickvonplaten/t5-tiny-random", framework="pt")
        outputs = translator("This is a test string", max_length=20)
        self.assertEqual(
            outputs,
            [
                {
                    "translation_text": (
                        "monoton monoton monoton monoton monoton monoton monoton monoton monoton monoton urine urine"
                        " urine urine urine urine urine urine urine"
                    )
                }
            ],
        )


class TranslationNewFormatPipelineTests(unittest.TestCase):
    @require_torch
    @slow
    def test_default_translations(self):
        # We don't provide a default for this pair
        with self.assertRaises(ValueError):
            pipeline(task="translation_cn_to_ar")

        # but we do for this one
        translator = pipeline(task="translation_en_to_de")
        self.assertEqual(translator._preprocess_params["src_lang"], "en")
        self.assertEqual(translator._preprocess_params["tgt_lang"], "de")

    @require_torch
    @slow
    def test_multilingual_translation(self):
        model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
        tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")

        translator = pipeline(task="translation", model=model, tokenizer=tokenizer)
        # Missing src_lang, tgt_lang
        with self.assertRaises(ValueError):
            translator("This is a test")

        outputs = translator("This is a test", src_lang="en_XX", tgt_lang="ar_AR")
        self.assertEqual(outputs, [{"translation_text": "هذا إختبار"}])

        outputs = translator("This is a test", src_lang="en_XX", tgt_lang="hi_IN")
        self.assertEqual(outputs, [{"translation_text": "यह एक परीक्षण है"}])

        # src_lang, tgt_lang can be defined at pipeline call time
        translator = pipeline(task="translation", model=model, tokenizer=tokenizer, src_lang="en_XX", tgt_lang="ar_AR")
        outputs = translator("This is a test")
        self.assertEqual(outputs, [{"translation_text": "هذا إختبار"}])

    @require_torch
    def test_translation_on_odd_language(self):
        model = "patrickvonplaten/t5-tiny-random"
        translator = pipeline(task="translation_cn_to_ar", model=model)
        self.assertEqual(translator._preprocess_params["src_lang"], "cn")
        self.assertEqual(translator._preprocess_params["tgt_lang"], "ar")

    @require_torch
    def test_translation_default_language_selection(self):
        model = "patrickvonplaten/t5-tiny-random"
        with pytest.warns(UserWarning, match=r".*translation_en_to_de.*"):
            translator = pipeline(task="translation", model=model)
        self.assertEqual(translator.task, "translation_en_to_de")
        self.assertEqual(translator._preprocess_params["src_lang"], "en")
        self.assertEqual(translator._preprocess_params["tgt_lang"], "de")

    @require_torch
    def test_translation_with_no_language_no_model_fails(self):
        with self.assertRaises(ValueError):
            pipeline(task="translation")
transformers/tests/pipelines/test_pipelines_translation.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_translation.py", "repo_id": "transformers", "token_count": 2903 }
527
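The multilingual case in the record above hinges on passing mBART-50 language codes as `src_lang`/`tgt_lang`. A minimal sketch of that call pattern, not part of the test file; the target language `fr_XX` is an illustrative assumption, and downloading the large checkpoint is required:

# Illustrative sketch only, showing the src_lang/tgt_lang call pattern tested above.
from transformers import pipeline

translator = pipeline(
    "translation",
    model="facebook/mbart-large-50-many-to-many-mmt",
    src_lang="en_XX",
    tgt_lang="fr_XX",  # codes follow the mBART-50 convention, e.g. "ar_AR", "hi_IN"
)
print(translator("This is a test")[0]["translation_text"])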
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import tempfile
import unittest

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, VptqConfig
from transformers.testing_utils import (
    backend_empty_cache,
    require_accelerate,
    require_torch_gpu,
    require_torch_multi_gpu,
    require_vptq,
    slow,
    torch_device,
)
from transformers.utils import is_accelerate_available, is_torch_available


if is_torch_available():
    import torch

if is_accelerate_available():
    from accelerate import init_empty_weights


class VptqConfigTest(unittest.TestCase):
    def test_to_dict(self):
        """
        Makes sure the config format is properly set
        """
        quantization_config = VptqConfig()
        vptq_orig_config = quantization_config.to_dict()

        self.assertEqual(vptq_orig_config["quant_method"], quantization_config.quant_method)


@slow
@require_torch_gpu
@require_vptq
@require_accelerate
class VptqTest(unittest.TestCase):
    model_name = "VPTQ-community/Meta-Llama-3.1-8B-Instruct-v12-k65536-4096-woft"

    input_text = "Hello my name is"
    max_new_tokens = 32

    EXPECTED_OUTPUT = "Hello my name is Sarah and I am a 25 year old woman from the United States. I am a college graduate and I am currently working as a marketing specialist for a small"

    device_map = "cuda"

    # called only once for all tests in this class
    @classmethod
    def setUpClass(cls):
        """
        Setup quantized model
        """
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.quantized_model = AutoModelForCausalLM.from_pretrained(
            cls.model_name,
            device_map=cls.device_map,
        )

    def tearDown(self):
        gc.collect()
        backend_empty_cache(torch_device)
        gc.collect()

    def test_quantized_model(self):
        """
        Simple test that checks if the quantized model is working properly
        """
        input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)

        output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens, do_sample=False)
        self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)

    def test_raise_if_non_quantized(self):
        model_id = "facebook/opt-125m"
        quantization_config = VptqConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config)

    def test_save_pretrained(self):
        """
        Simple test that checks if the quantized model is working properly after being saved and loaded
        """
        with tempfile.TemporaryDirectory() as tmpdirname:
            self.quantized_model.save_pretrained(tmpdirname)
            model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map)

            input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)

            output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens, do_sample=False)
            self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)

    @require_torch_multi_gpu
    def test_quantized_model_multi_gpu(self):
        """
        Simple test that checks if the quantized model is working properly with multiple GPUs
        """
        input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)

        quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, device_map="auto")

        self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1})

        output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens, do_sample=False)

        self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)

    def test_quantized_model_conversion(self):
        """
        Simple test that checks if the quantized model has been converted properly
        """
        from vptq import VQuantLinear

        from transformers.integrations import replace_with_vptq_linear

        model_id = "facebook/opt-350m"
        config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5")
        modules_to_not_convert = ["lm_head"]
        names = [
            "q_proj",
            "k_proj",
            "v_proj",
            "out_proj",
            "fc1",
            "fc2",
        ]
        value = {
            "enable_norm": True,
            "enable_perm": True,
            "group_num": 1,
            "group_size": 128,
            "indices_as_float": False,
            "num_centroids": [-1, 128],
            "num_res_centroids": [-1, 128],
            "outlier_size": 0,
            "vector_lens": [-1, 12],
        }
        shared_layer_config = {}
        for name in names:
            shared_layer_config[name] = value
        for i in range(24):
            modules_to_not_convert.append(f"model.decoder.layers.{i}.fc1")
        layer_configs = {}
        layer_configs["model.decoder.project_out"] = value
        layer_configs["model.decoder.project_in"] = value
        quantization_config = VptqConfig(config_for_layers=layer_configs, shared_layer_config=shared_layer_config)

        with init_empty_weights():
            model = AutoModelForCausalLM.from_config(config)

        nb_linears = 0
        for module in model.modules():
            if isinstance(module, torch.nn.Linear):
                nb_linears += 1

        model, _ = replace_with_vptq_linear(model, quantization_config=quantization_config)
        nb_vptq_linear = 0
        for module in model.modules():
            if isinstance(module, VQuantLinear):
                nb_vptq_linear += 1

        self.assertEqual(nb_linears - 1, nb_vptq_linear)

        # Try with `linear_weights_not_to_quantize`
        with init_empty_weights():
            model = AutoModelForCausalLM.from_config(config)
        quantization_config = VptqConfig(config_for_layers=layer_configs, shared_layer_config=shared_layer_config)
        model, _ = replace_with_vptq_linear(
            model, quantization_config=quantization_config, modules_to_not_convert=modules_to_not_convert
        )
        nb_vptq_linear = 0
        for module in model.modules():
            if isinstance(module, VQuantLinear):
                nb_vptq_linear += 1
        # 25 comes from 24 decoder.layers.{layer_idx}.fc1
        # and the last lm_head
        self.assertEqual(nb_linears - 25, nb_vptq_linear)
transformers/tests/quantization/vptq_integration/test_vptq.py/0
{ "file_path": "transformers/tests/quantization/vptq_integration/test_vptq.py", "repo_id": "transformers", "token_count": 3067 }
528
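The conversion test in the record above relies on a simple idiom: walk `model.modules()` and count instances of a layer type before and after replacement. A dependency-free sketch of that idiom, not part of the test file; the toy model is an assumption for demonstration:

# Illustrative sketch only, isolating the module-counting idiom used in the test above.
import torch.nn as nn


def count_modules_of_type(model: nn.Module, module_type: type) -> int:
    # model.modules() yields the model itself plus every nested submodule.
    return sum(1 for module in model.modules() if isinstance(module, module_type))


toy = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 2))
assert count_modules_of_type(toy, nn.Linear) == 2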
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import Union

import transformers
from transformers.testing_utils import require_torch, slow


logger = logging.getLogger()


@unittest.skip(reason="Temporarily disable the doc tests.")
@require_torch
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[list[str], None] = None,
        n_identifier: Union[str, list[str], None] = None,
        only_modules: bool = True,
    ):
        """
        Runs through the specific directory, looking for the files identified with `identifier`. Executes the doctests
        in those files

        Args:
            directory (`Path`): Directory containing the files
            identifier (`str`): Will parse files containing this
            ignore_files (`List[str]`): List of files to skip
            n_identifier (`str` or `List[str]`): Will not parse files containing this/these identifiers.
            only_modules (`bool`): Whether to only analyze modules
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
transformers/tests/utils/test_doc_samples.py/0
{ "file_path": "transformers/tests/utils/test_doc_samples.py", "repo_id": "transformers", "token_count": 1731 }
529
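The skipped suite in the record above drives the standard-library `doctest` machinery, which collects `>>>` examples from docstrings and runs them as tests. A minimal self-contained sketch of that mechanism, not part of the test file; the `add` function is made up for illustration:

# Illustrative sketch only, showing the stdlib doctest mechanism the suite above uses.
import doctest


def add(a, b):
    """Add two numbers.

    >>> add(2, 3)
    5
    """
    return a + b


results = doctest.testmod(verbose=False)  # runs the ">>>" example embedded in the docstring
assert results.failed == 0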
# Copyright 2020 The Hugging Face Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import io
import unittest
from dataclasses import dataclass
from typing import Optional

import pytest

from transformers import AlbertForMaskedLM
from transformers.testing_utils import require_torch
from transformers.utils import ModelOutput, is_torch_available


if is_torch_available():
    import torch


@dataclass
class ModelOutputTest(ModelOutput):
    a: float
    b: Optional[float] = None
    c: Optional[float] = None


class ModelOutputTester(unittest.TestCase):
    def test_get_attributes(self):
        x = ModelOutputTest(a=30)
        self.assertEqual(x.a, 30)
        self.assertIsNone(x.b)
        self.assertIsNone(x.c)
        with self.assertRaises(AttributeError):
            _ = x.d

    def test_index_with_ints_and_slices(self):
        x = ModelOutputTest(a=30, b=10)
        self.assertEqual(x[0], 30)
        self.assertEqual(x[1], 10)
        self.assertEqual(x[:2], (30, 10))
        self.assertEqual(x[:], (30, 10))

        x = ModelOutputTest(a=30, c=10)
        self.assertEqual(x[0], 30)
        self.assertEqual(x[1], 10)
        self.assertEqual(x[:2], (30, 10))
        self.assertEqual(x[:], (30, 10))

    def test_index_with_strings(self):
        x = ModelOutputTest(a=30, b=10)
        self.assertEqual(x["a"], 30)
        self.assertEqual(x["b"], 10)
        with self.assertRaises(KeyError):
            _ = x["c"]

        x = ModelOutputTest(a=30, c=10)
        self.assertEqual(x["a"], 30)
        self.assertEqual(x["c"], 10)
        with self.assertRaises(KeyError):
            _ = x["b"]

    def test_dict_like_properties(self):
        x = ModelOutputTest(a=30)
        self.assertEqual(list(x.keys()), ["a"])
        self.assertEqual(list(x.values()), [30])
        self.assertEqual(list(x.items()), [("a", 30)])
        self.assertEqual(list(x), ["a"])

        x = ModelOutputTest(a=30, b=10)
        self.assertEqual(list(x.keys()), ["a", "b"])
        self.assertEqual(list(x.values()), [30, 10])
        self.assertEqual(list(x.items()), [("a", 30), ("b", 10)])
        self.assertEqual(list(x), ["a", "b"])

        x = ModelOutputTest(a=30, c=10)
        self.assertEqual(list(x.keys()), ["a", "c"])
        self.assertEqual(list(x.values()), [30, 10])
        self.assertEqual(list(x.items()), [("a", 30), ("c", 10)])
        self.assertEqual(list(x), ["a", "c"])

        with self.assertRaises(Exception):
            x = x.update({"d": 20})
        with self.assertRaises(Exception):
            del x["a"]
        with self.assertRaises(Exception):
            _ = x.pop("a")
        with self.assertRaises(Exception):
            _ = x.setdefault("d", 32)

    def test_set_attributes(self):
        x = ModelOutputTest(a=30)
        x.a = 10
        self.assertEqual(x.a, 10)
        self.assertEqual(x["a"], 10)

    def test_set_keys(self):
        x = ModelOutputTest(a=30)
        x["a"] = 10
        self.assertEqual(x.a, 10)
        self.assertEqual(x["a"], 10)

    def test_instantiate_from_dict(self):
        x = ModelOutputTest({"a": 30, "b": 10})
        self.assertEqual(list(x.keys()), ["a", "b"])
        self.assertEqual(x.a, 30)
        self.assertEqual(x.b, 10)

    def test_instantiate_from_iterator(self):
        x = ModelOutputTest([("a", 30), ("b", 10)])
        self.assertEqual(list(x.keys()), ["a", "b"])
        self.assertEqual(x.a, 30)
        self.assertEqual(x.b, 10)

        with self.assertRaises(ValueError):
            _ = ModelOutputTest([("a", 30), (10, 10)])

        x = ModelOutputTest(a=(30, 30))
        self.assertEqual(list(x.keys()), ["a"])
        self.assertEqual(x.a, (30, 30))

    @require_torch
    def test_torch_pytree(self):
        # ensure torch.utils._pytree treats ModelOutput subclasses as nodes (and not leaves)
        # this is important for DistributedDataParallel gradient synchronization with static_graph=True
        import torch.utils._pytree as pytree

        x = ModelOutput({"a": 1.0, "c": 2.0})
        self.assertFalse(pytree._is_leaf(x))

        x = ModelOutputTest(a=1.0, c=2.0)
        self.assertFalse(pytree._is_leaf(x))

        expected_flat_outs = [1.0, 2.0]
        expected_tree_spec = pytree.TreeSpec(ModelOutputTest, ["a", "c"], [pytree.LeafSpec(), pytree.LeafSpec()])

        actual_flat_outs, actual_tree_spec = pytree.tree_flatten(x)
        self.assertEqual(expected_flat_outs, actual_flat_outs)
        self.assertEqual(expected_tree_spec, actual_tree_spec)

        unflattened_x = pytree.tree_unflatten(actual_flat_outs, actual_tree_spec)
        self.assertEqual(x, unflattened_x)

        self.assertEqual(
            pytree.treespec_dumps(actual_tree_spec),
            '[1, {"type": "tests.utils.test_model_output.ModelOutputTest", "context": "[\\"a\\", \\"c\\"]", "children_spec": [{"type": null, "context": null, "children_spec": []}, {"type": null, "context": null, "children_spec": []}]}]',
        )

    # TODO: @ydshieh
    @unittest.skip(reason="CPU OOM")
    @require_torch
    @pytest.mark.torch_export_test
    def test_export_serialization(self):
        model_cls = AlbertForMaskedLM
        model_config = model_cls.config_class()
        model = model_cls(model_config)

        input_dict = {"input_ids": torch.randint(0, 30000, (1, 512), dtype=torch.int64, requires_grad=False)}

        ep = torch.export.export(model, (), input_dict)

        buffer = io.BytesIO()
        torch.export.save(ep, buffer)
        buffer.seek(0)
        loaded_ep = torch.export.load(buffer)

        input_dict = {"input_ids": torch.randint(0, 30000, (1, 512), dtype=torch.int64, requires_grad=False)}
        assert torch.allclose(model(**input_dict).logits, loaded_ep(**input_dict).logits)


class ModelOutputTestNoDataclass(ModelOutput):
    """Invalid test subclass of ModelOutput where @dataclass decorator is not used"""

    a: float
    b: Optional[float] = None
    c: Optional[float] = None


class ModelOutputSubclassTester(unittest.TestCase):
    def test_direct_model_output(self):
        # Check that direct usage of ModelOutput instantiates without errors
        ModelOutput({"a": 1.1})

    def test_subclass_no_dataclass(self):
        # Check that a subclass of ModelOutput without @dataclass is invalid
        # A valid subclass is inherently tested in other unit tests above.
        with self.assertRaises(TypeError):
            ModelOutputTestNoDataclass(a=1.1, b=2.2, c=3.3)
transformers/tests/utils/test_model_output.py/0
{ "file_path": "transformers/tests/utils/test_model_output.py", "repo_id": "transformers", "token_count": 3069 }
530
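The behaviour pinned down in the record above is that a `ModelOutput` subclass acts as an ordered mapping over its *set* fields while staying tuple-indexable. A minimal sketch, not part of the test file; the `ToyOutput` class is made up for illustration:

# Illustrative sketch only, condensing the dual dict/tuple behaviour tested above.
from dataclasses import dataclass
from typing import Optional

from transformers.utils import ModelOutput


@dataclass
class ToyOutput(ModelOutput):
    logits: Optional[float] = None
    loss: Optional[float] = None


out = ToyOutput(logits=1.0)
assert out["logits"] == out.logits == out[0] == 1.0
assert "loss" not in out  # fields left as None are dropped from the mapping view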
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is responsible for cleaning the model section of the table of content by removing duplicates and sorting
the entries in alphabetical order.

Usage (from the root of the repo):

Check that the table of content is properly sorted (used in `make quality`):

```bash
python utils/check_doc_toc.py
```

Auto-sort the table of content if it is not properly sorted (used in `make style`):

```bash
python utils/check_doc_toc.py --fix_and_overwrite
```
"""

import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc: list[dict]) -> list[dict]:
    """
    Cleans a section of the table of content of the model documentation (one specific modality) by removing
    duplicates and sorting models alphabetically.

    Args:
        model_doc (`List[dict]`):
            The list of dictionaries extracted from the `_toctree.yml` file for this specific modality.

    Returns:
        `List[dict]`: List of dictionaries like the input, but cleaned up and sorted.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite: bool = False):
    """
    Check that the content of the table of content in `_toctree.yml` is clean (no duplicates and sorted for the model
    API doc) and potentially auto-cleans it.

    Args:
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether to just check if the TOC is clean or to auto-clean it (when `overwrite=True`).
    """
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Extract the modalities and clean them one by one.
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
transformers/utils/check_doc_toc.py/0
{ "file_path": "transformers/utils/check_doc_toc.py", "repo_id": "transformers", "token_count": 1727 }
531
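The core of `clean_model_doc_toc` in the record above is a dedup-then-sort over a list of `{"local", "title"}` dicts. A standalone sketch of that transformation, not part of the script; the entries are made-up toy data:

# Illustrative sketch only, isolating the dedup-then-sort rule implemented above.
entries = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},  # duplicate to collapse
]

# Keep one entry per "local" key, then sort case-insensitively by title.
deduped = list({entry["local"]: entry for entry in entries}.values())
print(sorted(deduped, key=lambda s: s["title"].lower()))
# -> [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]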
""" Script which deprecates a list of given models Example usage: python utils/deprecate_models.py --models bert distilbert """ import argparse import os from collections import defaultdict from pathlib import Path from typing import Optional import requests from custom_init_isort import sort_imports_in_all_inits from git import Repo from packaging import version from transformers import CONFIG_MAPPING, logging from transformers import __version__ as current_version REPO_PATH = Path(os.path.abspath(os.path.dirname(os.path.dirname(__file__)))) repo = Repo(REPO_PATH) logger = logging.get_logger(__name__) def get_last_stable_minor_release(): # Get the last stable release of transformers url = "https://pypi.org/pypi/transformers/json" release_data = requests.get(url).json() # Find the last stable release of transformers (version below current version) major_version, minor_version, patch_version, _ = current_version.split(".") last_major_minor = f"{major_version}.{int(minor_version) - 1}" last_stable_minor_releases = [ release for release in release_data["releases"] if release.startswith(last_major_minor) ] last_stable_release = sorted(last_stable_minor_releases, key=version.parse)[-1] return last_stable_release def build_tip_message(last_stable_release): return ( """ <Tip warning={true}> This model is in maintenance mode only, we don't accept any new PRs changing its code. """ + f"""If you run into any issues running this model, please reinstall the last version that supported this model: v{last_stable_release}. You can do so by running the following command: `pip install -U transformers=={last_stable_release}`. </Tip>""" ) def insert_tip_to_model_doc(model_doc_path, tip_message): tip_message_lines = tip_message.split("\n") with open(model_doc_path, "r") as f: model_doc = f.read() # Add the tip message to the model doc page directly underneath the title lines = model_doc.split("\n") new_model_lines = [] for line in lines: if line.startswith("# "): new_model_lines.append(line) new_model_lines.extend(tip_message_lines) else: new_model_lines.append(line) with open(model_doc_path, "w") as f: f.write("\n".join(new_model_lines)) def get_model_doc_path(model: str) -> tuple[Optional[str], Optional[str]]: # Possible variants of the model name in the model doc path model_names = [model, model.replace("_", "-"), model.replace("_", "")] model_doc_paths = [REPO_PATH / f"docs/source/en/model_doc/{model_name}.md" for model_name in model_names] for model_doc_path, model_name in zip(model_doc_paths, model_names): if os.path.exists(model_doc_path): return model_doc_path, model_name return None, None def extract_model_info(model): model_info = {} model_doc_path, model_doc_name = get_model_doc_path(model) model_path = REPO_PATH / f"src/transformers/models/{model}" if model_doc_path is None: print(f"Model doc path does not exist for {model}") return None model_info["model_doc_path"] = model_doc_path model_info["model_doc_name"] = model_doc_name if not os.path.exists(model_path): print(f"Model path does not exist for {model}") return None model_info["model_path"] = model_path return model_info def update_relative_imports(filename, model): with open(filename, "r") as f: filelines = f.read() new_file_lines = [] for line in filelines.split("\n"): if line.startswith("from .."): new_file_lines.append(line.replace("from ..", "from ...")) else: new_file_lines.append(line) with open(filename, "w") as f: f.write("\n".join(new_file_lines)) def remove_copied_from_statements(model): model_path = REPO_PATH / 
f"src/transformers/models/{model}" for file in os.listdir(model_path): if file == "__pycache__": continue file_path = model_path / file with open(file_path, "r") as f: file_lines = f.read() new_file_lines = [] for line in file_lines.split("\n"): if "# Copied from" in line: continue new_file_lines.append(line) with open(file_path, "w") as f: f.write("\n".join(new_file_lines)) def move_model_files_to_deprecated(model): model_path = REPO_PATH / f"src/transformers/models/{model}" deprecated_model_path = REPO_PATH / f"src/transformers/models/deprecated/{model}" if not os.path.exists(deprecated_model_path): os.makedirs(deprecated_model_path) for file in os.listdir(model_path): if file == "__pycache__": continue repo.git.mv(f"{model_path}/{file}", f"{deprecated_model_path}/{file}") # For deprecated files, we then need to update the relative imports update_relative_imports(f"{deprecated_model_path}/{file}", model) def delete_model_tests(model): tests_path = REPO_PATH / f"tests/models/{model}" if os.path.exists(tests_path): repo.git.rm("-r", tests_path) def get_line_indent(s): return len(s) - len(s.lstrip()) def update_main_init_file(models): """ Replace all instances of model.model_name with model.deprecated.model_name in the __init__.py file Args: models (List[str]): The models to mark as deprecated """ filename = REPO_PATH / "src/transformers/__init__.py" with open(filename, "r") as f: init_file = f.read() # 1. For each model, find all the instances of model.model_name and replace with model.deprecated.model_name for model in models: init_file = init_file.replace(f'models.{model}"', f'models.deprecated.{model}"') init_file = init_file.replace(f"models.{model} import", f"models.deprecated.{model} import") with open(filename, "w") as f: f.write(init_file) # 2. Resort the imports sort_imports_in_all_inits(check_only=False) def remove_model_references_from_file(filename, models, condition): """ Remove all references to the given models from the given file Args: filename (str): The file to remove the references from models (List[str]): The models to remove condition (Callable): A function that takes the line and model and returns True if the line should be removed """ filename = REPO_PATH / filename with open(filename, "r") as f: init_file = f.read() new_file_lines = [] for i, line in enumerate(init_file.split("\n")): if any(condition(line, model) for model in models): continue new_file_lines.append(line) with open(filename, "w") as f: f.write("\n".join(new_file_lines)) def remove_model_config_classes_from_config_check(model_config_classes): """ Remove the deprecated model config classes from the check_config_attributes.py file Args: model_config_classes (List[str]): The model config classes to remove e.g. 
["BertConfig", "DistilBertConfig"] """ filename = REPO_PATH / "utils/check_config_attributes.py" with open(filename, "r") as f: check_config_attributes = f.read() # Keep track as we have to delete comment above too in_special_cases_to_allow = False in_indent = False new_file_lines = [] for line in check_config_attributes.split("\n"): indent = get_line_indent(line) if (line.strip() == "SPECIAL_CASES_TO_ALLOW = {") or (line.strip() == "SPECIAL_CASES_TO_ALLOW.update("): in_special_cases_to_allow = True elif in_special_cases_to_allow and indent == 0 and line.strip() in ("}", ")"): in_special_cases_to_allow = False if in_indent: if line.strip().endswith(("]", "],")): in_indent = False continue if in_special_cases_to_allow and any( model_config_class in line for model_config_class in model_config_classes ): # Remove comments above the model config class to remove while new_file_lines[-1].strip().startswith("#"): new_file_lines.pop() if line.strip().endswith("["): in_indent = True continue elif any(model_config_class in line for model_config_class in model_config_classes): continue new_file_lines.append(line) with open(filename, "w") as f: f.write("\n".join(new_file_lines)) def add_models_to_deprecated_models_in_config_auto(models): """ Add the models to the DEPRECATED_MODELS list in configuration_auto.py and sorts the list to be in alphabetical order. """ filepath = REPO_PATH / "src/transformers/models/auto/configuration_auto.py" with open(filepath, "r") as f: config_auto = f.read() new_file_lines = [] deprecated_models_list = [] in_deprecated_models = False for line in config_auto.split("\n"): if line.strip() == "DEPRECATED_MODELS = [": in_deprecated_models = True new_file_lines.append(line) elif in_deprecated_models and line.strip() == "]": in_deprecated_models = False # Add the new models to deprecated models list deprecated_models_list.extend([f' "{model}", ' for model in models]) # Sort so they're in alphabetical order in the file deprecated_models_list = sorted(deprecated_models_list) new_file_lines.extend(deprecated_models_list) # Make sure we still have the closing bracket new_file_lines.append(line) elif in_deprecated_models: deprecated_models_list.append(line) else: new_file_lines.append(line) with open(filepath, "w") as f: f.write("\n".join(new_file_lines)) def deprecate_models(models): # Get model info skipped_models = [] models_info = defaultdict(dict) for model in models: single_model_info = extract_model_info(model) if single_model_info is None: skipped_models.append(model) else: models_info[model] = single_model_info model_config_classes = [] for model, model_info in models_info.items(): if model in CONFIG_MAPPING: model_config_classes.append(CONFIG_MAPPING[model].__name__) elif model_info["model_doc_name"] in CONFIG_MAPPING: model_config_classes.append(CONFIG_MAPPING[model_info["model_doc_name"]].__name__) else: skipped_models.append(model) print(f"Model config class not found for model: {model}") # Filter out skipped models models = [model for model in models if model not in skipped_models] if skipped_models: print(f"Skipped models: {skipped_models} as the model doc or model path could not be found.") print(f"Models to deprecate: {models}") # Remove model config classes from config check print("Removing model config classes from config checks") remove_model_config_classes_from_config_check(model_config_classes) tip_message = build_tip_message(get_last_stable_minor_release()) for model, model_info in models_info.items(): print(f"Processing model: {model}") # Add the tip message 
to the model doc page directly underneath the title print("Adding tip message to model doc page") insert_tip_to_model_doc(model_info["model_doc_path"], tip_message) # Remove #Copied from statements from model's files print("Removing #Copied from statements from model's files") remove_copied_from_statements(model) # Move the model file to deprecated: src/transformers/models/model -> src/transformers/models/deprecated/model print("Moving model files to deprecated for model") move_model_files_to_deprecated(model) # Delete the model tests: tests/models/model print("Deleting model tests") delete_model_tests(model) # # We do the following with all models passed at once to avoid having to re-write the file multiple times print("Updating __init__.py file to point to the deprecated models") update_main_init_file(models) # Remove model references from other files print("Removing model references from other files") remove_model_references_from_file( "src/transformers/models/__init__.py", models, lambda line, model: model == line.strip().strip(",") ) remove_model_references_from_file( "utils/slow_documentation_tests.txt", models, lambda line, model: "/" + model + "/" in line ) remove_model_references_from_file("utils/not_doctested.txt", models, lambda line, model: "/" + model + "/" in line) # Add models to DEPRECATED_MODELS in the configuration_auto.py print("Adding models to DEPRECATED_MODELS in configuration_auto.py") add_models_to_deprecated_models_in_config_auto(models) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--models", nargs="+", help="List of models to deprecate") args = parser.parse_args() deprecate_models(args.models)
transformers/utils/deprecate_models.py/0
{ "file_path": "transformers/utils/deprecate_models.py", "repo_id": "transformers", "token_count": 5235 }
532
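`get_last_stable_minor_release` in the record above depends on `packaging.version` giving a semantic (not lexicographic) ordering of release strings. A tiny sketch, not part of the script; the version numbers are made up:

# Illustrative sketch only, showing why the script sorts with version.parse.
from packaging import version

candidates = ["4.40.0", "4.40.2", "4.40.10"]
last_stable = sorted(candidates, key=version.parse)[-1]
assert last_stable == "4.40.10"  # a plain string sort would wrongly pick "4.40.2"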
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import ast
import collections
import functools
import json
import operator
import os
import re
import sys
import time
from typing import Any, Optional, Union

import requests
from compare_test_runs import compare_job_sets
from get_ci_error_statistics import get_jobs
from get_previous_daily_ci import get_last_daily_ci_reports, get_last_daily_ci_run, get_last_daily_ci_workflow_run_id
from huggingface_hub import HfApi
from slack_sdk import WebClient


# A map associating the job names (specified by `inputs.job` in a workflow file) with the keys of
# `additional_files`.
job_to_test_map = {
    "run_models_gpu": "Models",
    "run_trainer_and_fsdp_gpu": "Trainer & FSDP",
    "run_pipelines_torch_gpu": "PyTorch pipelines",
    "run_pipelines_tf_gpu": "TensorFlow pipelines",
    "run_examples_gpu": "Examples directory",
    "run_torch_cuda_extensions_gpu": "DeepSpeed",
    "run_quantization_torch_gpu": "Quantization",
}

# The values are used as the file names where to save the corresponding CI job results.
test_to_result_name = {
    "Models": "model",
    "Trainer & FSDP": "trainer_and_fsdp",
    "PyTorch pipelines": "torch_pipeline",
    "TensorFlow pipelines": "tf_pipeline",
    "Examples directory": "example",
    "DeepSpeed": "deepspeed",
    "Quantization": "quantization",
}

NON_MODEL_TEST_MODULES = [
    "deepspeed",
    "extended",
    "fixtures",
    "generation",
    "onnx",
    "optimization",
    "pipelines",
    "sagemaker",
    "trainer",
    "utils",
    "fsdp",
    "quantization",
]


def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0
    errors = 0
    skipped = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    # It could be `'71.60s', '(0:01:11)', '====\n'` or `'in', '35.01s', '================\n'`.
    # Let's always select the one with `s`.
    time_spent = expressions[-1]
    if "=" in time_spent:
        time_spent = expressions[-2]
    if "(" in time_spent:
        time_spent = expressions[-3]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "errors" in expression:
            errors += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
        if "skipped" in expression:
            skipped += int(expressions[i - 1])

    return failed, errors, success, skipped, time_spent


def handle_stacktraces(test_results):
    # These files should follow the following architecture:
    # === FAILURES ===
    # <path>:<line>: Error ...
    # <path>:<line>: Error ...
    # <empty line>

    total_stacktraces = test_results.split("\n")[1:-1]
    stacktraces = []
    for stacktrace in total_stacktraces:
        try:
            line = stacktrace[: stacktrace.index(" ")].split(":")[-2]
            error_message = stacktrace[stacktrace.index(" ") :]

            stacktraces.append(f"(line {line}) {error_message}")
        except Exception:
            stacktraces.append("Cannot retrieve error message.")

    return stacktraces


def dicts_to_sum(objects: Union[dict[str, dict], list[dict]]):
    if isinstance(objects, dict):
        lists = objects.values()
    else:
        lists = objects

    # Convert each dictionary to counter
    counters = map(collections.Counter, lists)
    # Sum all the counters
    return functools.reduce(operator.add, counters)


class Message:
    def __init__(
        self,
        title: str,
        ci_title: str,
        model_results: dict,
        additional_results: dict,
        selected_warnings: Optional[list] = None,
        prev_ci_artifacts=None,
        other_ci_artifacts=None,
    ):
        self.title = title
        self.ci_title = ci_title

        # Failures and success of the modeling tests
        self.n_model_success = sum(r["success"] for r in model_results.values())
        self.n_model_single_gpu_failures = sum(dicts_to_sum(r["failed"])["single"] for r in model_results.values())
        self.n_model_multi_gpu_failures = sum(dicts_to_sum(r["failed"])["multi"] for r in model_results.values())

        # Some suites do not have a distinction between single and multi GPU.
        self.n_model_unknown_failures = sum(dicts_to_sum(r["failed"])["unclassified"] for r in model_results.values())
        self.n_model_failures = (
            self.n_model_single_gpu_failures + self.n_model_multi_gpu_failures + self.n_model_unknown_failures
        )

        # Failures and success of the additional tests
        self.n_additional_success = sum(r["success"] for r in additional_results.values())

        if len(additional_results) > 0:
            # `dicts_to_sum` reduces with `functools.reduce`, which requires a non-empty sequence. Let's just add
            # empty entries otherwise.
            all_additional_failures = dicts_to_sum([r["failed"] for r in additional_results.values()])
            self.n_additional_single_gpu_failures = all_additional_failures["single"]
            self.n_additional_multi_gpu_failures = all_additional_failures["multi"]
            self.n_additional_unknown_gpu_failures = all_additional_failures["unclassified"]
        else:
            self.n_additional_single_gpu_failures = 0
            self.n_additional_multi_gpu_failures = 0
            self.n_additional_unknown_gpu_failures = 0

        self.n_additional_failures = (
            self.n_additional_single_gpu_failures
            + self.n_additional_multi_gpu_failures
            + self.n_additional_unknown_gpu_failures
        )

        # Results
        self.n_failures = self.n_model_failures + self.n_additional_failures
        self.n_success = self.n_model_success + self.n_additional_success
        self.n_tests = self.n_failures + self.n_success

        self.model_results = model_results
        self.additional_results = additional_results

        self.thread_ts = None

        if selected_warnings is None:
            selected_warnings = []
        self.selected_warnings = selected_warnings

        self.prev_ci_artifacts = prev_ci_artifacts
        self.other_ci_artifacts = other_ci_artifacts

    @property
    def time(self) -> str:
        all_results = [*self.model_results.values(), *self.additional_results.values()]
        time_spent = []
        for r in all_results:
            if len(r["time_spent"]):
                time_spent.extend(r["time_spent"])
        total_secs = sum(time_spent)

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def ci_title_section(self) -> dict:
        return {"type": "section", "text": {"type": "mrkdwn", "text": self.ci_title}}

    @property
    def no_failures(self) -> dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\n"
                    f"The suite ran in {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def warnings(self) -> dict:
        # If something goes wrong, let's avoid the CI report failing to be sent.
        button_text = "Check warnings (Link not found)"
        # Use the workflow run link
        job_link = f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}"

        for job in github_actions_jobs:
            if "Extract warnings in CI artifacts" in job["name"] and job["conclusion"] == "success":
                button_text = "Check warnings"
                # Use the actual job link
                job_link = job["html_url"]
                break

        huggingface_hub_warnings = [x for x in self.selected_warnings if "huggingface_hub" in x]
        text = f"There are {len(self.selected_warnings)} warnings being selected."
        text += f"\n{len(huggingface_hub_warnings)} of them are from `huggingface_hub`."
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": text,
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": button_text, "emoji": True},
                "url": job_link,
            },
        }

    @staticmethod
    def get_device_report(report, rjust=6):
        if "single" in report and "multi" in report:
            return f"{str(report['single']).rjust(rjust)} | {str(report['multi']).rjust(rjust)} | "
        elif "single" in report:
            return f"{str(report['single']).rjust(rjust)} | {'0'.rjust(rjust)} | "
        elif "multi" in report:
            return f"{'0'.rjust(rjust)} | {str(report['multi']).rjust(rjust)} | "

    @property
    def category_failures(self) -> dict:
        if job_name != "run_models_gpu":
            category_failures_report = ""
            return {"type": "section", "text": {"type": "mrkdwn", "text": category_failures_report}}

        model_failures = [v["failed"] for v in self.model_results.values()]

        category_failures = {}

        for model_failure in model_failures:
            for key, value in model_failure.items():
                if key not in category_failures:
                    category_failures[key] = dict(value)
                else:
                    category_failures[key]["unclassified"] += value["unclassified"]
                    category_failures[key]["single"] += value["single"]
                    category_failures[key]["multi"] += value["multi"]

        individual_reports = []

        for key, value in category_failures.items():
            device_report = self.get_device_report(value)

            if sum(value.values()):
                if device_report:
                    individual_reports.append(f"{device_report}{key}")
                else:
                    individual_reports.append(key)

        header = "Single | Multi | Category\n"
        category_failures_report = prepare_reports(
            title="The following categories had failures", header=header, reports=individual_reports
        )

        return {"type": "section", "text": {"type": "mrkdwn", "text": category_failures_report}}

    def compute_diff_for_failure_reports(self, curr_failure_report, prev_failure_report):  # noqa
        # Remove the leading and trailing parts that don't contain failure count information.
        model_failures = curr_failure_report.split("\n")[3:-2]
        prev_model_failures = prev_failure_report.split("\n")[3:-2]
        entries_changed = set(model_failures).difference(prev_model_failures)

        prev_map = {}
        for f in prev_model_failures:
            items = [x.strip() for x in f.split("| ")]
            prev_map[items[-1]] = [int(x) for x in items[:-1]]

        curr_map = {}
        for f in entries_changed:
            items = [x.strip() for x in f.split("| ")]
            curr_map[items[-1]] = [int(x) for x in items[:-1]]

        diff_map = {}
        for k, v in curr_map.items():
            if k not in prev_map:
                diff_map[k] = v
            else:
                diff = [x - y for x, y in zip(v, prev_map[k])]
                if max(diff) > 0:
                    diff_map[k] = diff

        entries_changed = []
        for model_name, diff_values in diff_map.items():
            diff = [str(x) for x in diff_values]
            diff = [f"+{x}" if (x != "0" and not x.startswith("-")) else x for x in diff]
            diff = [x.rjust(9) for x in diff]
            device_report = " | ".join(diff) + " | "
            report = f"{device_report}{model_name}"
            entries_changed.append(report)

        entries_changed = sorted(entries_changed, key=lambda s: s.split("| ")[-1])

        return entries_changed

    @property
    def model_failures(self) -> list[dict]:
        # Obtain per-model failures
        def per_model_sum(model_category_dict):
            return dicts_to_sum(model_category_dict["failed"].values())

        failures = {}
        non_model_failures = {
            k: per_model_sum(v) for k, v in self.model_results.items() if sum(per_model_sum(v).values())
        }

        for k, v in self.model_results.items():
            # The keys in `model_results` may contain things like `models_vit` or `quantization_autoawq`
            # Remove the prefix to make the report cleaner.
k = k.replace("models_", "").replace("quantization_", "") if k in NON_MODEL_TEST_MODULES: continue if sum(per_model_sum(v).values()): dict_failed = dict(v["failed"]) # Model job has a special form for reporting if job_name == "run_models_gpu": pytorch_specific_failures = dict_failed.pop("PyTorch") tensorflow_specific_failures = dict_failed.pop("TensorFlow") other_failures = dicts_to_sum(dict_failed.values()) failures[k] = { "PyTorch": pytorch_specific_failures, "TensorFlow": tensorflow_specific_failures, "other": other_failures, } else: test_name = job_to_test_map[job_name] specific_failures = dict_failed.pop(test_name) failures[k] = { test_name: specific_failures, } model_reports = [] other_module_reports = [] for key, value in non_model_failures.items(): key = key.replace("models_", "").replace("quantization_", "") if key in NON_MODEL_TEST_MODULES: device_report = self.get_device_report(value) if sum(value.values()): if device_report: report = f"{device_report}{key}" else: report = key other_module_reports.append(report) for key, value in failures.items(): # Model job has a special form for reporting if job_name == "run_models_gpu": device_report_values = [ value["PyTorch"]["single"], value["PyTorch"]["multi"], value["TensorFlow"]["single"], value["TensorFlow"]["multi"], sum(value["other"].values()), ] else: test_name = job_to_test_map[job_name] device_report_values = [ value[test_name]["single"], value[test_name]["multi"], ] if sum(device_report_values): # This is related to `model_header` below rjust_width = 9 if job_name == "run_models_gpu" else 6 device_report = " | ".join([str(x).rjust(rjust_width) for x in device_report_values]) + " | " report = f"{device_report}{key}" model_reports.append(report) # (Possibly truncated) reports for the current workflow run - to be sent to Slack channels if job_name == "run_models_gpu": model_header = "Single PT | Multi PT | Single TF | Multi TF | Other | Category\n" else: model_header = "Single | Multi | Category\n" # Used when calling `prepare_reports` below to prepare the `title` argument label = test_to_result_name[job_to_test_map[job_name]] sorted_model_reports = sorted(model_reports, key=lambda s: s.split("| ")[-1]) model_failures_report = prepare_reports( title=f"These following {label} modules had failures", header=model_header, reports=sorted_model_reports ) module_header = "Single | Multi | Category\n" sorted_module_reports = sorted(other_module_reports, key=lambda s: s.split("| ")[-1]) module_failures_report = prepare_reports( title=f"The following {label} modules had failures", header=module_header, reports=sorted_module_reports ) # To be sent to Slack channels model_failure_sections = [{"type": "section", "text": {"type": "mrkdwn", "text": model_failures_report}}] model_failure_sections.append({"type": "section", "text": {"type": "mrkdwn", "text": module_failures_report}}) # Save the complete (i.e. 
no truncation) failure tables (of the current workflow run) # (to be uploaded as artifacts) model_failures_report = prepare_reports( title=f"These following {label} modules had failures", header=model_header, reports=sorted_model_reports, to_truncate=False, ) file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/model_failures_report.txt") with open(file_path, "w", encoding="UTF-8") as fp: fp.write(model_failures_report) module_failures_report = prepare_reports( title=f"The following {label} modules had failures", header=module_header, reports=sorted_module_reports, to_truncate=False, ) file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/module_failures_report.txt") with open(file_path, "w", encoding="UTF-8") as fp: fp.write(module_failures_report) if self.prev_ci_artifacts is not None: # if the last run produces artifact named `ci_results_{job_name}` if ( f"ci_results_{job_name}" in self.prev_ci_artifacts and "model_failures_report.txt" in self.prev_ci_artifacts[f"ci_results_{job_name}"] ): # Compute the difference of the previous/current (model failure) table prev_model_failures = self.prev_ci_artifacts[f"ci_results_{job_name}"]["model_failures_report.txt"] entries_changed = self.compute_diff_for_failure_reports(model_failures_report, prev_model_failures) if len(entries_changed) > 0: # Save the complete difference diff_report = prepare_reports( title="Changed model modules failures", header=model_header, reports=entries_changed, to_truncate=False, ) file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/changed_model_failures_report.txt") with open(file_path, "w", encoding="UTF-8") as fp: fp.write(diff_report) # To be sent to Slack channels diff_report = prepare_reports( title="*Changed model modules failures*", header=model_header, reports=entries_changed, ) model_failure_sections.append( {"type": "section", "text": {"type": "mrkdwn", "text": diff_report}}, ) return model_failure_sections @property def additional_failures(self) -> dict: failures = {k: v["failed"] for k, v in self.additional_results.items()} errors = {k: v["error"] for k, v in self.additional_results.items()} individual_reports = [] for key, value in failures.items(): device_report = self.get_device_report(value) if sum(value.values()) or errors[key]: report = f"{key}" if errors[key]: report = f"[Errored out] {report}" if device_report: report = f"{device_report}{report}" individual_reports.append(report) header = "Single | Multi | Category\n" failures_report = prepare_reports( title="The following non-modeling tests had failures", header=header, reports=individual_reports ) return {"type": "section", "text": {"type": "mrkdwn", "text": failures_report}} @property def payload(self) -> str: blocks = [self.header] if self.ci_title: blocks.append(self.ci_title_section) if self.n_model_failures > 0 or self.n_additional_failures > 0: blocks.append(self.failures) if self.n_model_failures > 0: block = self.category_failures if block["text"]["text"]: blocks.append(block) for block in self.model_failures: if block["text"]["text"]: blocks.append(block) if self.n_additional_failures > 0: blocks.append(self.additional_failures) if self.n_model_failures == 0 and self.n_additional_failures == 0: blocks.append(self.no_failures) if len(self.selected_warnings) > 0: blocks.append(self.warnings) new_failure_blocks = [] for idx, (prev_workflow_run_id, prev_ci_artifacts) in enumerate( [self.prev_ci_artifacts] + self.other_ci_artifacts ): if idx == 0: # This is the truncated version to show on slack. For now. 
                new_failure_blocks = self.get_new_model_failure_blocks(
                    prev_ci_artifacts=prev_ci_artifacts, with_header=False
                )

            # To save the list of new model failures and upload it to hub repositories
            extra_blocks = self.get_new_model_failure_blocks(prev_ci_artifacts=prev_ci_artifacts, to_truncate=False)
            if extra_blocks:
                filename = "new_failures"
                if idx > 0:
                    filename = f"{filename}_against_{prev_workflow_run_id}"

                failure_text = extra_blocks[-1]["text"]["text"]
                file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/{filename}.txt")
                with open(file_path, "w", encoding="UTF-8") as fp:
                    fp.write(failure_text)

                # upload results to Hub dataset
                file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/{filename}.txt")
                _ = api.upload_file(
                    path_or_fileobj=file_path,
                    path_in_repo=f"{report_repo_folder}/ci_results_{job_name}/{filename}.txt",
                    repo_id=report_repo_id,
                    repo_type="dataset",
                    token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
                )

                # extra processing to save to json format
                new_failed_tests = {}
                nb_new_failed_tests = 0
                for line in failure_text.split():
                    if "https://github.com/huggingface/transformers/actions/runs" in line:
                        pattern = r"<(https://github.com/huggingface/transformers/actions/runs/.+?/job/.+?)\|(.+?)>"
                        items = re.findall(pattern, line)
                    elif "tests/" in line:
                        # TODO: Improve the condition here.
                        if "tests/models/" in line or (
                            "tests/quantization/" in line and job_name == "run_quantization_torch_gpu"
                        ):
                            model = line.split("/")[2]
                        else:
                            model = line.split("/")[1]
                        if model not in new_failed_tests:
                            new_failed_tests[model] = {"single-gpu": [], "multi-gpu": []}
                        for _, device in items:
                            new_failed_tests[model][f"{device}-gpu"].append(line)
                        nb_new_failed_tests += 1

                file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/{filename}.json")
                with open(file_path, "w", encoding="UTF-8") as fp:
                    json.dump(new_failed_tests, fp, ensure_ascii=False, indent=4)

                # upload results to Hub dataset
                file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/{filename}.json")
                commit_info = api.upload_file(
                    path_or_fileobj=file_path,
                    path_in_repo=f"{report_repo_folder}/ci_results_{job_name}/{filename}.json",
                    repo_id=report_repo_id,
                    repo_type="dataset",
                    token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
                )
                new_failures_url = f"https://huggingface.co/datasets/{report_repo_id}/raw/{commit_info.oid}/{report_repo_folder}/ci_results_{job_name}/{filename}.json"

                if idx == 0:
                    block = {
                        "type": "section",
                        "text": {
                            "type": "mrkdwn",
                            "text": f"*There are {nb_new_failed_tests} new failed tests*\n\n(compared to previous run: <https://github.com/huggingface/transformers/actions/runs/{prev_workflow_run_id}|{prev_workflow_run_id}>)",
                        },
                        "accessory": {
                            "type": "button",
                            "text": {"type": "plain_text", "text": "Check new failures"},
                            "url": new_failures_url,
                        },
                    }
                    blocks.append(block)
                else:
                    block = {
                        "type": "section",
                        "text": {
                            "type": "mrkdwn",
                            # TODO: We should NOT assume it's always Nvidia CI, but it's the case at this moment.
"text": f"*There are {nb_new_failed_tests} failed tests unique to this run*\n\n(compared to{' Nvidia CI ' if is_scheduled_ci_run else ' '}run: <https://github.com/huggingface/transformers/actions/runs/{prev_workflow_run_id}|{prev_workflow_run_id}>)", }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check failures"}, "url": new_failures_url, }, } blocks.append(block) if diff_file_url is not None: block = { "type": "section", "text": { "type": "mrkdwn", "text": f"*Test results diff*\n\n(compared to previous run: <https://github.com/huggingface/transformers/actions/runs/{prev_workflow_run_id}|{prev_workflow_run_id}>)", }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check test result diff file"}, "url": diff_file_url, }, } blocks.append(block) if len(new_failure_blocks) > 0: blocks.extend(new_failure_blocks) return json.dumps(blocks) @staticmethod def error_out(title, ci_title="", runner_not_available=False, runner_failed=False, setup_failed=False): blocks = [] title_block = {"type": "header", "text": {"type": "plain_text", "text": title}} blocks.append(title_block) if ci_title: ci_title_block = {"type": "section", "text": {"type": "mrkdwn", "text": ci_title}} blocks.append(ci_title_block) offline_runners = [] if runner_not_available: text = "💔 CI runners are not available! Tests are not run. 😭" result = os.environ.get("OFFLINE_RUNNERS") if result is not None: offline_runners = json.loads(result) elif runner_failed: text = "💔 CI runners have problems! Tests are not run. 😭" elif setup_failed: text = "💔 Setup job failed. Tests are not run. 😭" else: text = "💔 There was an issue running the tests. 😭" error_block_1 = { "type": "header", "text": { "type": "plain_text", "text": text, }, } text = "" if len(offline_runners) > 0: text = "\n • " + "\n • ".join(offline_runners) text = f"The following runners are offline:\n{text}\n\n" text += "🙏 Let's fix it ASAP! 🙏" error_block_2 = { "type": "section", "text": { "type": "plain_text", "text": text, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } blocks.extend([error_block_1, error_block_2]) payload = json.dumps(blocks) print("Sending the following payload") print(json.dumps({"blocks": blocks})) client.chat_postMessage( channel=SLACK_REPORT_CHANNEL_ID, text=text, blocks=payload, ) def post(self): payload = self.payload print("Sending the following payload") print(json.dumps({"blocks": json.loads(payload)})) text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed." 
self.thread_ts = client.chat_postMessage( channel=SLACK_REPORT_CHANNEL_ID, blocks=payload, text=text, ) def get_reply_blocks(self, job_name, job_result, failures, device, text): """ failures: A list with elements of the form {"line": full test name, "trace": error trace} """ # `text` must be less than 3001 characters in Slack SDK # keep some room for adding "[Truncated]" when necessary MAX_ERROR_TEXT = 3000 - len("[Truncated]") failure_text = "" for idx, error in enumerate(failures): new_text = failure_text + f"*{error['line']}*\n_{error['trace']}_\n\n" if len(new_text) > MAX_ERROR_TEXT: # `failure_text` here has length <= 3000 failure_text = failure_text + "[Truncated]" break # `failure_text` here has length <= MAX_ERROR_TEXT failure_text = new_text title = job_name if device is not None: title += f" ({device}-gpu)" content = {"type": "section", "text": {"type": "mrkdwn", "text": text}} # TODO: Make sure we always have a valid job link (or at least a way not to break the report sending) # Currently we get the device from a job's artifact name. # If a device is found, the job name should contain the device type, for example, `XXX (single-gpu)`. # This could be done by adding `machine_type` in a job's `strategy`. # (If `job_result["job_link"][device]` is `None`, we get an error: `... [ERROR] must provide a string ...`) if job_result["job_link"] is not None and job_result["job_link"][device] is not None: content["accessory"] = { "type": "button", "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True}, "url": job_result["job_link"][device], } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failure_text}}, ] def get_new_model_failure_blocks(self, prev_ci_artifacts, with_header=True, to_truncate=True): if prev_ci_artifacts is None: return [] if len(self.model_results) > 0: target_results = self.model_results else: target_results = self.additional_results[job_to_test_map[job_name]] # Make the format uniform between `model_results` and `additional_results[XXX]` if "failures" in target_results: target_results = {job_name: target_results} sorted_dict = sorted(target_results.items(), key=lambda t: t[0]) job = job_to_test_map[job_name] prev_model_results = {} if ( f"ci_results_{job_name}" in prev_ci_artifacts and f"{test_to_result_name[job]}_results.json" in prev_ci_artifacts[f"ci_results_{job_name}"] ): prev_model_results = json.loads( prev_ci_artifacts[f"ci_results_{job_name}"][f"{test_to_result_name[job]}_results.json"] ) # Make the format uniform between `model_results` and `additional_results[XXX]` if "failures" in prev_model_results: prev_model_results = {job_name: prev_model_results} all_failure_lines = {} for job, job_result in sorted_dict: if len(job_result["failures"]): devices = sorted(job_result["failures"].keys(), reverse=True) for device in devices: failures = job_result["failures"][device] prev_error_lines = {} if job in prev_model_results and device in prev_model_results[job]["failures"]: prev_error_lines = {error["line"] for error in prev_model_results[job]["failures"][device]} url = None if job_result["job_link"] is not None and job_result["job_link"][device] is not None: url = job_result["job_link"][device] for idx, error in enumerate(failures): if error["line"] in prev_error_lines: continue new_text = f"{error['line']}\n\n" if new_text not in all_failure_lines: all_failure_lines[new_text] = [] all_failure_lines[new_text].append(f"<{url}|{device}>" 
if url is not None else device) MAX_ERROR_TEXT = 3000 - len("[Truncated]") - len("```New failures```\n\n") if not to_truncate: MAX_ERROR_TEXT = float("inf") failure_text = "" for line, devices in all_failure_lines.items(): new_text = failure_text + f"{'|'.join(devices)} gpu\n{line}" if len(new_text) > MAX_ERROR_TEXT: # `failure_text` here has length <= 3000 failure_text = failure_text + "[Truncated]" break # `failure_text` here has length <= MAX_ERROR_TEXT failure_text = new_text blocks = [] if failure_text: if with_header: blocks.append( {"type": "header", "text": {"type": "plain_text", "text": "New failures", "emoji": True}} ) else: failure_text = f"{failure_text}" blocks.append({"type": "section", "text": {"type": "mrkdwn", "text": failure_text}}) return blocks def post_reply(self): if self.thread_ts is None: raise ValueError("Can only post reply if a post has been made.") sorted_dict = sorted(self.model_results.items(), key=lambda t: t[0]) for job, job_result in sorted_dict: if len(job_result["failures"]): for device, failures in job_result["failures"].items(): text = "\n".join( sorted([f"*{k}*: {v[device]}" for k, v in job_result["failed"].items() if v[device]]) ) blocks = self.get_reply_blocks(job, job_result, failures, device, text=text) print("Sending the following reply") print(json.dumps({"blocks": blocks})) client.chat_postMessage( channel=SLACK_REPORT_CHANNEL_ID, text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts["ts"], ) time.sleep(1) for job, job_result in self.additional_results.items(): if len(job_result["failures"]): for device, failures in job_result["failures"].items(): blocks = self.get_reply_blocks( job, job_result, failures, device, text=f"Number of failures: {job_result['failed'][device]}", ) print("Sending the following reply") print(json.dumps({"blocks": blocks})) client.chat_postMessage( channel=SLACK_REPORT_CHANNEL_ID, text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts["ts"], ) time.sleep(1) def retrieve_artifact(artifact_path: str, gpu: Optional[str]): if gpu not in [None, "single", "multi"]: raise ValueError(f"Invalid GPU for artifact. 
Passed GPU: `{gpu}`.")

    _artifact = {}

    if os.path.exists(artifact_path):
        files = os.listdir(artifact_path)
        for file in files:
            try:
                with open(os.path.join(artifact_path, file)) as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(artifact_path, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str, single_gpu: bool = False, multi_gpu: bool = False):
            self.name = name
            self.single_gpu = single_gpu
            self.multi_gpu = multi_gpu
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str, gpu: Optional[str] = None):
            self.paths.append({"name": self.name, "path": path, "gpu": gpu})

    _available_artifacts: dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory

        name_parts = artifact_name.split("_postfix_")
        if len(name_parts) > 1:
            artifact_name = name_parts[0]

        if artifact_name.startswith("single-gpu"):
            artifact_name = artifact_name[len("single-gpu") + 1 :]

            if artifact_name in _available_artifacts:
                _available_artifacts[artifact_name].single_gpu = True
            else:
                _available_artifacts[artifact_name] = Artifact(artifact_name, single_gpu=True)

            _available_artifacts[artifact_name].add_path(directory, gpu="single")

        elif artifact_name.startswith("multi-gpu"):
            artifact_name = artifact_name[len("multi-gpu") + 1 :]

            if artifact_name in _available_artifacts:
                _available_artifacts[artifact_name].multi_gpu = True
            else:
                _available_artifacts[artifact_name] = Artifact(artifact_name, multi_gpu=True)

            _available_artifacts[artifact_name].add_path(directory, gpu="multi")
        else:
            if artifact_name not in _available_artifacts:
                _available_artifacts[artifact_name] = Artifact(artifact_name)

            _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts


def prepare_reports(title, header, reports, to_truncate=True):
    report = ""

    MAX_ERROR_TEXT = 3000 - len("[Truncated]")
    if not to_truncate:
        MAX_ERROR_TEXT = float("inf")

    if len(reports) > 0:
        # `text` must be less than 3001 characters in Slack SDK
        # keep some room for adding "[Truncated]" when necessary
        for idx in range(len(reports)):
            _report = header + "\n".join(reports[: idx + 1])
            new_report = f"{title}:\n```\n{_report}\n```\n"
            if len(new_report) > MAX_ERROR_TEXT:
                # `report` here has length <= 3000
                report = report + "[Truncated]"
                break
            report = new_report

    return report


def pop_default(l: list[Any], i: int, default: Any) -> Any:
    try:
        return l.pop(i)
    except IndexError:
        return default


if __name__ == "__main__":
    api = HfApi()
    client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
    SLACK_REPORT_CHANNEL_ID = os.environ["SLACK_REPORT_CHANNEL"]

    # runner_status = os.environ.get("RUNNER_STATUS")
    # runner_env_status = os.environ.get("RUNNER_ENV_STATUS")
    setup_status = os.environ.get("SETUP_STATUS")

    # runner_not_available = True if runner_status is not None and runner_status != "success" else False
    # runner_failed = True if runner_env_status is not None and runner_env_status != "success" else False
    # Let's keep the lines regarding runners' status (we might be able to use them again in the future)
    runner_not_available = False
    runner_failed = False
    # Some jobs don't depend (`needs`) on the job `setup`: in this case, the status of the job `setup` is `skipped`.
    setup_failed = setup_status not in ["skipped", "success"]

    org = "huggingface"
    repo = "transformers"
    repository_full_name = f"{org}/{repo}"

    # This env. variable is set in the workflow file (under the job `send_results`).
    ci_event = os.environ["CI_EVENT"]

    # To find the PR number in a commit title, for example, `Add AwesomeFormer model (#99999)`
    pr_number_re = re.compile(r"\(#(\d+)\)$")

    # Add Commit/PR title with a link for push CI
    # (check the title in 2 env. variables - depending on whether the CI is triggered via `push` or `workflow_run` event)
    ci_title_push = os.environ.get("CI_TITLE_PUSH")
    ci_title_workflow_run = os.environ.get("CI_TITLE_WORKFLOW_RUN")
    ci_title = ci_title_push if ci_title_push else ci_title_workflow_run

    ci_sha = os.environ.get("CI_SHA")

    ci_url = None
    if ci_sha:
        ci_url = f"https://github.com/{repository_full_name}/commit/{ci_sha}"

    if ci_title is not None:
        if ci_url is None:
            raise ValueError(
                "When a title is found (`ci_title`), it means a `push` event or a `workflow_run` event (triggered by "
                "another `push` event), and the commit SHA has to be provided in order to create the URL to the "
                "commit page."
            )
        ci_title = ci_title.strip().split("\n")[0].strip()

        # Retrieve the PR title and author login to complete the report
        commit_number = ci_url.split("/")[-1]
        ci_detail_url = f"https://api.github.com/repos/{repository_full_name}/commits/{commit_number}"
        ci_details = requests.get(ci_detail_url).json()
        ci_author = ci_details["author"]["login"]

        merged_by = None
        # Find the PR number (if any) and change the url to the actual PR page.
        numbers = pr_number_re.findall(ci_title)
        if len(numbers) > 0:
            pr_number = numbers[0]
            ci_detail_url = f"https://api.github.com/repos/{repository_full_name}/pulls/{pr_number}"
            ci_details = requests.get(ci_detail_url).json()

            ci_author = ci_details["user"]["login"]
            ci_url = f"https://github.com/{repository_full_name}/pull/{pr_number}"

            merged_by = ci_details["merged_by"]["login"]

        if merged_by is None:
            ci_title = f"<{ci_url}|{ci_title}>\nAuthor: {ci_author}"
        else:
            ci_title = f"<{ci_url}|{ci_title}>\nAuthor: {ci_author} | Merged by: {merged_by}"
    elif ci_sha:
        ci_title = f"<{ci_url}|commit: {ci_sha}>"
    else:
        ci_title = ""

    # `title` will be updated at the end before calling `Message()`.
    title = f"🤗 Results of {ci_event}"

    if runner_not_available or runner_failed or setup_failed:
        Message.error_out(title, ci_title, runner_not_available, runner_failed, setup_failed)
        exit(0)

    # sys.argv[0] is always `utils/notification_service.py`.
    arguments = sys.argv[1:]
    # In our usage in `.github/workflows/slack-report.yml`, we always pass an argument when calling this script.
    # The argument could be an empty string `""` if a job doesn't depend on the job `setup`.
    if arguments[0] == "":
        job_matrix = []
    else:
        job_matrix_as_str = arguments[0]
        try:
            folder_slices = ast.literal_eval(job_matrix_as_str)
            if len(folder_slices) > 0:
                if isinstance(folder_slices[0], list):
                    # Need to change from elements like `models/bert` to `models_bert` (the ones used as artifact names).
job_matrix = [ x.replace("models/", "models_").replace("quantization/", "quantization_") for folders in folder_slices for x in folders ] elif isinstance(folder_slices[0], str): job_matrix = [ x.replace("models/", "models_").replace("quantization/", "quantization_") for x in folder_slices ] except Exception: Message.error_out(title, ci_title) raise ValueError("Errored out.") github_actions_jobs = get_jobs( workflow_run_id=os.environ["GITHUB_RUN_ID"], token=os.environ["ACCESS_REPO_INFO_TOKEN"] ) github_actions_job_links = {job["name"]: job["html_url"] for job in github_actions_jobs} artifact_name_to_job_map = {} for job in github_actions_jobs: for step in job["steps"]: if step["name"].startswith("Test suite reports artifacts: "): artifact_name = step["name"][len("Test suite reports artifacts: ") :] artifact_name_to_job_map[artifact_name] = job break available_artifacts = retrieve_available_artifacts() test_categories = [ "PyTorch", "TensorFlow", "Flax", "Tokenizers", "Pipelines", "Trainer", "ONNX", "Auto", "Quantization", "Unclassified", ] job_name = os.getenv("CI_TEST_JOB") report_name_prefix = job_name # This dict will contain all the information relative to each model: # - Failures: the total, as well as the number of failures per-category defined above # - Success: total # - Time spent: as a comma-separated list of elapsed time # - Failures: as a line-break separated list of errors matrix_job_results = { matrix_name: { "failed": {m: {"unclassified": 0, "single": 0, "multi": 0} for m in test_categories}, "errors": 0, "success": 0, "skipped": 0, "time_spent": [], "failures": {}, "job_link": {}, } for matrix_name in job_matrix if f"{report_name_prefix}_{matrix_name}_test_reports" in available_artifacts } unclassified_model_failures = [] for matrix_name in matrix_job_results: for artifact_path_dict in available_artifacts[f"{report_name_prefix}_{matrix_name}_test_reports"].paths: path = artifact_path_dict["path"] artifact_gpu = artifact_path_dict["gpu"] if path not in artifact_name_to_job_map: # Mismatch between available artifacts and reported jobs on github. It happens. continue artifact = retrieve_artifact(path, artifact_gpu) if "stats" in artifact: # Link to the GitHub Action job job = artifact_name_to_job_map[path] matrix_job_results[matrix_name]["job_link"][artifact_gpu] = job["html_url"] failed, errors, success, skipped, time_spent = handle_test_results(artifact["stats"]) matrix_job_results[matrix_name]["success"] += success matrix_job_results[matrix_name]["errors"] += errors matrix_job_results[matrix_name]["skipped"] += skipped matrix_job_results[matrix_name]["time_spent"].append(float(time_spent[:-1])) stacktraces = handle_stacktraces(artifact["failures_line"]) # TODO: ??? for line in artifact["summary_short"].split("\n"): if line.startswith("FAILED "): # Avoid the extra `FAILED` entry given by `run_test_using_subprocess` causing issue when calling # `stacktraces.pop` below. 
                            # See `run_test_using_subprocess` in `src/transformers/testing_utils.py`
                            if " - Failed: (subprocess)" in line:
                                continue
                            line = line[len("FAILED ") :]
                            line = line.split()[0].replace("\n", "")

                            if artifact_gpu not in matrix_job_results[matrix_name]["failures"]:
                                matrix_job_results[matrix_name]["failures"][artifact_gpu] = []

                            trace = pop_default(stacktraces, 0, "Cannot retrieve error message.")
                            matrix_job_results[matrix_name]["failures"][artifact_gpu].append(
                                {"line": line, "trace": trace}
                            )

                            # TODO: How to deal with this
                            if re.search("tests/quantization", line):
                                matrix_job_results[matrix_name]["failed"]["Quantization"][artifact_gpu] += 1
                            elif re.search("test_modeling_tf_", line):
                                matrix_job_results[matrix_name]["failed"]["TensorFlow"][artifact_gpu] += 1
                            elif re.search("test_modeling_flax_", line):
                                matrix_job_results[matrix_name]["failed"]["Flax"][artifact_gpu] += 1
                            elif re.search("test_modeling", line):
                                matrix_job_results[matrix_name]["failed"]["PyTorch"][artifact_gpu] += 1
                            elif re.search("test_tokenization", line):
                                matrix_job_results[matrix_name]["failed"]["Tokenizers"][artifact_gpu] += 1
                            elif re.search("test_pipelines", line):
                                matrix_job_results[matrix_name]["failed"]["Pipelines"][artifact_gpu] += 1
                            elif re.search("test_trainer", line):
                                matrix_job_results[matrix_name]["failed"]["Trainer"][artifact_gpu] += 1
                            elif re.search("onnx", line):
                                matrix_job_results[matrix_name]["failed"]["ONNX"][artifact_gpu] += 1
                            elif re.search("auto", line):
                                matrix_job_results[matrix_name]["failed"]["Auto"][artifact_gpu] += 1
                            else:
                                matrix_job_results[matrix_name]["failed"]["Unclassified"][artifact_gpu] += 1
                                unclassified_model_failures.append(line)

    # Additional runs
    additional_files = {
        "PyTorch pipelines": "run_pipelines_torch_gpu_test_reports",
        "TensorFlow pipelines": "run_pipelines_tf_gpu_test_reports",
        "Examples directory": "run_examples_gpu_test_reports",
        "DeepSpeed": "run_torch_cuda_extensions_gpu_test_reports",
    }

    if ci_event in ["push", "Nightly CI"] or ci_event.startswith("Past CI"):
        del additional_files["Examples directory"]
        del additional_files["PyTorch pipelines"]
        del additional_files["TensorFlow pipelines"]
    elif ci_event.startswith("Scheduled CI (AMD)"):
        del additional_files["TensorFlow pipelines"]
        del additional_files["DeepSpeed"]
    elif ci_event.startswith("Push CI (AMD)"):
        additional_files = {}

    report_repo_id = os.getenv("REPORT_REPO_ID")

    # if it is not a scheduled run, upload the reports to a subfolder under `report_repo_folder`
    report_repo_subfolder = ""
    if os.getenv("GITHUB_EVENT_NAME") != "schedule":
        report_repo_subfolder = f"{os.getenv('GITHUB_RUN_NUMBER')}-{os.getenv('GITHUB_RUN_ID')}"
        report_repo_subfolder = f"runs/{report_repo_subfolder}"

    workflow_run = get_last_daily_ci_run(
        token=os.environ["ACCESS_REPO_INFO_TOKEN"], workflow_run_id=os.getenv("GITHUB_RUN_ID")
    )
    workflow_run_created_time = workflow_run["created_at"]
    workflow_id = workflow_run["workflow_id"]

    report_repo_folder = workflow_run_created_time.split("T")[0]

    if report_repo_subfolder:
        report_repo_folder = f"{report_repo_folder}/{report_repo_subfolder}"

    # Remove some entries in `additional_files` if they are not concerned.
test_name = None if job_name in job_to_test_map: test_name = job_to_test_map[job_name] additional_files = {k: v for k, v in additional_files.items() if k == test_name} additional_results = { key: { "failed": {"unclassified": 0, "single": 0, "multi": 0}, "errors": 0, "success": 0, "skipped": 0, "time_spent": [], "error": False, "failures": {}, "job_link": {}, } for key in additional_files } for key in additional_results: # If a whole suite of test fails, the artifact isn't available. if additional_files[key] not in available_artifacts: additional_results[key]["error"] = True continue for artifact_path_dict in available_artifacts[additional_files[key]].paths: path = artifact_path_dict["path"] artifact_gpu = artifact_path_dict["gpu"] # Link to the GitHub Action job job = artifact_name_to_job_map[path] additional_results[key]["job_link"][artifact_gpu] = job["html_url"] artifact = retrieve_artifact(path, artifact_gpu) stacktraces = handle_stacktraces(artifact["failures_line"]) failed, errors, success, skipped, time_spent = handle_test_results(artifact["stats"]) additional_results[key]["failed"][artifact_gpu or "unclassified"] += failed additional_results[key]["success"] += success additional_results[key]["errors"] += errors additional_results[key]["skipped"] += skipped additional_results[key]["time_spent"].append(float(time_spent[:-1])) if len(artifact["errors"]): additional_results[key]["error"] = True if failed: for line in artifact["summary_short"].split("\n"): if line.startswith("FAILED "): # Avoid the extra `FAILED` entry given by `run_test_using_subprocess` causing issue when calling # `stacktraces.pop` below. # See `run_test_using_subprocess` in `src/transformers/testing_utils.py` if " - Failed: (subprocess)" in line: continue line = line[len("FAILED ") :] line = line.split()[0].replace("\n", "") if artifact_gpu not in additional_results[key]["failures"]: additional_results[key]["failures"][artifact_gpu] = [] trace = pop_default(stacktraces, 0, "Cannot retrieve error message.") additional_results[key]["failures"][artifact_gpu].append({"line": line, "trace": trace}) # Let's only check the warning for the model testing job. Currently, the job `run_extract_warnings` is only run # when `inputs.job` (in the workflow file) is `run_models_gpu`. The reason is: otherwise we need to save several # artifacts with different names which complicates the logic for an insignificant part of the CI workflow reporting. selected_warnings = [] if job_name == "run_models_gpu": if "warnings_in_ci" in available_artifacts: directory = available_artifacts["warnings_in_ci"].paths[0]["path"] with open(os.path.join(directory, "selected_warnings.json")) as fp: selected_warnings = json.load(fp) if not os.path.isdir(os.path.join(os.getcwd(), f"ci_results_{job_name}")): os.makedirs(os.path.join(os.getcwd(), f"ci_results_{job_name}")) nvidia_daily_ci_workflow = "huggingface/transformers/.github/workflows/self-scheduled-caller.yml" amd_daily_ci_workflows = ( "huggingface/transformers/.github/workflows/self-scheduled-amd-mi325-caller.yml", "huggingface/transformers/.github/workflows/self-scheduled-amd-mi355-caller.yml", ) is_nvidia_daily_ci_workflow = os.environ.get("GITHUB_WORKFLOW_REF").startswith(nvidia_daily_ci_workflow) is_amd_daily_ci_workflow = os.environ.get("GITHUB_WORKFLOW_REF").startswith(amd_daily_ci_workflows) is_scheduled_ci_run = os.environ.get("GITHUB_EVENT_NAME") == "schedule" # For AMD workflow runs: the different AMD CI callers (MI210/MI250/MI300, etc.) 
are triggered by `workflow_run` # event of `.github/workflows/self-scheduled-amd-caller.yml`. if os.environ.get("GITHUB_EVENT_NAME") == "workflow_run": # Get the path to the file on the runner that contains the full event webhook payload. event_payload_path = os.environ.get("GITHUB_EVENT_PATH") # Load the event payload with open(event_payload_path) as fp: event_payload = json.load(fp) # The event that triggers the original `workflow_run`. if "workflow_run" in event_payload: is_scheduled_ci_run = event_payload["workflow_run"]["event"] == "schedule" test_name_and_result_pairs = [] if len(matrix_job_results) > 0: test_name = job_to_test_map[job_name] test_name_and_result_pairs.append((test_name, matrix_job_results)) for test_name, result in additional_results.items(): test_name_and_result_pairs.append((test_name, result)) for test_name, result in test_name_and_result_pairs: with open(f"ci_results_{job_name}/{test_to_result_name[test_name]}_results.json", "w", encoding="UTF-8") as fp: json.dump(result, fp, indent=4, ensure_ascii=False) api.upload_file( path_or_fileobj=f"ci_results_{job_name}/{test_to_result_name[test_name]}_results.json", path_in_repo=f"{report_repo_folder}/ci_results_{job_name}/{test_to_result_name[test_name]}_results.json", repo_id=report_repo_id, repo_type="dataset", token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None), ) # Let's create a file contain job --> job link if len(matrix_job_results) > 0: target_results = matrix_job_results else: default_result = { "failed": {"unclassified": 0, "single": 0, "multi": 0}, "success": 0, "time_spent": [], "error": False, "failures": {}, "job_link": {}, } key = job_to_test_map.get(job_name) target_results = additional_results.get(key, default_result) if key is not None else default_result # Make the format uniform between `model_results` and `additional_results[XXX]` if "failures" in target_results: target_results = {job_name: target_results} job_links = {} sorted_dict = sorted(target_results.items(), key=lambda t: t[0]) for job, job_result in sorted_dict: if job.startswith("models_"): job = job[len("models_") :] elif job.startswith("quantization_"): job = job[len("quantization_") :] job_links[job] = job_result["job_link"] with open(f"ci_results_{job_name}/job_links.json", "w", encoding="UTF-8") as fp: json.dump(job_links, fp, indent=4, ensure_ascii=False) api.upload_file( path_or_fileobj=f"ci_results_{job_name}/job_links.json", path_in_repo=f"{report_repo_folder}/ci_results_{job_name}/job_links.json", repo_id=report_repo_id, repo_type="dataset", token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None), ) prev_workflow_run_id = None other_workflow_run_ids = [] if is_scheduled_ci_run: prev_workflow_run_id = get_last_daily_ci_workflow_run_id( token=os.environ["ACCESS_REPO_INFO_TOKEN"], workflow_id=workflow_id ) # For a scheduled run that is not the Nvidia's scheduled daily CI, add Nvidia's scheduled daily CI run as a target to compare. if not is_nvidia_daily_ci_workflow: # The id of the workflow `.github/workflows/self-scheduled-caller.yml` (not of a workflow run of it). other_workflow_id = "90575235" # We need to get the Nvidia's scheduled daily CI run that match the current run (i.e. 
run with the same commit SHA) other_workflow_run_id = get_last_daily_ci_workflow_run_id( token=os.environ["ACCESS_REPO_INFO_TOKEN"], workflow_id=other_workflow_id, commit_sha=ci_sha ) other_workflow_run_ids.append(other_workflow_run_id) else: prev_workflow_run_id = os.environ["PREV_WORKFLOW_RUN_ID"] other_workflow_run_id = os.environ["OTHER_WORKFLOW_RUN_ID"] other_workflow_run_ids.append(other_workflow_run_id) prev_ci_artifacts = (None, None) other_ci_artifacts = [] output_dir = os.path.join(os.getcwd(), "previous_reports") os.makedirs(output_dir, exist_ok=True) for idx, target_workflow_run_id in enumerate([prev_workflow_run_id] + other_workflow_run_ids): if target_workflow_run_id is None or target_workflow_run_id == "": continue else: artifact_names = [f"ci_results_{job_name}"] ci_artifacts = get_last_daily_ci_reports( artifact_names=artifact_names, output_dir=output_dir, token=os.environ["ACCESS_REPO_INFO_TOKEN"], workflow_run_id=target_workflow_run_id, ) if idx == 0: prev_ci_artifacts = (target_workflow_run_id, ci_artifacts) else: other_ci_artifacts.append((target_workflow_run_id, ci_artifacts)) # Only for AMD at this moment. # TODO: put this into a method diff_file_url = None if is_amd_daily_ci_workflow: if not (prev_workflow_run_id is None or prev_workflow_run_id == ""): ci_artifacts = get_last_daily_ci_reports( artifact_names=None, output_dir=output_dir, token=os.environ["ACCESS_REPO_INFO_TOKEN"], workflow_run_id=prev_workflow_run_id, ) current_artifacts = sorted([d for d in os.listdir() if os.path.isdir(d) and d.endswith("_test_reports")]) prev_artifacts = sorted([d for d in os.listdir(output_dir) if os.path.isdir(os.path.join(output_dir, d)) and d.endswith("_test_reports")]) # fmt: skip current_artifacts_set = {} for d in current_artifacts: current_artifacts_set[d] = os.path.join(d, "summary_short.txt") prev_artifacts_set = {} for d in prev_artifacts: prev_artifacts_set[d] = os.path.join(output_dir, d, "summary_short.txt") report = compare_job_sets(prev_artifacts_set, current_artifacts_set) with open(f"ci_results_{job_name}/test_results_diff.json", "w") as fp: fp.write(report) # upload commit_info = api.upload_file( path_or_fileobj=f"ci_results_{job_name}/test_results_diff.json", path_in_repo=f"{report_repo_folder}/ci_results_{job_name}/test_results_diff.json", repo_id=report_repo_id, repo_type="dataset", token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None), ) diff_file_url = f"https://huggingface.co/datasets/{report_repo_id}/resolve/{commit_info.oid}/{report_repo_folder}/ci_results_{job_name}/test_results_diff.json" ci_name_in_report = "" if job_name in job_to_test_map: ci_name_in_report = job_to_test_map[job_name] title = f"🤗 Results of {ci_event}: {ci_name_in_report}" message = Message( title, ci_title, matrix_job_results, additional_results, selected_warnings=selected_warnings, prev_ci_artifacts=prev_ci_artifacts, other_ci_artifacts=other_ci_artifacts, ) # send report only if there is any failure (for push CI) if message.n_failures or (ci_event != "push" and not ci_event.startswith("Push CI (AMD)")): message.post() message.post_reply()
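# Usage note (a sketch, not part of the original CI setup): this script is
# normally executed by the `send_results` job of the CI workflows, which
# supplies its configuration through environment variables such as
# CI_SLACK_BOT_TOKEN, SLACK_REPORT_CHANNEL, CI_EVENT, CI_TEST_JOB,
# GITHUB_RUN_ID and ACCESS_REPO_INFO_TOKEN (among others), plus the job matrix
# as the first positional argument. All values below are placeholders:
#
#   CI_SLACK_BOT_TOKEN="xoxb-placeholder" SLACK_REPORT_CHANNEL="C0123456789" \
#   CI_EVENT="Daily CI" CI_TEST_JOB="run_models_gpu" GITHUB_RUN_ID="0" \
#   python utils/notification_service.py "[['models/bert', 'models/gpt2']]"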
transformers/utils/notification_service.py/0
{ "file_path": "transformers/utils/notification_service.py", "repo_id": "transformers", "token_count": 32080 }
533
# Training customization

TRL is designed with modularity in mind so that users are able to efficiently customize the training loop for their needs.

Below are some examples of how you can apply and test different techniques.

Note: Although these examples use the DPOTrainer, the customization applies to most (if not all) trainers.

## Use different optimizers and schedulers

By default, the `DPOTrainer` creates a `torch.optim.AdamW` optimizer. You can create and define a different optimizer and pass it to `DPOTrainer` as follows:

```python
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from torch import optim
from trl import DPOConfig, DPOTrainer

model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
dataset = load_dataset("trl-lib/ultrafeedback_binarized", split="train")
training_args = DPOConfig(output_dir="Qwen2.5-0.5B-DPO")

optimizer = optim.SGD(model.parameters(), lr=training_args.learning_rate)

trainer = DPOTrainer(
    model=model,
    args=training_args,
    train_dataset=dataset,
    tokenizer=tokenizer,
    optimizers=(optimizer, None),
)
trainer.train()
```

### Add a learning rate scheduler

You can further customize your training by adding a learning rate scheduler.

```python
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from torch import optim
from trl import DPOConfig, DPOTrainer

model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
dataset = load_dataset("trl-lib/ultrafeedback_binarized", split="train")
training_args = DPOConfig(output_dir="Qwen2.5-0.5B-DPO")

optimizer = optim.AdamW(model.parameters(), lr=training_args.learning_rate)
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)

trainer = DPOTrainer(
    model=model,
    args=training_args,
    train_dataset=dataset,
    tokenizer=tokenizer,
    optimizers=(optimizer, lr_scheduler),
)
trainer.train()
```

## Memory efficient fine-tuning by sharing layers

Another tool you can use for more memory-efficient fine-tuning is to share layers between the reference model and the model you want to train.

```python
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import create_reference_model, DPOConfig, DPOTrainer

model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
ref_model = create_reference_model(model, num_shared_layers=6)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
dataset = load_dataset("trl-lib/ultrafeedback_binarized", split="train[:1%]")
training_args = DPOConfig(output_dir="Qwen2.5-0.5B-DPO")

trainer = DPOTrainer(
    model=model,
    ref_model=ref_model,
    args=training_args,
    train_dataset=dataset,
    tokenizer=tokenizer,
)
trainer.train()
```

## Pass 8-bit reference models

Since `trl` supports all keyword arguments when loading a model from `transformers` using `from_pretrained`, you can also leverage `load_in_8bit` from `transformers` for more memory-efficient fine-tuning.

Read more about 8-bit model loading in `transformers` [here](https://huggingface.co/docs/transformers/en/peft#load-in-8bit-or-4bit).
```python
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from trl import DPOConfig, DPOTrainer

model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
quantization_config = BitsAndBytesConfig(load_in_8bit=True)
ref_model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct", quantization_config=quantization_config)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
dataset = load_dataset("trl-lib/ultrafeedback_binarized", split="train")
training_args = DPOConfig(output_dir="Qwen2.5-0.5B-DPO")

trainer = DPOTrainer(
    model=model,
    ref_model=ref_model,
    args=training_args,
    train_dataset=dataset,
    tokenizer=tokenizer,
)
trainer.train()
```

## Use the accelerator cache optimizer

When training large models, it is best to manage the accelerator cache by clearing it iteratively. To do so, simply pass `optimize_device_cache=True` to `DPOConfig`:

```python
training_args = DPOConfig(..., optimize_device_cache=True)
```
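For context, here is the option as part of a complete run. This is a minimal sketch that simply reuses the model and dataset from the examples above; the only addition is the `optimize_device_cache` flag itself:

```python
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
dataset = load_dataset("trl-lib/ultrafeedback_binarized", split="train")

# Iteratively clear the accelerator cache during training to reduce peak memory usage
training_args = DPOConfig(output_dir="Qwen2.5-0.5B-DPO", optimize_device_cache=True)

trainer = DPOTrainer(
    model=model,
    args=training_args,
    train_dataset=dataset,
    tokenizer=tokenizer,
)
trainer.train()
```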
trl/docs/source/customization.md/0
{ "file_path": "trl/docs/source/customization.md", "repo_id": "trl", "token_count": 1522 }
534
# Reward Modeling

[![](https://img.shields.io/badge/All_models-Reward_Trainer-blue)](https://huggingface.co/models?other=reward-trainer,trl)

TRL supports custom reward modeling, so that anyone can train a reward model on their own dataset and model.

Check out a complete flexible example at [`examples/scripts/reward_modeling.py`](https://github.com/huggingface/trl/tree/main/examples/scripts/reward_modeling.py).

## Expected dataset type

The [`RewardTrainer`] requires an [*implicit prompt* preference dataset](dataset_formats#preference). This means that the dataset should only contain the columns `"chosen"` and `"rejected"` (and not `"prompt"`).
The [`RewardTrainer`] supports both [conversational](dataset_formats#conversational) and [standard](dataset_formats#standard) dataset formats. When provided with a conversational dataset, the trainer will automatically apply the chat template to the dataset.

You can also use a pretokenized dataset, in which case the dataset should contain the following columns: `input_ids_chosen`, `attention_mask_chosen`, `input_ids_rejected` and `attention_mask_rejected` (a minimal sketch of building such a dataset is given at the end of this page).

## Using the `RewardTrainer`

After preparing your dataset, you can use the [`RewardTrainer`] in the same way as the `Trainer` class from 🤗 Transformers.
You should pass an `AutoModelForSequenceClassification` model to the [`RewardTrainer`], along with a [`RewardConfig`] which configures the hyperparameters of the training.

### Leveraging 🤗 PEFT to train a reward model

Just pass a `peft_config` in the keyword arguments of [`RewardTrainer`], and the trainer should automatically take care of converting the model into a PEFT model!

```python
from peft import LoraConfig, TaskType
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from trl import RewardTrainer, RewardConfig

model = AutoModelForSequenceClassification.from_pretrained("gpt2")
peft_config = LoraConfig(
    task_type=TaskType.SEQ_CLS,
    inference_mode=False,
    r=8,
    lora_alpha=32,
    lora_dropout=0.1,
)

...

trainer = RewardTrainer(
    model=model,
    args=training_args,
    processing_class=tokenizer,
    train_dataset=dataset,
    peft_config=peft_config,
)

trainer.train()
```

### Adding a margin to the loss

As in the [Llama 2 paper](https://huggingface.co/papers/2307.09288), you can add a margin to the loss by adding a `margin` column to the dataset. The reward collator will automatically pass it through and the loss will be computed accordingly.

```python
def add_margin(row):
    # Assume you have a score_chosen and score_rejected columns that you want to use to compute the margin
    return {'margin': row['score_chosen'] - row['score_rejected']}

dataset = dataset.map(add_margin)
```

### Centering rewards

In many scenarios, it's preferable to ensure that a reward model's output is mean zero. This is often done by first calculating the model's average score and then subtracting it.

[[Eisenstein et al., 2023]](https://huggingface.co/papers/2312.09244) proposed an auxiliary loss function designed to directly learn a centered reward model. This auxiliary loss minimizes the squared sum of the rewards, encouraging the model to naturally produce mean-zero outputs:

$$\Big( R(p, r_1) + R(p, r_2) \Big)^2$$

This auxiliary loss is combined with the main loss function, weighted by the parameter `center_rewards_coefficient` in the [`RewardConfig`]. By default, this feature is deactivated (`center_rewards_coefficient = None`).

```python
training_args = RewardConfig(
    center_rewards_coefficient=0.01,
    ...
)
```

For reference results, please refer to PR [#1932](https://github.com/huggingface/trl/pull/1932).

## RewardTrainer

[[autodoc]] RewardTrainer
    - train
    - save_model
    - push_to_hub

## RewardConfig

[[autodoc]] RewardConfig
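As a complement to the pretokenized dataset option mentioned in the *Expected dataset type* section, the following is a minimal sketch of how such a dataset could be built. The toy preference pair and the `gpt2` tokenizer are illustrative assumptions, not requirements:

```python
from datasets import Dataset
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")

# A single toy preference pair, just to illustrate the expected columns
pairs = [{"chosen": "The capital of France is Paris.", "rejected": "The capital of France is Rome."}]

def pretokenize(row):
    # Tokenize both completions and expose them under the column names the trainer expects
    chosen = tokenizer(row["chosen"])
    rejected = tokenizer(row["rejected"])
    return {
        "input_ids_chosen": chosen["input_ids"],
        "attention_mask_chosen": chosen["attention_mask"],
        "input_ids_rejected": rejected["input_ids"],
        "attention_mask_rejected": rejected["attention_mask"],
    }

dataset = Dataset.from_list(pairs).map(pretokenize, remove_columns=["chosen", "rejected"])
```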
trl/docs/source/reward_trainer.md/0
{ "file_path": "trl/docs/source/reward_trainer.md", "repo_id": "trl", "token_count": 1163 }
535
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # 0. imports import os from dataclasses import dataclass, field from typing import Optional import torch from accelerate import Accelerator from datasets import Dataset, load_dataset from peft import LoraConfig from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed from trl import DPOConfig, DPOTrainer # Define and parse arguments. @dataclass class ScriptArguments: """ The arguments for the DPO training script. """ # data parameters beta: Optional[float] = field(default=0.1, metadata={"help": "the beta parameter for DPO loss"}) # training parameters model_name_or_path: Optional[str] = field( default="../sft/results/final_checkpoint", metadata={"help": "the location of the SFT model name or path"}, ) learning_rate: Optional[float] = field(default=5e-4, metadata={"help": "optimizer learning rate"}) lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "the lr scheduler type"}) warmup_steps: Optional[int] = field(default=100, metadata={"help": "the number of warmup steps"}) weight_decay: Optional[float] = field(default=0.05, metadata={"help": "the weight decay"}) optimizer_type: Optional[str] = field(default="paged_adamw_32bit", metadata={"help": "the optimizer type"}) per_device_train_batch_size: Optional[int] = field(default=4, metadata={"help": "train batch size per device"}) per_device_eval_batch_size: Optional[int] = field(default=1, metadata={"help": "eval batch size per device"}) gradient_accumulation_steps: Optional[int] = field( default=4, metadata={"help": "the number of gradient accumulation steps"} ) gradient_checkpointing: Optional[bool] = field( default=True, metadata={"help": "whether to use gradient checkpointing"} ) gradient_checkpointing_use_reentrant: Optional[bool] = field( default=False, metadata={"help": "whether to use reentrant for gradient checkpointing"} ) lora_alpha: Optional[float] = field(default=16, metadata={"help": "the lora alpha parameter"}) lora_dropout: Optional[float] = field(default=0.05, metadata={"help": "the lora dropout parameter"}) lora_r: Optional[int] = field(default=8, metadata={"help": "the lora r parameter"}) max_prompt_length: Optional[int] = field(default=512, metadata={"help": "the maximum prompt length"}) max_length: Optional[int] = field(default=1024, metadata={"help": "the maximum sequence length"}) max_steps: Optional[int] = field(default=1000, metadata={"help": "max number of training steps"}) logging_steps: Optional[int] = field(default=10, metadata={"help": "the logging frequency"}) save_steps: Optional[int] = field(default=100, metadata={"help": "the saving frequency"}) eval_steps: Optional[int] = field(default=100, metadata={"help": "the evaluation frequency"}) output_dir: Optional[str] = field(default="./results", metadata={"help": "the output directory"}) log_freq: Optional[int] = field(default=1, metadata={"help": "the logging frequency"}) load_in_4bit: Optional[bool] = 
field(default=True, metadata={"help": "whether to load the model in 4bit"})
    model_dtype: Optional[str] = field(
        default="float16", metadata={"help": "model_dtype[float16, bfloat16, float] for loading."}
    )

    # instrumentation
    report_to: Optional[str] = field(
        default="wandb",
        metadata={
            "help": 'The list of integrations to report the results and logs to. Supported platforms are `"azure_ml"`,'
            '`"comet_ml"`, `"mlflow"`, `"neptune"`, `"tensorboard"`,`"clearml"` and `"wandb"`. '
            'Use `"all"` to report to all integrations installed, `"none"` for no integrations.'
        },
    )
    # debug argument for distributed training
    ignore_bias_buffers: Optional[bool] = field(
        default=False,
        metadata={
            "help": "fix for DDP issues with LM bias/mask buffers - invalid scalar type,`inplace operation. See"
            "https://github.com/huggingface/transformers/issues/22482#issuecomment-1595790992"
        },
    )
    seed: Optional[int] = field(
        default=0, metadata={"help": "Random seed that will be set at the beginning of training."}
    )


def get_stack_exchange_paired(
    data_dir: str = "data/rl",
    cache_dir: Optional[str] = None,
    num_proc=24,
) -> Dataset:
    """Load the stack-exchange-paired dataset from Hugging Face and convert it to the necessary format.

    The dataset is converted to a dictionary with the following structure:
    {
        'prompt': list[str],
        'chosen': list[str],
        'rejected': list[str],
    }

    Prompts are structured as follows:
      "Question: " + <prompt> + "\n\nAnswer: "
    """
    dataset = load_dataset(
        "lvwerra/stack-exchange-paired",
        split="train",
        cache_dir=cache_dir,
        data_dir=data_dir,
        verification_mode="no_checks",
    )
    original_columns = dataset.column_names

    def return_prompt_and_responses(samples) -> dict[str, list[str]]:
        return {
            "prompt": ["Question: " + question + "\n\nAnswer: " for question in samples["question"]],
            "chosen": samples["response_j"],
            "rejected": samples["response_k"],
        }

    return dataset.map(
        return_prompt_and_responses,
        batched=True,
        num_proc=num_proc,
        remove_columns=original_columns,
    )


if __name__ == "__main__":
    parser = HfArgumentParser(ScriptArguments)
    script_args = parser.parse_args_into_dataclasses()[0]
    set_seed(script_args.seed)

    # 1. load a pretrained model
    torch_dtype = torch.float
    if script_args.model_dtype == "float16":
        torch_dtype = torch.float16
    elif script_args.model_dtype == "bfloat16":
        torch_dtype = torch.bfloat16

    model = AutoModelForCausalLM.from_pretrained(
        script_args.model_name_or_path,
        low_cpu_mem_usage=True,
        torch_dtype=torch_dtype,
        load_in_4bit=script_args.load_in_4bit,
        device_map={"": Accelerator().local_process_index},
    )
    model.config.use_cache = False

    if script_args.ignore_bias_buffers:
        # torch distributed hack
        model._ddp_params_and_buffers_to_ignore = [
            name for name, buffer in model.named_buffers() if buffer.dtype == torch.bool
        ]

    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
    tokenizer.pad_token = tokenizer.eos_token

    # 2. Load the Stack-exchange paired dataset
    train_dataset = get_stack_exchange_paired(data_dir="data/rl")
    train_dataset = train_dataset.filter(
        lambda x: len(x["prompt"]) + len(x["chosen"]) <= script_args.max_length
        and len(x["prompt"]) + len(x["rejected"]) <= script_args.max_length,
        num_proc=24,  # ScriptArguments defines no num_proc field, so reuse the loader's default
    )

    # 3. Load evaluation dataset
    eval_dataset = get_stack_exchange_paired(data_dir="data/evaluation")
    eval_dataset = eval_dataset.filter(
        lambda x: len(x["prompt"]) + len(x["chosen"]) <= script_args.max_length
        and len(x["prompt"]) + len(x["rejected"]) <= script_args.max_length,
        num_proc=24,  # same fallback as above
    )

    # 4. initialize training arguments:
    training_args = DPOConfig(
        per_device_train_batch_size=script_args.per_device_train_batch_size,
        per_device_eval_batch_size=script_args.per_device_eval_batch_size,
        max_steps=script_args.max_steps,
        logging_steps=script_args.logging_steps,
        save_steps=script_args.save_steps,
        gradient_accumulation_steps=script_args.gradient_accumulation_steps,
        gradient_checkpointing=script_args.gradient_checkpointing,
        learning_rate=script_args.learning_rate,
        eval_strategy="steps",
        eval_steps=script_args.eval_steps,
        output_dir=script_args.output_dir,
        report_to=script_args.report_to,
        lr_scheduler_type=script_args.lr_scheduler_type,
        warmup_steps=script_args.warmup_steps,
        optim=script_args.optimizer_type,
        bf16=True,
        remove_unused_columns=False,
        run_name="dpo_llama2",
        gradient_checkpointing_kwargs=dict(use_reentrant=script_args.gradient_checkpointing_use_reentrant),
        seed=script_args.seed,
    )

    peft_config = LoraConfig(
        r=script_args.lora_r,
        lora_alpha=script_args.lora_alpha,
        lora_dropout=script_args.lora_dropout,
        target_modules=[
            "q_proj",
            "v_proj",
            "k_proj",
            "out_proj",
            "fc_in",
            "fc_out",
            "wte",
        ],
        bias="none",
        task_type="CAUSAL_LM",
    )

    # 5. initialize the DPO trainer
    dpo_trainer = DPOTrainer(
        model,
        ref_model=None,
        args=training_args,
        beta=script_args.beta,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        processing_class=tokenizer,
        peft_config=peft_config,
        max_prompt_length=script_args.max_prompt_length,
        max_length=script_args.max_length,
    )

    # 6. train
    dpo_trainer.train()
    dpo_trainer.save_model(script_args.output_dir)

    # 7. save
    output_dir = os.path.join(script_args.output_dir, "final_checkpoint")
    dpo_trainer.model.save_pretrained(output_dir)
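
# Illustrative aside (not part of the original script): a standalone sanity
# check of the prompt construction performed in `get_stack_exchange_paired`,
# showing how raw question/answer pairs become the prompt/chosen/rejected
# columns that DPOTrainer expects.
_toy_batch = {
    "question": ["What does DPO optimize?"],
    "response_j": ["A preference objective over chosen/rejected pairs."],  # preferred
    "response_k": ["Plain next-token likelihood."],  # rejected
}
_converted = {
    "prompt": ["Question: " + q + "\n\nAnswer: " for q in _toy_batch["question"]],
    "chosen": _toy_batch["response_j"],
    "rejected": _toy_batch["response_k"],
}
assert _converted["prompt"][0] == "Question: What does DPO optimize?\n\nAnswer: "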
trl/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py/0
{ "file_path": "trl/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py", "repo_id": "trl", "token_count": 3980 }
536
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # /// script # dependencies = [ # "trl @ git+https://github.com/huggingface/trl.git", # "Pillow>=9.4.0", # ] # /// """ pip install pillow # Tested on 8x H100 GPUs accelerate launch \ --config_file examples/accelerate_configs/deepspeed_zero3.yaml \ examples/scripts/sft_vlm.py \ --dataset_name HuggingFaceH4/llava-instruct-mix-vsft \ --model_name_or_path llava-hf/llava-1.5-7b-hf \ --gradient_accumulation_steps 8 \ --output_dir LLaVA-1.5-7B-SFT \ --torch_dtype bfloat16 For LLaVA-NeXT, use: (requires transformers>=4.45) --model_name_or_path llava-hf/llava-v1.6-mistral-7b-hf For meta-llama/Llama-3.2-11B-Vision-Instruct, use: (requires transformers>=4.45.1) --model_name_or_path meta-llama/Llama-3.2-11B-Vision-Instruct accelerate launch \ --config_file examples/accelerate_configs/deepspeed_zero3.yaml \ examples/scripts/sft_vlm.py \ --dataset_name HuggingFaceH4/llava-instruct-mix-vsft \ --model_name_or_path HuggingFaceTB/SmolVLM-Instruct \ --per_device_train_batch_size 1 \ --gradient_accumulation_steps 1 \ --output_dir SmolVLM-SFT \ --torch_dtype bfloat16 \ --use_peft \ --lora_target_modules down_proj, o_proj, k_proj, q_proj, gate_proj, up_proj, v_proj """ import torch from datasets import load_dataset from transformers import AutoModelForImageTextToText from trl import ( ModelConfig, ScriptArguments, SFTConfig, SFTTrainer, TrlParser, get_kbit_device_map, get_peft_config, get_quantization_config, ) if __name__ == "__main__": parser = TrlParser((ScriptArguments, SFTConfig, ModelConfig)) script_args, training_args, model_args = parser.parse_args_and_config() training_args.gradient_checkpointing_kwargs = dict(use_reentrant=False) training_args.max_length = None ################ # Model, Tokenizer & Processor ################ torch_dtype = ( model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype) ) quantization_config = get_quantization_config(model_args) model_kwargs = dict( revision=model_args.model_revision, attn_implementation=model_args.attn_implementation, torch_dtype=torch_dtype, device_map=get_kbit_device_map() if quantization_config is not None else None, quantization_config=quantization_config, ) model = AutoModelForImageTextToText.from_pretrained( model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code, **model_kwargs ) ################ # Dataset ################ dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config) ################ # Training ################ trainer = SFTTrainer( model=model, args=training_args, train_dataset=dataset[script_args.dataset_train_split], eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None, peft_config=get_peft_config(model_args), ) trainer.train() # Save and push to hub trainer.save_model(training_args.output_dir) if training_args.push_to_hub: trainer.push_to_hub(dataset_name=script_args.dataset_name)
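
# Illustrative aside (an assumption about the data layout, not part of this
# script): VLM SFT datasets such as llava-instruct-mix-vsft typically pair a
# multimodal chat with the image(s) it references, roughly like this:
_example = {
    "messages": [
        {"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "What is in this image?"}]},
        {"role": "assistant", "content": [{"type": "text", "text": "A red bicycle leaning against a wall."}]},
    ],
    "images": [],  # the PIL.Image objects referenced by the "image" placeholders
}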
trl/examples/scripts/sft_vlm.py/0
{ "file_path": "trl/examples/scripts/sft_vlm.py", "repo_id": "trl", "token_count": 1552 }
537
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from datasets import load_dataset from parameterized import parameterized from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer from transformers.testing_utils import require_peft from trl import CPOConfig, CPOTrainer from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE from .testing_utils import TrlTestCase class CPOTrainerTester(TrlTestCase): def setUp(self): super().setUp() self.model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" self.model = AutoModelForCausalLM.from_pretrained(self.model_id) self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) self.tokenizer.pad_token = self.tokenizer.eos_token # get t5 as seq2seq example: model_id = "trl-internal-testing/tiny-T5ForConditionalGeneration" self.t5_model = AutoModelForSeq2SeqLM.from_pretrained(model_id) self.t5_tokenizer = AutoTokenizer.from_pretrained(model_id) self.t5_tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE @parameterized.expand( [ ("qwen", "sigmoid", "standard_preference"), ("t5", "hinge", "standard_implicit_prompt_preference"), ("qwen", "ipo", "conversational_preference"), ("t5", "ipo", "conversational_implicit_prompt_preference"), ("qwen", "simpo", "standard_preference"), ("t5", "simpo", "standard_implicit_prompt_preference"), ("qwen", "hinge", "conversational_preference"), ] ) def test_cpo_trainer(self, name, loss_type, config_name): training_args = CPOConfig( output_dir=self.tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=1, learning_rate=9e-1, eval_strategy="steps", beta=0.1, loss_type=loss_type, cpo_alpha=1.0, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", config_name) if name == "qwen": model = self.model tokenizer = self.tokenizer elif name == "t5": model = self.t5_model tokenizer = self.t5_tokenizer training_args.is_encoder_decoder = True trainer = CPOTrainer( model=model, args=training_args, processing_class=tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], ) previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} trainer.train() self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) # Check that the parameters have changed for n, param in previous_trainable_params.items(): new_param = trainer.model.get_parameter(n) if param.sum() != 0: # ignore 0 biases self.assertFalse(torch.equal(param, new_param)) @parameterized.expand( [ ("standard_preference",), ("standard_implicit_prompt_preference",), ("conversational_preference",), ("conversational_implicit_prompt_preference",), ] ) @require_peft def test_cpo_trainer_with_lora(self, config_name): from peft import LoraConfig lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) training_args = CPOConfig( output_dir=self.tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, 
gradient_accumulation_steps=4, learning_rate=9e-1, eval_strategy="steps", beta=0.1, cpo_alpha=1.0, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", config_name) trainer = CPOTrainer( model=self.model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], peft_config=lora_config, ) previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} trainer.train() self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) # Check that the parameters have changed for n, param in previous_trainable_params.items(): if "lora" in n: new_param = trainer.model.get_parameter(n) if param.sum() != 0: # ignore 0 biases self.assertFalse(torch.equal(param, new_param)) def test_compute_metrics(self): dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") def dummy_compute_metrics(*args, **kwargs): return {"test": 0.0} training_args = CPOConfig( output_dir=self.tmp_dir, per_device_train_batch_size=2, remove_unused_columns=False, do_eval=True, eval_strategy="steps", eval_steps=1, per_device_eval_batch_size=2, report_to="none", ) trainer = CPOTrainer( model=self.model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], compute_metrics=dummy_compute_metrics, ) trainer.train() self.assertEqual(trainer.state.log_history[-2]["eval_test"], 0.0) def test_alphapo_trainer(self): training_args = CPOConfig( output_dir=self.tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=1, learning_rate=9e-1, eval_strategy="steps", beta=0.1, loss_type="alphapo", alpha=0.5, simpo_gamma=0.5, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference") trainer = CPOTrainer( model=self.model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], ) previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} trainer.train() self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) for n, param in previous_trainable_params.items(): new_param = trainer.model.get_parameter(n) if param.sum() != 0: self.assertFalse(torch.equal(param, new_param))
trl/tests/test_cpo_trainer.py/0
{ "file_path": "trl/tests/test_cpo_trainer.py", "repo_id": "trl", "token_count": 3650 }
538
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from datasets import load_dataset from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer from transformers.testing_utils import require_peft from transformers.utils import is_peft_available from trl import PPOConfig, PPOTrainer from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE from .testing_utils import TrlTestCase if is_peft_available(): from peft import LoraConfig class TestPPOTrainer(TrlTestCase): def setUp(self): super().setUp() # Set up the models and tokenizer using the test model self.model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" self.model = AutoModelForCausalLM.from_pretrained(self.model_id) self.ref_model = AutoModelForCausalLM.from_pretrained(self.model_id) self.tokenizer = AutoTokenizer.from_pretrained(self.model_id, padding_side="left") self.tokenizer.add_special_tokens({"pad_token": "[PAD]"}) if self.tokenizer.chat_template is None: self.tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE # Add reward and value models as in ppo.py reward_model_id = "trl-internal-testing/tiny-Qwen2ForSequenceClassification-2.5" self.value_model = AutoModelForSequenceClassification.from_pretrained(reward_model_id, num_labels=1) self.reward_model = AutoModelForSequenceClassification.from_pretrained(reward_model_id, num_labels=1) # Load dataset raw_dataset = load_dataset("trl-internal-testing/zen", "standard_prompt_only") def tokenize(example, tokenizer): tokenized = tokenizer(text=example["prompt"]) if tokenizer.eos_token_id is not None and tokenized["input_ids"][-1] != tokenizer.eos_token_id: tokenized["input_ids"] = tokenized["input_ids"] + [tokenizer.eos_token_id] tokenized["attention_mask"] = tokenized["attention_mask"] + [1] return tokenized self.raw_dataset = raw_dataset.map(tokenize, fn_kwargs={"tokenizer": self.tokenizer}, remove_columns="prompt") def test_basic_training(self): """Test basic PPO training configuration and verify model updates.""" # Capture initial weights initial_critic_weights = {} initial_policy_weights = {} for name, param in self.value_model.named_parameters(): initial_critic_weights[name] = param.clone().detach() for name, param in self.model.named_parameters(): initial_policy_weights[name] = param.clone().detach() # Configure training args similar to example script training_args = PPOConfig( output_dir=self.tmp_dir, per_device_train_batch_size=4, per_device_eval_batch_size=2, num_ppo_epochs=2, # Decrease number of PPO epochs to speed up test report_to="none", ) # Create trainer trainer = PPOTrainer( args=training_args, processing_class=self.tokenizer, model=self.model, ref_model=self.ref_model, reward_model=self.reward_model, value_model=self.value_model, train_dataset=self.raw_dataset["train"], eval_dataset=self.raw_dataset["test"], ) # Train trainer.train() # Check if critic weights have been updated critic_weights_updated = False for name, param in trainer.model.value_model.named_parameters(): if not 
torch.allclose(initial_critic_weights[name], param.to("cpu")): critic_weights_updated = True break # Check if policy weights have been updated policy_weights_updated = False for name, param in trainer.model.policy.named_parameters(): if not torch.allclose(initial_policy_weights[name], param.to("cpu")): policy_weights_updated = True break self.assertTrue(critic_weights_updated, "Critic weights were not updated during training") self.assertTrue(policy_weights_updated, "Policy weights were not updated during training") @require_peft def test_peft_training(self): """Test PPO training with PEFT configuration and verify model updates.""" # Capture initial weights initial_critic_weights = {} initial_policy_weights = {} for name, param in self.value_model.named_parameters(): initial_critic_weights[name] = param.clone().detach() for name, param in self.model.named_parameters(): initial_policy_weights[name] = param.clone().detach() # Configure training args training_args = PPOConfig( output_dir=self.tmp_dir, per_device_train_batch_size=4, per_device_eval_batch_size=2, num_ppo_epochs=2, # Decrease number of PPO epochs to speed up test report_to="none", ) # Configure PEFT peft_config = LoraConfig( r=32, lora_alpha=16, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) # Create trainer with PEFT trainer = PPOTrainer( args=training_args, processing_class=self.tokenizer, model=self.model, ref_model=None, reward_model=self.reward_model, value_model=self.value_model, train_dataset=self.raw_dataset["train"], eval_dataset=self.raw_dataset["test"], peft_config=peft_config, ) # Train trainer.train() # Check if critic weights have been updated critic_weights_updated = False for name, param in trainer.model.value_model.named_parameters(): if name in initial_critic_weights and not torch.allclose(initial_critic_weights[name], param.to("cpu")): critic_weights_updated = True break # Check if policy weights have been updated - for PEFT we check the LoRA weights policy_weights_updated = False for name, param in trainer.model.policy.named_parameters(): if "lora" in name.lower() and param.requires_grad: # Only check LoRA weights # New weights should be non-zero if they've been updated if not torch.allclose(param, torch.zeros_like(param)): policy_weights_updated = True break self.assertTrue(critic_weights_updated, "Critic weights were not updated during training") self.assertTrue(policy_weights_updated, "Policy LoRA weights were not updated during training")
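
# Illustrative aside (the standard PPO clipped surrogate; a simplified sketch
# of the objective these tests exercise, not TRL's exact implementation):
def _ppo_policy_loss(new_logprobs, old_logprobs, advantages, cliprange=0.2):
    ratio = torch.exp(new_logprobs - old_logprobs)  # pi_new(a|s) / pi_old(a|s)
    unclipped = -advantages * ratio
    clipped = -advantages * torch.clamp(ratio, 1 - cliprange, 1 + cliprange)
    return torch.max(unclipped, clipped).mean()  # pessimistic bound (max of the two losses)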
trl/tests/test_ppo_trainer.py/0
{ "file_path": "trl/tests/test_ppo_trainer.py", "repo_id": "trl", "token_count": 3040 }
539
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of https://github.com/pytorch/torchtune.

import psutil
import torch
from accelerate import logging
from torch import nn
from torch.autograd.graph import saved_tensors_hooks


logger = logging.get_logger(__name__)


class OffloadActivations(saved_tensors_hooks):
    """
    Context manager under which activation tensors created in the forward pass will be offloaded.

    Enable the memory efficiency technique of activation offloading, where activations bigger than
    `min_offload_size` bytes will be offloaded to CPU in the forward and brought back in the backward. This is in
    contrast to maintaining the activation on GPU VRAM throughout the program.

    This manager contains the option of using one additional CUDA stream to handle the communication between CUDA
    and CPU, which is intended to overlap with the default computation stream to improve runtime. We designed
    synchronization with a few heuristics for optimizing the tradeoff between runtime vs memory usage.

    Args:
        use_pin_memory (`bool`, *optional*, defaults to `True`):
            Whether the offloaded Tensor will be placed in pinned memory on the CPU. Pinned memory allows the Tensor
            to be moved back onto GPU more quickly but is a limited resource.
        use_streams (`bool`, *optional*, defaults to `True`):
            Whether to use streams for performance optimization where the communications get overlapped with the
            computation. Requires a torch build after torch-2.5.0.
        min_offload_size (`int`, *optional*, defaults to `1024`):
            Minimum number of bytes a Tensor must be in order to qualify for offloading. If the tensor is too small,
            we do not want to waste bandwidth and resources moving it to CPU and back.
        max_fwd_stash_size (`int`, *optional*, defaults to `5`):
            Maximum size of the forward stash, or the maximum number of consecutive activations to keep alive during
            the forward pass. This number must be at least 1. Keeping alive more activations will potentially allow
            more overlap between the communication and compute streams at the cost of increasing memory usage.
            Keeping alive fewer activations will conserve memory, but may cause poor overlap between the streams,
            increasing runtime.

    Raises:
        ValueError: if `max_fwd_stash_size` is not at least `1`.

    Example:

    ```python
    >>> with OffloadActivations():
    ...
outputs = model(inputs, labels=labels) >>> loss = outputs.loss >>> loss.backward() ``` """ def __init__( self, use_pin_memory: bool = True, use_streams: bool = True, min_offload_size: int = 1024, max_fwd_stash_size: int = 5, ) -> None: self.use_streams = use_streams self.min_tensor_size_bytes = min_offload_size # we don't want to bother with small tensors self.tracker = {} # tensor_id => (new_tensor, if_modified) ---> track what saved/offloaded tensors are where self.tensor_id = 0 self.is_first_forward_call = True self.is_first_backward_call = True self.is_first_forward_pass = True # Managing cpu memory self.use_pin_memory = use_pin_memory self.virtual_memory_safe_pct = 60 # we should not exceed this percentage of memory self.accelerator_type = ( torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" ) # NOTE: xpu doesn't have `default_stream` API, use `current_stream` instead self.s0 = ( torch.xpu.current_stream() if self.accelerator_type == "xpu" else torch.cuda.default_stream() ) # comp stream # For streaming if self.use_streams: self.s1 = torch.Stream() if self.accelerator_type == "xpu" else torch.cuda.Stream() # comms stream self.fwd_stash = {} # tensor_id => (activation, ev1) if max_fwd_stash_size < 1: raise ValueError(f"max_fwd_stash_size should be at least 1 but is {max_fwd_stash_size}") self.max_fwd_stash_size = max_fwd_stash_size self.bwd_tensor_stash = {} # tensor_id => activation self.bwd_ev_stash = {} # tensor_id => ev0 self.curr_graph_id = None self.curr_autograd_node = None # -------- platform util functions -------- # def verify_sufficient_virtual_memory(): curr_pct = get_cpu_ram_pct() if curr_pct > self.virtual_memory_safe_pct: logger.warning(f"{curr_pct=}% > {self.virtual_memory_safe_pct=}% of virtual memory used") def get_cpu_ram_pct() -> float: # get the percentage of memory used by the system return psutil.virtual_memory().percent def get_tensor_id() -> int: # create a unique id for each tensor we are managing self.tensor_id += 1 return self.tensor_id def get_num_bytes_tensor(x: torch.Tensor) -> int: # get the number of bytes in a tensor, for memory management purposes return x.element_size() * x.nelement() # x.element_size() * x._base_storage().nbytes() # -------- core pack / unpack work -------- # def pack_tensor(activation: torch.Tensor) -> int: # activations are passed in during forward pass - from here we take over and return a unique id if self.is_first_forward_call: if len(self.tracker) != 0: raise ValueError("Backward pass should have cleared tracker of all tensors") # set training phase trackers self.is_first_forward_call = False self.is_first_backward_call = True # query for basic tensor info num_bytes = get_num_bytes_tensor(activation) tensor_id = get_tensor_id() # only offload hefty bois if they're activations on CUDA (our heuristic # for that is to check if they're not params or buffers)! if ( activation.device.type in ["cuda", "xpu"] and num_bytes >= self.min_tensor_size_bytes and ( not isinstance(activation, torch.nn.Parameter) and not (hasattr(torch.nn, "Buffer") and isinstance(activation, torch.nn.Buffer)) ) ): if self.use_streams: # First, sync back and dereference previously offloaded tensors # as the offloading should be done sufficiently long ago. 
for id in list(self.fwd_stash.keys()): if id <= tensor_id - self.max_fwd_stash_size: _, ev = self.fwd_stash[id] self.s0.wait_event(ev) del self.fwd_stash[id] else: break # Sync in, offload, and add an event to sync back later self.s1.wait_stream(self.s0) stream = self.s1 if self.use_streams else self.s0 with stream if self.accelerator_type == "xpu" else torch.cuda.stream(stream): cpu_tensor = torch.empty_like(activation, pin_memory=self.use_pin_memory, device="cpu") cpu_tensor.copy_(activation, non_blocking=True) self.tracker[tensor_id] = ( cpu_tensor, True, # True = (in future) modified ) if self.use_streams: event = self.s1.record_event() # Stash to keep activation alive til s1 is done self.fwd_stash[tensor_id] = (activation, event) else: self.tracker[tensor_id] = ( activation, False, ) # False = not modified, tensor is as is return tensor_id def unpack_tensor_single_stream(unpack_tensor_id: int) -> torch.Tensor: # backward pass - we are called with the tensor_id, which # we will use to retrieve the saved/offloaded tensor if self.is_first_backward_call: if self.is_first_forward_pass: self.is_first_forward_pass = False if self.use_pin_memory: verify_sufficient_virtual_memory() self.is_first_backward_call = False self.is_first_forward_call = True if unpack_tensor_id not in self.tracker: raise ValueError(f"Untracked tensor with id {unpack_tensor_id}") maybe_accelerator_tensor, modified = self.tracker[unpack_tensor_id] if modified: accelerator_tensor = maybe_accelerator_tensor.to(self.accelerator_type, non_blocking=True) maybe_accelerator_tensor = accelerator_tensor # clear tensor from tracking del self.tracker[unpack_tensor_id] return maybe_accelerator_tensor def unpack_tensor_with_streams(unpack_tensor_id: int) -> torch.Tensor: # backward pass - we are called with the tensor_id, which # we will use to retrieve the saved/offloaded tensor if self.is_first_backward_call: self.curr_graph_id = torch._C._current_graph_task_id() def wait_and_del_remaining_references() -> None: for id in list(self.bwd_tensor_stash.keys()): event = self.bwd_ev_stash[id] self.s1.wait_event(event) del self.bwd_tensor_stash[id] # Register a callback to the end of autograd to clean everything up torch.autograd.variable.Variable._execution_engine.queue_callback(wait_and_del_remaining_references) if self.is_first_forward_pass: self.is_first_forward_pass = False if self.use_pin_memory: verify_sufficient_virtual_memory() self.is_first_backward_call = False self.is_first_forward_call = True if unpack_tensor_id not in self.tracker: raise ValueError(f"untracked tensor with id {unpack_tensor_id}") maybe_accelerator_tensor, modified = self.tracker[unpack_tensor_id] if modified: # Get data on the current autograd node graph_id = torch._C._current_graph_task_id() node = torch._C._current_autograd_node() prev_node_ids = [] # If we're on a new node, mark prev node's tensors to be freed later if graph_id == self.curr_graph_id and self.curr_autograd_node != node: self.curr_autograd_node = node prev_node_ids = list(self.bwd_tensor_stash.keys()) brought_back_from_cpu = True if unpack_tensor_id in self.fwd_stash: maybe_accelerator_tensor = self.fwd_stash[unpack_tensor_id][0] brought_back_from_cpu = False else: # Kick off the process to bring tensors back with self.s1 if self.accelerator_type == "xpu" else torch.cuda.stream(self.s1): accelerator_tensor = maybe_accelerator_tensor.to(self.accelerator_type, non_blocking=True) maybe_accelerator_tensor = accelerator_tensor # Tell comp stream to wait for the info to be loaded before executing 
self.s0.wait_stream(self.s1) # Stash the tensor to keep memory alive until compute stream is complete self.bwd_tensor_stash[unpack_tensor_id] = maybe_accelerator_tensor # Note: [Track views of the unpacked] # Why do we get the use count of the unpacked tensor here? We want an # initial count to compare to later, during the post-hook of the # backward node, when we need to decide whether we're allowed to free # the tensor yet. In what obscure cases must we delay freeing the # tensor (and thus call record_stream)? # 1. Any of the outputs of the backward node is a view of the unpacked # tensor. # 2. In the case that this unpacked tensor will be used in a # checkpointed region, if one of the recomputed saved tensors ends # up as a view of the unpacked tensor. # 3. The user abuses the system somehow and manually relies on the # unpacked tensor to exist after the backward node has executed. storage_refcount = torch._C._storage_Use_Count(maybe_accelerator_tensor.untyped_storage()._cdata) def hook(outputs, inputs): # create events for the current node inputs/outputs if they were streamed in if brought_back_from_cpu: # See Note: [Track views of the unpacked] # IF any of the outputs is a view of the tensor, OR if a view of # the tensor has been saved as a part of checkpoint's recompute # process, OR the user has abusedly incurred a reference on the # unpacked tensor, THEN the tensor might be used later and we # cannot presume to delete it after only the current node is # done! So we use our frenemy, record_stream, to ensure the # Tensor stays unmessed with until it's done getting used in the # compute stream (s0 here). Note that the con here is we introduce # non-deterministic (thus higher) memory usage, but this case # should not happen often. unpacked_tensor = self.bwd_tensor_stash[unpack_tensor_id] if torch._C._storage_Use_Count(unpacked_tensor.untyped_storage()._cdata) > storage_refcount: unpacked_tensor.record_stream(self.s0) del self.bwd_tensor_stash[unpack_tensor_id] else: event = self.s0.record_event() self.bwd_ev_stash[unpack_tensor_id] = event # if there are still things in the fwd_stash, get rid of them as we're in bwd now for id in list(self.fwd_stash.keys()): _, ev = self.fwd_stash[id] self.s0.wait_event(ev) del self.fwd_stash[id] # wait on prev node's events and del those for id in prev_node_ids: event = self.bwd_ev_stash[id] self.s1.wait_event(event) del self.bwd_tensor_stash[id] return outputs node.register_hook(hook) # clear tensor from tracking del self.tracker[unpack_tensor_id] return maybe_accelerator_tensor unpack_tensor = unpack_tensor_with_streams if self.use_streams else unpack_tensor_single_stream super().__init__(pack_tensor, unpack_tensor) class NoOpManager(saved_tensors_hooks): """ A `saved_tensors_hook` manager used to disable any other `saved_tensors_hook` manager applied before. This relies on the behavior that only the most recently registered `saved_tensors_hook` will run. One example usage is to opt a local region of code out of activations offloading, which is usually applied globally to best track state. """ def __init__(self) -> None: def noop(tensor): return tensor super().__init__(noop, noop) def get_act_offloading_ctx_manager( model: nn.Module, use_pin_memory: bool = True, use_streams: bool = True, min_offload_size: int = 1024, max_fwd_stash_size: int = 5, warn_if_no_head: bool = True, ) -> OffloadActivations: """ Returns the activation offloading context manager for the model. All but the last output Linear in every step will be offloaded. 
    If activation offloading is enabled, we return the OffloadActivations context manager. If activation offloading
    is disabled, we return a NoOpManager context manager.

    Args:
        model (`nn.Module`):
            Model to wrap with the activation offloading context manager.
        use_pin_memory (`bool`, *optional*, defaults to `True`):
            Whether the offloaded Tensor will be placed in pinned memory on the CPU. Pinned memory allows the Tensor
            to be moved back onto GPU more quickly but is a limited resource.
        use_streams (`bool`, *optional*, defaults to `True`):
            Whether to use streams for performance optimization where the communications get overlapped with the
            computation. Requires a torch build after torch-2.5.0.
        min_offload_size (`int`, *optional*, defaults to `1024`):
            Minimum number of bytes a Tensor must be in order to qualify for offloading. If the tensor is too small,
            we do not want to waste bandwidth and resources moving it to CPU and back.
        max_fwd_stash_size (`int`, *optional*, defaults to `5`):
            Maximum size of the forward stash, or the maximum number of consecutive activations to keep alive during
            the forward pass. This number must be at least 1. Keeping alive more activations will potentially allow
            more overlap between the communication and compute streams at the cost of increasing memory usage.
            Keeping alive fewer activations will conserve memory, but may cause poor overlap between the streams,
            increasing runtime.
        warn_if_no_head (`bool`, *optional*, defaults to `True`):
            Whether to warn if no output head is detected. If set to `False`, no warning will be raised if no output
            head is detected.

    Returns:
        `contextlib.ContextDecorator`: Activation offloading context manager for the model.
    """
    activations_handling_ctx = OffloadActivations(
        use_pin_memory=use_pin_memory,
        use_streams=use_streams,
        min_offload_size=min_offload_size,
        max_fwd_stash_size=max_fwd_stash_size,
    )

    # Below is our hack to disable offloading the last output Linear in every
    # step, as the cost for offloading the activation and then soon after bringing
    # it back is expensive.
output_head_detected = False noop_ctx = NoOpManager() # Try to get the actual model if it's wrapped unwrapped_model = model if hasattr(unwrapped_model, "module"): unwrapped_model = unwrapped_model.module # check for PEFT models if hasattr(unwrapped_model, "base_model") and hasattr(unwrapped_model, "peft_config"): unwrapped_model = unwrapped_model.base_model # Check for different types of output heads if hasattr(unwrapped_model, "output"): if isinstance(unwrapped_model.output, nn.Module): unwrapped_model.output.register_forward_pre_hook(lambda *args: noop_ctx.__enter__()) unwrapped_model.output.register_forward_hook(lambda *args: noop_ctx.__exit__(), always_call=True) output_head_detected = True elif hasattr(unwrapped_model.output, "linear") and isinstance(unwrapped_model.output.linear, nn.Module): unwrapped_model.output.linear.register_forward_pre_hook(lambda *args: noop_ctx.__enter__()) unwrapped_model.output.linear.register_forward_hook(lambda *args: noop_ctx.__exit__(), always_call=True) output_head_detected = True # Check for HuggingFace model output heads elif hasattr(unwrapped_model, "lm_head"): unwrapped_model.lm_head.register_forward_pre_hook(lambda *args: noop_ctx.__enter__()) unwrapped_model.lm_head.register_forward_hook(lambda *args: noop_ctx.__exit__(), always_call=True) output_head_detected = True # Check for decoder-based models elif hasattr(unwrapped_model, "decoder"): decoder = unwrapped_model.decoder if hasattr(decoder, "output"): decoder.output.register_forward_pre_hook(lambda *args: noop_ctx.__enter__()) decoder.output.register_forward_hook(lambda *args: noop_ctx.__exit__(), always_call=True) output_head_detected = True # Some models have lm_head in the decoder elif hasattr(decoder, "lm_head"): decoder.lm_head.register_forward_pre_hook(lambda *args: noop_ctx.__enter__()) decoder.lm_head.register_forward_hook(lambda *args: noop_ctx.__exit__(), always_call=True) output_head_detected = True # Check for transformer models with final layer norm elif hasattr(unwrapped_model, "final_layer_norm") or hasattr(unwrapped_model, "ln_f"): final_norm = getattr(unwrapped_model, "final_layer_norm", None) or unwrapped_model.ln_f final_norm.register_forward_pre_hook(lambda *args: noop_ctx.__enter__()) final_norm.register_forward_hook(lambda *args: noop_ctx.__exit__(), always_call=True) output_head_detected = True # Check for models with head module elif hasattr(unwrapped_model, "head") and isinstance(unwrapped_model.head, nn.Module): unwrapped_model.head.register_forward_pre_hook(lambda *args: noop_ctx.__enter__()) unwrapped_model.head.register_forward_hook(lambda *args: noop_ctx.__exit__(), always_call=True) output_head_detected = True if not output_head_detected and warn_if_no_head: logger.warning( "During activation offloading, no output head was detected. If your model has an output head, it will be " "offloaded. This usually greatly slows training, given the large vocabulary size. To change this " "behavior, set your output head as model.output and make it an nn.Module. You can disable this warning by " "passing `warn_if_no_head=False`." ) # Disable offloading for any Liger modules for name, module in unwrapped_model.named_modules(): if "liger" in name.lower(): module.register_forward_pre_hook(lambda *args: noop_ctx.__enter__()) module.register_forward_hook(lambda *args: noop_ctx.__exit__(), always_call=True) return activations_handling_ctx
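

def _offloading_example() -> None:
    """Illustrative usage sketch (not part of the public API): wrap only the
    forward pass; offloaded activations stream back during the backward pass.
    Assumes a CUDA device, since only CUDA/XPU activations qualify for
    offloading."""
    model = nn.Sequential(nn.Linear(512, 512), nn.ReLU(), nn.Linear(512, 512)).cuda()
    x = torch.randn(8, 512, device="cuda", requires_grad=True)
    with OffloadActivations(use_streams=False):
        loss = model(x).sum()
    loss.backward()  # saved activations are brought back from CPU here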
trl/trl/models/activation_offloading.py/0
{ "file_path": "trl/trl/models/activation_offloading.py", "repo_id": "trl", "token_count": 10269 }
540
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # /// script # dependencies = [ # "trl @ git+https://github.com/huggingface/trl.git", # "peft", # ] # /// """ # Full training ``` python trl/scripts/sft.py \ --model_name_or_path Qwen/Qwen2-0.5B \ --dataset_name trl-lib/Capybara \ --learning_rate 2.0e-5 \ --num_train_epochs 1 \ --packing \ --per_device_train_batch_size 2 \ --gradient_accumulation_steps 8 \ --gradient_checkpointing \ --eos_token '<|im_end|>' \ --eval_strategy steps \ --eval_steps 100 \ --output_dir Qwen2-0.5B-SFT \ --push_to_hub ``` # LoRA ``` python trl/scripts/sft.py \ --model_name_or_path Qwen/Qwen2-0.5B \ --dataset_name trl-lib/Capybara \ --learning_rate 2.0e-4 \ --num_train_epochs 1 \ --packing \ --per_device_train_batch_size 2 \ --gradient_accumulation_steps 8 \ --gradient_checkpointing \ --eos_token '<|im_end|>' \ --eval_strategy steps \ --eval_steps 100 \ --use_peft \ --lora_r 32 \ --lora_alpha 16 \ --output_dir Qwen2-0.5B-SFT \ --push_to_hub ``` """ import argparse from accelerate import logging from datasets import load_dataset from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer from transformers.models.auto.modeling_auto import MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES from trl import ( DatasetMixtureConfig, ModelConfig, ScriptArguments, SFTConfig, SFTTrainer, TrlParser, clone_chat_template, get_dataset, get_kbit_device_map, get_peft_config, get_quantization_config, ) logger = logging.get_logger(__name__) def main(script_args, training_args, model_args, dataset_args): ################ # Model init kwargs & Tokenizer ################ quantization_config = get_quantization_config(model_args) model_kwargs = dict( revision=model_args.model_revision, trust_remote_code=model_args.trust_remote_code, attn_implementation=model_args.attn_implementation, torch_dtype=model_args.torch_dtype, device_map=get_kbit_device_map() if quantization_config is not None else None, quantization_config=quantization_config, ) # Create model config = AutoConfig.from_pretrained(model_args.model_name_or_path) valid_image_text_architectures = MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES.values() if config.architectures and any(arch in valid_image_text_architectures for arch in config.architectures): from transformers import AutoModelForImageTextToText model = AutoModelForImageTextToText.from_pretrained(model_args.model_name_or_path, **model_kwargs) else: model = AutoModelForCausalLM.from_pretrained(model_args.model_name_or_path, **model_kwargs) # Create tokenizer tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code, use_fast=True ) # Set default chat template if needed if tokenizer.chat_template is None: # TODO: source should be passed as an argument model, tokenizer = clone_chat_template(model, tokenizer, "Qwen/Qwen3-0.6B") # Load the dataset if dataset_args.datasets and script_args.dataset_name: logger.warning( "Both `datasets` and `dataset_name` are provided. 
The `datasets` argument will be used to load the "
            "dataset and `dataset_name` will be ignored."
        )
        dataset = get_dataset(dataset_args)
    elif dataset_args.datasets and not script_args.dataset_name:
        dataset = get_dataset(dataset_args)
    elif not dataset_args.datasets and script_args.dataset_name:
        dataset = load_dataset(
            script_args.dataset_name, name=script_args.dataset_config, streaming=script_args.dataset_streaming
        )
    else:
        raise ValueError("Either `datasets` or `dataset_name` must be provided.")

    # Initialize the SFT trainer
    trainer = SFTTrainer(
        model=model,
        args=training_args,
        train_dataset=dataset[script_args.dataset_train_split],
        eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None,
        processing_class=tokenizer,
        peft_config=get_peft_config(model_args),
    )

    # Train the model
    trainer.train()

    # Save and push to Hub
    trainer.save_model(training_args.output_dir)
    if training_args.push_to_hub:
        trainer.push_to_hub(dataset_name=script_args.dataset_name)


def make_parser(subparsers: argparse._SubParsersAction = None):
    dataclass_types = (ScriptArguments, SFTConfig, ModelConfig, DatasetMixtureConfig)
    if subparsers is not None:
        parser = subparsers.add_parser("sft", help="Run the SFT training script", dataclass_types=dataclass_types)
    else:
        parser = TrlParser(dataclass_types)
    return parser


if __name__ == "__main__":
    parser = make_parser()
    # When using the trl cli, this script may be run with additional arguments corresponding to accelerate arguments.
    # To ensure that their parsing does not interfere with the script arguments, parse the arguments with
    # `return_remaining_strings=True`, then ignore the remaining strings.
    script_args, training_args, model_args, dataset_args, _ = parser.parse_args_and_config(
        return_remaining_strings=True
    )
    main(script_args, training_args, model_args, dataset_args)
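
# Illustrative aside (standalone, not used above): the dataset-source
# precedence implemented in `main`, expressed as a pure function with quick
# self-checks.
def _resolve_dataset_source(has_mixture: bool, has_name: bool) -> str:
    if has_mixture:
        return "mixture"  # `datasets` always wins; `dataset_name` is ignored with a warning
    if has_name:
        return "hub"
    raise ValueError("Either `datasets` or `dataset_name` must be provided.")


assert _resolve_dataset_source(True, True) == "mixture"
assert _resolve_dataset_source(False, True) == "hub"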
trl/trl/scripts/sft.py/0
{ "file_path": "trl/trl/scripts/sft.py", "repo_id": "trl", "token_count": 2328 }
541
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Any, Optional from transformers import TrainingArguments from .sft_config import SFTConfig @dataclass class GKDConfig(SFTConfig): """ Configuration class for [`GKDTrainer`]. This class includes only the parameters that are specific to GKD training. For a full list of training arguments, please refer to the [`~transformers.TrainingArguments`] and [`SFTConfig`] documentation. Args: temperature (`float`, *optional*, defaults to `0.9`): Temperature for sampling. The higher the temperature, the more random the completions. lmbda (`float`, *optional*, defaults to `0.5`): Lambda parameter that controls the student data fraction (i.e., the proportion of on-policy student-generated outputs). beta (`float`, *optional*, defaults to `0.5`): Interpolation coefficient between `0.0` and `1.0` of the Generalized Jensen-Shannon Divergence loss. When beta is `0.0`, the loss is the KL divergence. When beta is `1.0`, the loss is the Inverse KL Divergence. max_new_tokens (`int`, *optional*, defaults to `128`): Maximum number of tokens to generate per completion. teacher_model_name_or_path (`str` or `None`, *optional*, defaults to `None`): Model name or path of the teacher model. If `None`, the teacher model will be the same as the model being trained. teacher_model_init_kwargs (`dict[str, Any]]` or `None`, *optional*, defaults to `None`): Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the teacher model from a string. disable_dropout (`bool`, *optional*, defaults to `True`): Whether to disable dropout in the model. seq_kd (`bool`, *optional*, defaults to `False`): Seq_kd parameter that controls whether to perform Sequence-Level KD (can be viewed as supervised FT on teacher-generated output). """ _VALID_DICT_FIELDS = TrainingArguments._VALID_DICT_FIELDS + ["teacher_model_init_kwargs"] temperature: float = field( default=0.9, metadata={"help": "Temperature for sampling. The higher the temperature, the more random the completions."}, ) lmbda: float = field( default=0.5, metadata={ "help": "Lambda parameter that controls the student data fraction (i.e., the proportion of on-policy " "student-generated outputs)." }, ) beta: float = field( default=0.5, metadata={ "help": "Interpolation coefficient between `0.0` and `1.0` of the Generalized Jensen-Shannon Divergence " "loss. When beta is `0.0`, the loss is the KL divergence. When beta is `1.0`, the loss is the Inverse KL " "Divergence." }, ) max_new_tokens: int = field( default=128, metadata={"help": "Maximum number of tokens to generate per completion."}, ) teacher_model_name_or_path: Optional[str] = field( default=None, metadata={ "help": "Model name or path of the teacher model. If `None`, the teacher model will be the same as the " "model being trained." 
        },
    )
    teacher_model_init_kwargs: Optional[dict[str, Any]] = field(
        default=None,
        metadata={
            "help": "Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the "
            "teacher model from a string."
        },
    )
    disable_dropout: bool = field(
        default=True,
        metadata={"help": "Whether to disable dropout in the model."},
    )
    seq_kd: bool = field(
        default=False,
        metadata={
            "help": "Seq_kd parameter that controls whether to perform Sequence-Level KD (can be viewed as supervised "
            "FT on teacher-generated output)."
        },
    )

    def __post_init__(self):
        super().__post_init__()
        # check lmbda and beta are in the range [0, 1]
        if self.lmbda < 0.0 or self.lmbda > 1.0:
            raise ValueError("lmbda must be in the range [0.0, 1.0].")
        if self.beta < 0.0 or self.beta > 1.0:
            raise ValueError("beta must be in the range [0.0, 1.0].")
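
# Illustrative aside (a sketch of the generalized Jensen-Shannon divergence
# described in the docstring above; the trainer's actual loss additionally
# handles temperature scaling and padding masks):
import torch
import torch.nn.functional as F


def _generalized_jsd(student_logits, teacher_logits, beta=0.5):
    s = F.log_softmax(student_logits, dim=-1)
    t = F.log_softmax(teacher_logits, dim=-1)
    if beta == 0:  # forward KL(teacher || student)
        return F.kl_div(s, t, log_target=True, reduction="batchmean")
    if beta == 1:  # reverse KL(student || teacher)
        return F.kl_div(t, s, log_target=True, reduction="batchmean")
    b = torch.tensor(beta)
    # log of the mixture m = (1 - beta) * student + beta * teacher
    m = torch.logsumexp(torch.stack([s + torch.log1p(-b), t + torch.log(b)]), dim=0)
    return beta * F.kl_div(m, t, log_target=True, reduction="batchmean") + (1 - beta) * F.kl_div(
        m, s, log_target=True, reduction="batchmean"
    )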
trl/trl/trainer/gkd_config.py/0
{ "file_path": "trl/trl/trainer/gkd_config.py", "repo_id": "trl", "token_count": 1873 }
542
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from dataclasses import dataclass, field from typing import Literal, Optional from ..trainer.utils import OnPolicyConfig @dataclass class PPOConfig(OnPolicyConfig): r""" Configuration class for the [`PPOTrainer`]. This class includes only the parameters that are specific to PPO training. For a full list of training arguments, please refer to the [`~transformers.TrainingArguments`] and [`OnPolicyConfig`] documentation. Note that default values in this class may differ from those in [`~transformers.TrainingArguments`]. Using [`~transformers.HfArgumentParser`] we can turn this class into [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the command line. Parameters: exp_name (`str`, *optional*, defaults to `os.path.basename(__file__)[:-3]`): Name of this experiment. reward_model_path (`str`, *optional*, defaults to `"EleutherAI/pythia-160m"`): Path to the reward model. model_adapter_name (`str` or `None`, *optional*, defaults to `None`): Name of the train target PEFT adapter, when using LoRA with multiple adapters. ref_adapter_name (`str` or `None`, *optional*, defaults to `None`): Name of the reference PEFT adapter, when using LoRA with multiple adapters. num_ppo_epochs (`int`, *optional*, defaults to `4`): Number of epochs to train. whiten_rewards (`bool`, *optional*, defaults to `False`): Whether to whiten the rewards. kl_coef (`float`, *optional*, defaults to `0.05`): KL coefficient. kl_estimator (`Literal["k1", "k3"]`, *optional*, defaults to `"k1"`): Which estimator for KL-Divergence to use from [Approximating KL Divergence](http://joschu.net/blog/kl-approx.html). Defaults to "k1", a straightforward, unbiased estimator. Can be set to "k3", an unbiased estimator with lower variance which "appears to be a strictly better estimator". Cannot be set to "k2", as it is used for logging purposes. cliprange (`float`, *optional*, defaults to `0.2`): Clip range. vf_coef (`float`, *optional*, defaults to `0.1`): Value function coefficient. cliprange_value (`float`, *optional*, defaults to `0.2`): Clip range for the value function. gamma (`float`, *optional*, defaults to `1.0`): Discount factor. lam (`float`, *optional*, defaults to `0.95`): Lambda value for GAE. ds3_gather_for_generation (`bool`, *optional*, defaults to `True`): This setting applies to DeepSpeed ZeRO-3. If enabled, the policy model weights are gathered for generation, improving generation speed. However, disabling this option allows training models that exceed the VRAM capacity of a single GPU, albeit at the cost of slower generation. 
""" exp_name: str = field( default=os.path.basename(__file__)[:-3], metadata={"help": "Name of this experiment."}, ) reward_model_path: str = field( default="EleutherAI/pythia-160m", metadata={"help": "Path to the reward model."}, ) model_adapter_name: Optional[str] = field( default=None, metadata={"help": "Name of the train target PEFT adapter, when using LoRA with multiple adapters."}, ) ref_adapter_name: Optional[str] = field( default=None, metadata={"help": "Name of the reference PEFT adapter, when using LoRA with multiple adapters."}, ) num_ppo_epochs: int = field( default=4, metadata={"help": "Number of epochs to train."}, ) whiten_rewards: bool = field( default=False, metadata={"help": "Whether to whiten the rewards."}, ) kl_coef: float = field( default=0.05, metadata={"help": "KL coefficient."}, ) kl_estimator: Literal["k1", "k3"] = field( default="k1", metadata={ "help": "Which estimator for KL-Divergence to use from Approximating KL Divergence " "(http://joschu.net/blog/kl-approx.html). Defaults to 'k1', a straightforward, unbiased estimator. Can be " "set to 'k3', an unbiased estimator with lower variance which 'appears to be a strictly better " "estimator'. Cannot be set to 'k2', as it is used for logging purposes." }, ) cliprange: float = field( default=0.2, metadata={"help": "Clip range."}, ) vf_coef: float = field( default=0.1, metadata={"help": "Value function coefficient."}, ) cliprange_value: float = field( default=0.2, metadata={"help": "Clip range for the value function."}, ) gamma: float = field( default=1.0, metadata={"help": "Discount factor."}, ) lam: float = field( default=0.95, metadata={"help": "Lambda value for GAE."}, ) ds3_gather_for_generation: bool = field( default=True, metadata={ "help": "This setting applies to DeepSpeed ZeRO-3. If enabled, the policy model weights are gathered for " "generation, improving generation speed. However, disabling this option allows training models that " "exceed the VRAM capacity of a single GPU, albeit at the cost of slower generation." }, )
trl/trl/trainer/ppo_config.py/0
{ "file_path": "trl/trl/trainer/ppo_config.py", "repo_id": "trl", "token_count": 2306 }
543