hf_public_repos/transformers/tests/models/mbart/test_tokenization_mbart.py
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020


@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3                                                   unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    # overwrite from test_tokenization_common to speed up test
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

    @unittest.skip("Need to fix this after #26538")
    def test_training_new_tokenizer(self):
        pass


@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
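Note: the fairseq-parity assertions above encode MBart's convention that labels end with [..., eos, lang_code] while decoder inputs start with the language code. As a reading aid, here is a minimal list-based sketch of that rotation, under the assumption that it mirrors what the library's MBart shift_tokens_right does (the real function operates on padded torch tensors, not plain lists):

def shift_tokens_right_sketch(labels, pad_token_id):
    # Rotate the last non-pad token (the language code) to position 0,
    # shifting everything else one step to the right.
    last_non_pad = max(i for i, tok in enumerate(labels) if tok != pad_token_id)
    return [labels[last_non_pad]] + labels[:last_non_pad] + labels[last_non_pad + 1 :]

# Labels end with [..., 2 (eos), RO_CODE]; decoder inputs must start with RO_CODE
# and end with eos, exactly as test_batch_fairseq_parity asserts.
assert shift_tokens_right_sketch([9019, 96, 2, 250020], pad_token_id=1) == [250020, 9019, 96, 2]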
hf_public_repos/transformers/tests/models/xglm/test_tokenization_xglm.py
# coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_008)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]  # fmt: skip

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            'input_ids': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
            'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
        }  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
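Note: both SentencePiece-based suites above add tokenizer.fairseq_offset to raw SentencePiece ids. A minimal sketch of the id layout this pattern assumes (an offset of 1 with fairseq-style specials at the low ids, as in XLM-R-like tokenizers; the real mapping lives in the tokenizer classes, and the offset value here is an assumption for illustration):

FAIRSEQ_SPECIALS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1  # assumed value; the tests read it from tokenizer.fairseq_offset


def model_id(sp_id: int) -> int:
    """Shift a raw SentencePiece piece id past the reserved fairseq specials."""
    return sp_id + FAIRSEQ_OFFSET


# matches the pattern asserted above: [285, 46, 10, 170, 382] -> shifted by the offset
assert [model_id(i) for i in [285, 46, 10, 170, 382]] == [286, 47, 11, 171, 383]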
hf_public_repos/transformers/tests/models/xglm/test_modeling_flax_xglm.py
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import tempfile
import unittest

import transformers
from transformers import XGLMConfig, XGLMTokenizer, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, require_sentencepiece, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    import jax
    import jax.numpy as jnp
    import numpy as np

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )
    from transformers.models.xglm.modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel


if is_torch_available():
    import torch


@require_flax
class FlaxXGLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length], self.vocab_size), 3, self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, attention_mask = self.prepare_config_and_inputs()

        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_sentencepiece
@require_flax
class FlaxXGLMModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxXGLMModel, FlaxXGLMForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxXGLMForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxXGLMModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @slow
    def test_batch_generation(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        model.config.num_beams = 1
        model.config.do_sample = False

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(inputs["input_ids"], attention_mask=inputs["attention_mask"]).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of questions, but I'm not sure if I'm",
            "Hey, I'm a newbie to the forum and I'",
        ]

        self.assertListEqual(output_string, expected_string)

    # overwrite from common since `attention_mask` in combination
    # with `causal_mask` behaves slightly differently
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                # Flax models don't use the `use_cache` option and cache is not returned as a default.
                # So we disable `use_cache` here for PyTorch model.
                pt_model.config.use_cache = False
                fx_model = model_class(config, dtype=jnp.float32)
                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    # overwrite from common since `attention_mask` in combination
    # with `causal_mask` behaves slightly differently
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                pt_model.config.use_cache = False
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")

                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/xglm-564M")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
hf_public_repos/transformers/tests/models/xglm/test_modeling_tf_xglm.py
# coding=utf-8
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import unittest

from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )


@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()


@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]  # fmt: skip
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
hf_public_repos/transformers/tests/models/xglm/test_modeling_xglm.py
# coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
import gc
import math
import unittest

from transformers import XGLMConfig, is_torch_available
from transformers.testing_utils import (
    require_torch,
    require_torch_accelerator,
    require_torch_fp16,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMTokenizer


class XGLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(
        self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False
    ):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config(gradient_checkpointing=gradient_checkpointing)

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(
        self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False
    ):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            gradient_checkpointing=gradient_checkpointing,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = self.prepare_config_and_inputs()

        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_xglm_model(self, config, input_ids, input_mask, head_mask, *args):
        model = XGLMModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, head_mask=head_mask)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.num_hidden_layers)

    def create_and_check_xglm_model_past(self, config, input_ids, input_mask, head_mask, *args):
        model = XGLMModel(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and token_type_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_xglm_model_attention_mask_past(self, config, input_ids, input_mask, head_mask, *args):
        model = XGLMModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.zeros((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_xglm_model_past_large_inputs(self, config, input_ids, input_mask, head_mask, *args):
        model = XGLMModel(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)

        output, past = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=1)

        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past)[
            "last_hidden_state"
        ]
        self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1])

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, *args):
        model = XGLMForCausalLM(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, *args, gradient_checkpointing=False
    ):
        model = XGLMForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_xglm_weight_initialization(self, config, *args):
        model = XGLMModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_torch
class XGLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (XGLMModel, XGLMForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (XGLMForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": XGLMModel, "text-generation": XGLMForCausalLM} if is_torch_available() else {}
    )
    fx_compatible = True
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = XGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xglm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xglm_model(*config_and_inputs)

    def test_xglm_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xglm_model_past(*config_and_inputs)

    def test_xglm_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xglm_model_attention_mask_past(*config_and_inputs)

    def test_xglm_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xglm_model_past_large_inputs(*config_and_inputs)

    def test_xglm_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_xglm_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_xglm_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xglm_weight_initialization(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_model_parallelism(self):
        super().test_model_parallelism()


@require_torch
class XGLMModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean up as much GPU memory occupied by PyTorch as possible
        gc.collect()
        torch.cuda.empty_cache()

    def _test_lm_generate_xglm_helper(
        self,
        gradient_checkpointing=False,
        verify_outputs=True,
    ):
        model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        else:
            model.gradient_checkpointing_disable()
        model.to(torch_device)
        input_ids = torch.tensor([[2, 268, 9865]], dtype=torch.long, device=torch_device)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]  # fmt: skip
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].tolist(), expected_output_ids)

    @slow
    def test_batch_generation(self):
        model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        model.to(torch_device)
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), max_new_tokens=12
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_lm_generate_xglm(self):
        self._test_lm_generate_xglm_helper()

    @slow
    def test_lm_generate_xglm_with_gradient_checkpointing(self):
        self._test_lm_generate_xglm_helper(gradient_checkpointing=True)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        torch.manual_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="pt")
        input_ids = tokenized.input_ids
        output_ids = model.generate(input_ids, do_sample=True, num_beams=1)
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STRS = [
            # TODO: remove this once we move to torch 2.0
            # torch 1.13.1 + cu116
            "Today is a nice day and the sun is shining. A nice day with warm rainy",
            # torch 2.0 + cu117
            "Today is a nice day and the water is still cold. We just stopped off for some fresh",
        ]
        self.assertIn(output_str, EXPECTED_OUTPUT_STRS)

    @slow
    def test_xglm_sample_max_time(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        model.to(torch_device)

        torch.manual_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="pt")
        input_ids = tokenized.input_ids.to(torch_device)

        MAX_TIME = 0.15

        start = datetime.datetime.now()
        model.generate(input_ids, do_sample=True, max_time=MAX_TIME, max_length=256)
        duration = datetime.datetime.now() - start
        self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME))
        self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME))

        start = datetime.datetime.now()
        model.generate(input_ids, do_sample=False, max_time=MAX_TIME, max_length=256)
        duration = datetime.datetime.now() - start
        self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME))
        self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME))

        start = datetime.datetime.now()
        model.generate(input_ids, do_sample=False, num_beams=2, max_time=MAX_TIME, max_length=256)
        duration = datetime.datetime.now() - start
        self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME))
        self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME))

        start = datetime.datetime.now()
        model.generate(input_ids, do_sample=True, num_beams=2, max_time=MAX_TIME, max_length=256)
        duration = datetime.datetime.now() - start
        self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME))
        self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME))

        start = datetime.datetime.now()
        model.generate(input_ids, do_sample=False, max_time=None, max_length=256)
        duration = datetime.datetime.now() - start
        self.assertGreater(duration, datetime.timedelta(seconds=1.25 * MAX_TIME))

    @require_torch_accelerator
    @require_torch_fp16
    def test_batched_nan_fp16(self):
        model_name = "facebook/xglm-564M"
        tokenizer = XGLMTokenizer.from_pretrained(model_name, use_fast=False, padding_side="left")

        model = XGLMForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, use_cache=True).to(torch_device)
        model = model.eval()

        batch = tokenizer(["Who are you?", "Joe Biden is the president of"], padding=True, return_tensors="pt")

        input_ids = batch["input_ids"].to(torch_device)
        attention_mask = batch["attention_mask"].to(torch_device)

        with torch.no_grad():
            outputs = model(input_ids, attention_mask=attention_mask)
            self.assertFalse(
                torch.isnan(outputs.logits[0]).any().item()
            )  # the first logits could contain NaNs if it fails
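Note: the create_and_check_xglm_model_past* checks above all follow one pattern: a full forward pass over [prefix + next] must agree with a cached forward pass over [next] alone. A toy stand-in for that pattern, using a running sum as a mock "model" whose cache is its accumulated state (purely illustrative, no transformers APIs):

def toy_forward(tokens, cache=0):
    # "outputs" here are just cumulative sums; `cache` plays the role of past_key_values
    outputs = []
    for tok in tokens:
        cache += tok
        outputs.append(cache)
    return outputs, cache

full_outputs, _ = toy_forward([1, 2, 3, 4])     # no cache: recompute everything
_, past = toy_forward([1, 2, 3])                # prefill: build the cache
step_outputs, _ = toy_forward([4], cache=past)  # decode one token from the cache
assert step_outputs[-1] == full_outputs[-1]     # cached and uncached paths agree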
hf_public_repos/transformers/tests/tokenization/test_tokenization_utils.py
# coding=utf-8 # Copyright 2018 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ isort:skip_file """ import os import pickle import tempfile import unittest from typing import Callable, Optional import numpy as np from transformers import ( BatchEncoding, BertTokenizer, BertTokenizerFast, PreTrainedTokenizer, PreTrainedTokenizerFast, TensorType, TokenSpan, is_tokenizers_available, ) from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer from transformers.testing_utils import CaptureStderr, require_flax, require_tf, require_tokenizers, require_torch, slow if is_tokenizers_available(): from tokenizers import Tokenizer from tokenizers.models import WordPiece class TokenizerUtilsTest(unittest.TestCase): def check_tokenizer_from_pretrained(self, tokenizer_class): s3_models = list(tokenizer_class.max_model_input_sizes.keys()) for model_name in s3_models[:1]: tokenizer = tokenizer_class.from_pretrained(model_name) self.assertIsNotNone(tokenizer) self.assertIsInstance(tokenizer, tokenizer_class) self.assertIsInstance(tokenizer, PreTrainedTokenizer) for special_tok in tokenizer.all_special_tokens: self.assertIsInstance(special_tok, str) special_tok_id = tokenizer.convert_tokens_to_ids(special_tok) self.assertIsInstance(special_tok_id, int) def assert_dump_and_restore(self, be_original: BatchEncoding, equal_op: Optional[Callable] = None): batch_encoding_str = pickle.dumps(be_original) self.assertIsNotNone(batch_encoding_str) be_restored = pickle.loads(batch_encoding_str) # Ensure is_fast is correctly restored self.assertEqual(be_restored.is_fast, be_original.is_fast) # Ensure encodings are correctly restored (only fast tokenizers carry them) if be_original.is_fast: self.assertIsNotNone(be_restored.encodings) else: self.assertIsNone(be_restored.encodings) # Ensure the values are the same for original_v, restored_v in zip(be_original.values(), be_restored.values()): if equal_op: self.assertTrue(equal_op(restored_v, original_v)) else: self.assertEqual(restored_v, original_v) @slow def test_pretrained_tokenizers(self): self.check_tokenizer_from_pretrained(GPT2Tokenizer) def test_tensor_type_from_str(self): self.assertEqual(TensorType("tf"), TensorType.TENSORFLOW) self.assertEqual(TensorType("pt"), TensorType.PYTORCH) self.assertEqual(TensorType("np"), TensorType.NUMPY) @require_tokenizers def test_batch_encoding_pickle(self): tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") # Python, no tensor with self.subTest("BatchEncoding (Python, return_tensors=None)"): self.assert_dump_and_restore(tokenizer_p("Small example to encode")) with self.subTest("BatchEncoding (Python, return_tensors=NUMPY)"): self.assert_dump_and_restore( tokenizer_p("Small example to encode", return_tensors=TensorType.NUMPY), np.array_equal ) with self.subTest("BatchEncoding (Rust, return_tensors=None)"): self.assert_dump_and_restore(tokenizer_r("Small example to encode")) with self.subTest("BatchEncoding (Rust, return_tensors=NUMPY)"):
self.assert_dump_and_restore( tokenizer_r("Small example to encode", return_tensors=TensorType.NUMPY), np.array_equal ) @require_tf @require_tokenizers def test_batch_encoding_pickle_tf(self): import tensorflow as tf def tf_array_equals(t1, t2): return tf.reduce_all(tf.equal(t1, t2)) tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") with self.subTest("BatchEncoding (Python, return_tensors=TENSORFLOW)"): self.assert_dump_and_restore( tokenizer_p("Small example to encode", return_tensors=TensorType.TENSORFLOW), tf_array_equals ) with self.subTest("BatchEncoding (Rust, return_tensors=TENSORFLOW)"): self.assert_dump_and_restore( tokenizer_r("Small example to encode", return_tensors=TensorType.TENSORFLOW), tf_array_equals ) @require_torch @require_tokenizers def test_batch_encoding_pickle_pt(self): import torch tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") with self.subTest("BatchEncoding (Python, return_tensors=PYTORCH)"): self.assert_dump_and_restore( tokenizer_p("Small example to encode", return_tensors=TensorType.PYTORCH), torch.equal ) with self.subTest("BatchEncoding (Rust, return_tensors=PYTORCH)"): self.assert_dump_and_restore( tokenizer_r("Small example to encode", return_tensors=TensorType.PYTORCH), torch.equal ) @require_tokenizers def test_batch_encoding_is_fast(self): tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased") tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") with self.subTest("Python Tokenizer"): self.assertFalse(tokenizer_p("Small example to_encode").is_fast) with self.subTest("Rust Tokenizer"): self.assertTrue(tokenizer_r("Small example to_encode").is_fast) @require_tokenizers def test_batch_encoding_word_to_tokens(self): tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased") encoded = tokenizer_r(["Test", "\xad", "test"], is_split_into_words=True) self.assertEqual(encoded.word_to_tokens(0), TokenSpan(start=1, end=2)) self.assertEqual(encoded.word_to_tokens(1), None) self.assertEqual(encoded.word_to_tokens(2), TokenSpan(start=2, end=3)) def test_batch_encoding_with_labels(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="np") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="np") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="np", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) @require_torch def test_batch_encoding_with_labels_pt(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="pt") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="pt") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="pt", 
prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) @require_tf def test_batch_encoding_with_labels_tf(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="tf") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="tf") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="tf", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) @require_flax def test_batch_encoding_with_labels_jax(self): batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]}) tensor_batch = batch.convert_to_tensors(tensor_type="jax") self.assertEqual(tensor_batch["inputs"].shape, (2, 3)) self.assertEqual(tensor_batch["labels"].shape, (2,)) # test converting the converted with CaptureStderr() as cs: tensor_batch = batch.convert_to_tensors(tensor_type="jax") self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}") batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0}) tensor_batch = batch.convert_to_tensors(tensor_type="jax", prepend_batch_axis=True) self.assertEqual(tensor_batch["inputs"].shape, (1, 3)) self.assertEqual(tensor_batch["labels"].shape, (1,)) def test_padding_accepts_tensors(self): features = [{"input_ids": np.array([0, 1, 2])}, {"input_ids": np.array([0, 1, 2, 3])}] tokenizer = BertTokenizer.from_pretrained("bert-base-cased") batch = tokenizer.pad(features, padding=True) self.assertTrue(isinstance(batch["input_ids"], np.ndarray)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) batch = tokenizer.pad(features, padding=True, return_tensors="np") self.assertTrue(isinstance(batch["input_ids"], np.ndarray)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) @require_torch def test_padding_accepts_tensors_pt(self): import torch features = [{"input_ids": torch.tensor([0, 1, 2])}, {"input_ids": torch.tensor([0, 1, 2, 3])}] tokenizer = BertTokenizer.from_pretrained("bert-base-cased") batch = tokenizer.pad(features, padding=True) self.assertTrue(isinstance(batch["input_ids"], torch.Tensor)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) batch = tokenizer.pad(features, padding=True, return_tensors="pt") self.assertTrue(isinstance(batch["input_ids"], torch.Tensor)) self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) @require_tf def test_padding_accepts_tensors_tf(self): import tensorflow as tf features = [{"input_ids": tf.constant([0, 1, 2])}, {"input_ids": tf.constant([0, 1, 2, 3])}] tokenizer = BertTokenizer.from_pretrained("bert-base-cased") batch = tokenizer.pad(features, padding=True) self.assertTrue(isinstance(batch["input_ids"], tf.Tensor)) self.assertEqual(batch["input_ids"].numpy().tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]]) batch = tokenizer.pad(features, padding=True, return_tensors="tf") self.assertTrue(isinstance(batch["input_ids"], tf.Tensor)) self.assertEqual(batch["input_ids"].numpy().tolist(), [[0, 1, 2, 
tokenizer.pad_token_id], [0, 1, 2, 3]]) @require_tokenizers def test_instantiation_from_tokenizers(self): bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]")) PreTrainedTokenizerFast(tokenizer_object=bert_tokenizer) @require_tokenizers def test_instantiation_from_tokenizers_json_file(self): bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]")) with tempfile.TemporaryDirectory() as tmpdirname: bert_tokenizer.save(os.path.join(tmpdirname, "tokenizer.json")) PreTrainedTokenizerFast(tokenizer_file=os.path.join(tmpdirname, "tokenizer.json"))
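# --- Illustrative sketch (not part of the original test suite) ---
# A self-contained, hypothetical example of the BatchEncoding pickling
# round-trip that assert_dump_and_restore above exercises, built by hand so
# that no checkpoint download is needed. Uses only the module-level imports.
def _sketch_batch_encoding_pickle_roundtrip():
    be = BatchEncoding({"input_ids": [[1, 2, 3], [4, 5, 6]]}).convert_to_tensors(tensor_type="np")
    restored = pickle.loads(pickle.dumps(be))
    # Tensor values survive the round trip; a slow (non-fast) encoding keeps
    # `encodings` as None, mirroring the assertions above.
    assert np.array_equal(restored["input_ids"], be["input_ids"])
    assert restored.is_fast == be.is_fast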
hf_public_repos/transformers/tests/tokenization/test_tokenization_fast.py
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import concurrent.futures import json import os import shutil import tempfile import unittest from transformers import AutoTokenizer, PreTrainedTokenizerFast from transformers.testing_utils import require_tokenizers from ..test_tokenization_common import TokenizerTesterMixin @require_tokenizers class PreTrainedTokenizationFastTest(TokenizerTesterMixin, unittest.TestCase): rust_tokenizer_class = PreTrainedTokenizerFast test_slow_tokenizer = False test_rust_tokenizer = True from_pretrained_vocab_key = "tokenizer_file" def setUp(self): self.test_rust_tokenizer = False # because we don't have pretrained_vocab_files_map super().setUp() self.test_rust_tokenizer = True model_paths = ["robot-test/dummy-tokenizer-fast", "robot-test/dummy-tokenizer-wordlevel"] self.bytelevel_bpe_model_name = "SaulLu/dummy-tokenizer-bytelevel-bpe" # Inclusion of 2 tokenizers to test different types of models (Unigram and WordLevel for the moment) self.tokenizers_list = [(PreTrainedTokenizerFast, model_path, {}) for model_path in model_paths] tokenizer = PreTrainedTokenizerFast.from_pretrained(model_paths[0]) tokenizer.save_pretrained(self.tmpdirname) def test_tokenizer_mismatch_warning(self): # We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any # model pass @unittest.skip( "We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model" ) def test_encode_decode_with_spaces(self): pass @unittest.skip( "We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model" ) def test_added_tokens_serialization(self): pass @unittest.skip( "We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model" ) def test_additional_special_tokens_serialization(self): pass def test_pretrained_model_lists(self): # We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any # model pass def test_prepare_for_model(self): # We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any # model pass def test_rust_tokenizer_signature(self): # PreTrainedTokenizerFast doesn't have tokenizer_file in its signature pass def test_training_new_tokenizer(self): tmpdirname_orig = self.tmpdirname # Here we want to test the 2 available tokenizers that use 2 different types of models: Unigram and WordLevel. 
for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): try: self.tmpdirname = tempfile.mkdtemp() tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer.save_pretrained(self.tmpdirname) super().test_training_new_tokenizer() finally: # Even if the test fails, we must be sure that the folder is deleted and that the default tokenizer # is restored shutil.rmtree(self.tmpdirname) self.tmpdirname = tmpdirname_orig def test_training_new_tokenizer_with_special_tokens_change(self): tmpdirname_orig = self.tmpdirname # Here we want to test the 2 available tokenizers that use 2 different types of models: Unigram and WordLevel. for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): try: self.tmpdirname = tempfile.mkdtemp() tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer.save_pretrained(self.tmpdirname) super().test_training_new_tokenizer_with_special_tokens_change() finally: # Even if the test fails, we must be sure that the folder is deleted and that the default tokenizer # is restored shutil.rmtree(self.tmpdirname) self.tmpdirname = tmpdirname_orig def test_training_new_tokenizer_with_bytelevel(self): tokenizer = self.rust_tokenizer_class.from_pretrained(self.bytelevel_bpe_model_name) toy_text_iterator = ("a" for _ in range(1000)) new_tokenizer = tokenizer.train_new_from_iterator(text_iterator=toy_text_iterator, length=1000, vocab_size=50) encoding_ids = new_tokenizer.encode("a🤗") self.assertEqual(encoding_ids, [64, 172, 253, 97, 245]) def test_init_from_tokenizers_model(self): from tokenizers import Tokenizer sentences = ["Hello, y'all!", "How are you 😁 ? 
There should not be any issue right?"] tokenizer = Tokenizer.from_pretrained("t5-base") # Enable padding tokenizer.enable_padding(pad_id=0, pad_token="<pad>", length=512, pad_to_multiple_of=8) self.assertEqual( tokenizer.padding, { "length": 512, "pad_to_multiple_of": 8, "pad_id": 0, "pad_token": "<pad>", "pad_type_id": 0, "direction": "right", }, ) fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer) tmpdirname = tempfile.mkdtemp() fast_tokenizer.save_pretrained(tmpdirname) fast_from_saved = PreTrainedTokenizerFast.from_pretrained(tmpdirname) for tok in [fast_tokenizer, fast_from_saved]: self.assertEqual(tok.pad_token_id, 0) self.assertEqual(tok.padding_side, "right") self.assertEqual(tok.pad_token, "<pad>") self.assertEqual(tok.init_kwargs["max_length"], 512) self.assertEqual(tok.init_kwargs["pad_to_multiple_of"], 8) self.assertEqual(tok(sentences, padding = True), {'input_ids': [[8774, 6, 3, 63, 31, 1748, 55, 1, 0, 0, 0, 0,0, 0, 0, 0],[ 571, 33, 25, 3, 2, 3, 58, 290, 225, 59, 36, 136, 962, 269, 58, 1]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}) # fmt: skip tokenizer.enable_truncation(8, stride=0, strategy="longest_first", direction="right") self.assertEqual( tokenizer.truncation, {"max_length": 8, "stride": 0, "strategy": "longest_first", "direction": "right"} ) fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer) tmpdirname = tempfile.mkdtemp() fast_tokenizer.save_pretrained(tmpdirname) fast_from_saved = PreTrainedTokenizerFast.from_pretrained(tmpdirname) for tok in [fast_tokenizer, fast_from_saved]: self.assertEqual(tok.truncation_side, "right") self.assertEqual(tok.init_kwargs["truncation_strategy"], "longest_first") self.assertEqual(tok.init_kwargs["max_length"], 8) self.assertEqual(tok.init_kwargs["stride"], 0) # NOTE even if the model has a default max_length, it is not used... # thus tok(sentences, truncation = True) does nothing and does not warn either self.assertEqual(tok(sentences, truncation = True, max_length = 8), {'input_ids': [[8774, 6, 3, 63, 31, 1748, 55, 1],[ 571, 33, 25, 3, 2, 3, 58, 1]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1]]}) # fmt: skip @require_tokenizers class TokenizerVersioningTest(unittest.TestCase): def test_local_versioning(self): tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") json_tokenizer = json.loads(tokenizer._tokenizer.to_str()) json_tokenizer["model"]["vocab"]["huggingface"] = len(tokenizer) with tempfile.TemporaryDirectory() as tmp_dir: # Hack to save this in the tokenizer_config.json tokenizer.init_kwargs["fast_tokenizer_files"] = ["tokenizer.4.0.0.json"] tokenizer.save_pretrained(tmp_dir) json.dump(json_tokenizer, open(os.path.join(tmp_dir, "tokenizer.4.0.0.json"), "w")) # This should pick the new tokenizer file as the version of Transformers is > 4.0.0 new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir) self.assertEqual(len(new_tokenizer), len(tokenizer) + 1) json_tokenizer = json.loads(new_tokenizer._tokenizer.to_str()) self.assertIn("huggingface", json_tokenizer["model"]["vocab"]) # Will need to be adjusted if we reach v42 and this test is still here. 
# Should pick the old tokenizer file as the version of Transformers is < 4.0.0 shutil.move(os.path.join(tmp_dir, "tokenizer.4.0.0.json"), os.path.join(tmp_dir, "tokenizer.42.0.0.json")) tokenizer.init_kwargs["fast_tokenizer_files"] = ["tokenizer.42.0.0.json"] tokenizer.save_pretrained(tmp_dir) new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir) self.assertEqual(len(new_tokenizer), len(tokenizer)) json_tokenizer = json.loads(new_tokenizer._tokenizer.to_str()) self.assertNotIn("huggingface", json_tokenizer["model"]["vocab"]) def test_repo_versioning(self): # This repo has two tokenizer files, one for v4.0.0 and above with an added token, one for versions lower. repo = "hf-internal-testing/test-two-tokenizers" # This should pick the new tokenizer file as the version of Transformers is > 4.0.0 tokenizer = AutoTokenizer.from_pretrained(repo) self.assertEqual(len(tokenizer), 28997) json_tokenizer = json.loads(tokenizer._tokenizer.to_str()) self.assertIn("huggingface", json_tokenizer["model"]["vocab"]) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers old_transformers.tokenization_utils_base.__version__ = "3.0.0" old_tokenizer = old_transformers.models.auto.AutoTokenizer.from_pretrained(repo) self.assertEqual(len(old_tokenizer), 28996) json_tokenizer = json.loads(old_tokenizer._tokenizer.to_str()) self.assertNotIn("huggingface", json_tokenizer["model"]["vocab"]) @require_tokenizers class ReduceMutableBorrowTests(unittest.TestCase): def test_async_share_tokenizer(self): # See https://github.com/huggingface/transformers/pull/12550 # and https://github.com/huggingface/tokenizers/issues/537 tokenizer = PreTrainedTokenizerFast.from_pretrained("robot-test/dummy-tokenizer-wordlevel") text = "The Matrix is a 1999 science fiction action film." with concurrent.futures.ThreadPoolExecutor() as executor: futures = [executor.submit(self.fetch, tokenizer, text) for i in range(10)] return_value = [future.result() for future in futures] self.assertEqual(return_value, [[1, 10, 0, 8, 0, 18, 0, 0, 0, 2] for i in range(10)]) def fetch(self, tokenizer, text): return tokenizer.encode(text, truncation="longest_first", padding="longest")
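# --- Illustrative sketch (not part of the original test suite) ---
# A minimal, hypothetical example of train_new_from_iterator, the API driven
# by the test_training_new_tokenizer* tests above. The base checkpoint is one
# of the dummy models already used in this file (downloaded from the Hub);
# the corpus can be any iterator of strings.
def _sketch_train_new_tokenizer(corpus_iterator, base="robot-test/dummy-tokenizer-fast"):
    tokenizer = PreTrainedTokenizerFast.from_pretrained(base)
    # Re-trains the underlying tokenizers model on the new text while keeping
    # the normalization / pre-tokenization pipeline of the base tokenizer.
    return tokenizer.train_new_from_iterator(text_iterator=corpus_iterator, vocab_size=100)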
hf_public_repos/transformers/tests/fsdp/test_fsdp.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import os import unittest from functools import partial from parameterized import parameterized import tests.trainer.test_trainer from tests.trainer.test_trainer import TrainerIntegrationCommon # noqa from transformers import is_torch_available from transformers.testing_utils import ( TestCasePlus, backend_device_count, execute_subprocess_async, mockenv_context, require_accelerate, require_fsdp, require_torch_accelerator, require_torch_multi_accelerator, slow, torch_device, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import FSDPOption, set_seed from transformers.utils import is_accelerate_available, is_torch_bf16_available_on_device if is_torch_available(): from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_1 else: is_torch_greater_or_equal_than_2_1 = False # default torch.distributed port DEFAULT_MASTER_PORT = "10999" dtypes = ["fp16"] if is_torch_bf16_available_on_device(torch_device): dtypes += ["bf16"] sharding_strategies = ["full_shard", "shard_grad_op"] state_dict_types = ["FULL_STATE_DICT", "SHARDED_STATE_DICT"] set_seed(42) params = list(itertools.product(sharding_strategies, dtypes)) def get_master_port(real_launcher=False): """ When using a single gpu launcher emulation (i.e. not deepspeed or python -m torch.distributed) the issue is that once the port is tied it can't be used anywhere else outside of this process, since torch.dist doesn't free the port until the process exits. Therefore for the sake of being able to run both emulated launcher and normal launcher tests we need 2 distinct ports. This function will give the right port in the right context. For real launcher it'll give the base port, for emulated launcher it'll give the base port + 1. In both cases a string is returned. Args: `real_launcher`: whether a real launcher is going to be used, or the emulated one """ master_port_base = os.environ.get("DS_TEST_PORT", DEFAULT_MASTER_PORT) if not real_launcher: master_port_base = str(int(master_port_base) + 1) return master_port_base if is_torch_available(): from tests.trainer.test_trainer import ( # noqa RegressionModelConfig, RegressionPreTrainedModel, ) # hack to restore original logging level pre #21700 get_regression_trainer = partial(tests.trainer.test_trainer.get_regression_trainer, log_level="info") require_fsdp_version = require_fsdp if is_accelerate_available(): from accelerate.utils.constants import ( FSDP_PYTORCH_VERSION, FSDP_SHARDING_STRATEGY, ) require_fsdp_version = partial(require_fsdp, min_version=FSDP_PYTORCH_VERSION) def get_launcher(distributed=False, use_accelerate=False): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. 
for now testing with just 2 gpus max (since some quality tests may give different # results with more gpus because we use very little data) num_gpus = min(2, backend_device_count(torch_device)) if distributed else 1 master_port = get_master_port(real_launcher=True) if use_accelerate: return f"""accelerate launch --num_processes {num_gpus} --main_process_port {master_port} --use_fsdp --fsdp_auto_wrap_policy TRANSFORMER_BASED_WRAP --fsdp_state_dict_type SHARDED_STATE_DICT --fsdp_transformer_layer_cls_to_wrap BertLayer""".split() return f"torchrun --nnodes 1 --nproc-per-node {num_gpus} --master-port {master_port}".split() def _parameterized_custom_name_func(func, param_num, param): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args)) return f"{func.__name__}_{param_based_name}" @require_accelerate @require_torch_accelerator @require_fsdp_version class TrainerIntegrationFSDP(TestCasePlus, TrainerIntegrationCommon): def setUp(self): super().setUp() master_port = get_master_port(real_launcher=False) self.dist_env_1_gpu = { "MASTER_ADDR": "localhost", "MASTER_PORT": master_port, "RANK": "0", "LOCAL_RANK": "0", "WORLD_SIZE": "1", } self.fsdp_config = { "backward_prefetch": "backward_pre", "forward_prefetch": "False", "limit_all_gathers": "False", "use_orig_params": "True", "sync_module_states": "True", "activation_checkpointing": "False", "min_num_params": 1, } def tearDown(self): super().tearDown() @parameterized.expand(params, name_func=_parameterized_custom_name_func) def test_fsdp_config(self, sharding_strategy, dtype): output_dir = self.get_auto_remove_tmp_dir() kwargs = { "output_dir": output_dir, "train_len": 128, "save_steps": 5, "learning_rate": 0.1, "fsdp": f"{sharding_strategy} offload auto_wrap", "fsdp_config": self.fsdp_config, } kwargs[dtype] = True with mockenv_context(**self.dist_env_1_gpu): trainer = get_regression_trainer(**kwargs) self.assertEqual(trainer.args.fsdp[0], sharding_strategy) self.assertEqual(trainer.args.fsdp[1], FSDPOption.OFFLOAD) self.assertEqual(trainer.args.fsdp[2], FSDPOption.AUTO_WRAP) for k, v in trainer.args.fsdp_config.items(): self.assertEqual(v, self.fsdp_config[k]) self.assertEqual(os.environ.get("ACCELERATE_USE_FSDP", "false"), "true") @parameterized.expand(params, name_func=_parameterized_custom_name_func) @require_torch_multi_accelerator @slow def test_basic_run(self, sharding_strategy, dtype): launcher = get_launcher(distributed=True, use_accelerate=False) output_dir = self.get_auto_remove_tmp_dir() args = self.get_base_args(output_dir, 1, 50).split() + [f"--{dtype}"] fsdp_args = ["--fsdp", f"{sharding_strategy} auto_wrap", "--fsdp_transformer_layer_cls_to_wrap", "BertLayer"] script = [f"{self.examples_dir_str}/pytorch/text-classification/run_glue.py"] cmd = launcher + script + args + fsdp_args execute_subprocess_async(cmd, env=self.get_env()) @parameterized.expand(dtypes) @require_torch_multi_accelerator @slow @unittest.skipIf(not is_torch_greater_or_equal_than_2_1, reason="This test on pytorch 2.0 takes 4 hours.") def test_basic_run_with_cpu_offload(self, dtype): launcher = get_launcher(distributed=True, use_accelerate=False) output_dir = self.get_auto_remove_tmp_dir() args = self.get_base_args(output_dir, 1, 50).split() + [f"--{dtype}", "--max_steps", "10"] fsdp_args = ["--fsdp", "full_shard auto_wrap offload", "--fsdp_transformer_layer_cls_to_wrap", "BertLayer"] script =
[f"{self.examples_dir_str}/pytorch/text-classification/run_glue.py"] cmd = launcher + script + args + fsdp_args execute_subprocess_async(cmd, env=self.get_env()) @parameterized.expand(state_dict_types, name_func=_parameterized_custom_name_func) @require_torch_multi_accelerator @slow def test_training_and_can_resume_normally(self, state_dict_type): output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False) sharding_strategy = "full_shard" use_accelerate = state_dict_type == "SHARDED_STATE_DICT" launcher = get_launcher(True, use_accelerate=use_accelerate) args = self.get_base_args(output_dir, 2, 25).split() script = [f"{self.examples_dir_str}/pytorch/text-classification/run_glue.py"] logs = self.run_cmd_and_get_logs(use_accelerate, sharding_strategy, launcher, script, args, output_dir) # resume from ckpt checkpoint = os.path.join(output_dir, "checkpoint-115") resume_args = args + f"--resume_from_checkpoint {checkpoint}".split() logs_resume = self.run_cmd_and_get_logs( use_accelerate, sharding_strategy, launcher, script, resume_args, output_dir ) for log, log1 in zip(logs, logs_resume): if "learning_rate" in log: self.assertAlmostEqual(log["learning_rate"], log1["learning_rate"], delta=1e-5) def run_cmd_and_get_logs(self, use_accelerate, sharding_strategy, launcher, script, args, output_dir): if not use_accelerate: fsdp_args = [ "--fsdp", f"{sharding_strategy} auto_wrap", "--fsdp_transformer_layer_cls_to_wrap", "BertLayer", ] cmd = launcher + script + args + fsdp_args else: fsdp_config = f""" --fsdp_sharding_strategy {FSDP_SHARDING_STRATEGY.index(sharding_strategy.upper()) + 1} """.split() cmd = launcher + fsdp_config + script + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(cmd, env=self.get_env()) logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history return logs def get_base_args(self, output_dir, num_epochs, logging_steps): return f""" --model_name_or_path bert-base-cased --task_name mrpc --output_dir {output_dir} --overwrite_output_dir --do_train --max_seq_length 128 --per_device_train_batch_size 16 --learning_rate 5e-5 --num_train_epochs {num_epochs} --lr_scheduler_type cosine --logging_steps {logging_steps} --save_strategy epoch --do_eval --evaluation_strategy epoch --report_to none """
hf_public_repos/transformers/tests/quantization/bnb/test_mixed_int8.py
# coding=utf-8 # Copyright 2022 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_accelerate_available, is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def get_some_linear_layer(model): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_4h_to_h if is_accelerate_available(): from accelerate import PartialState from accelerate.logging import get_logger logger = get_logger(__name__) _ = PartialState() if is_torch_available(): import torch import torch.nn as nn class LoRALayer(nn.Module): """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only""" def __init__(self, module: nn.Module, rank: int): super().__init__() self.module = module self.adapter = nn.Sequential( nn.Linear(module.in_features, rank, bias=False), nn.Linear(rank, module.out_features, bias=False), ) small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5 nn.init.normal_(self.adapter[0].weight, std=small_std) nn.init.zeros_(self.adapter[1].weight) self.adapter.to(module.weight.device) def forward(self, input, *args, **kwargs): return self.module(input, *args, **kwargs) + self.adapter(input) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class BaseMixedInt8Test(unittest.TestCase): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters, otherwise the quantization may not work as expected) # Therefore here we use only bloom-1b7 to test our module model_name = "bigscience/bloom-1b7" # Constant values EXPECTED_RELATIVE_DIFFERENCE = ( 1.540025 # This was obtained on a Quadro RTX 8000 so the number might slightly change ) input_text = "Hello my name is" EXPECTED_OUTPUT = "Hello my name is John.\nI am a friend of the family.\n" MAX_NEW_TOKENS = 10 def setUp(self): # Models and tokenizer self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) class MixedInt8Test(BaseMixedInt8Test): def setUp(self): super().setUp() # Models and tokenizer self.model_fp16 = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.float16, device_map="auto" ) self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors.
Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.model_fp16 del self.model_8bit gc.collect() torch.cuda.empty_cache() def test_get_keys_to_not_convert_trust_remote_code(self): r""" Test the `get_keys_to_not_convert` function with `trust_remote_code` models. """ from accelerate import init_empty_weights from transformers.integrations.bitsandbytes import get_keys_to_not_convert model_id = "mosaicml/mpt-7b" config = AutoConfig.from_pretrained( model_id, trust_remote_code=True, revision="ada218f9a93b5f1c6dce48a4cc9ff01fcba431e7" ) with init_empty_weights(): model = AutoModelForCausalLM.from_config( config, trust_remote_code=True, code_revision="ada218f9a93b5f1c6dce48a4cc9ff01fcba431e7" ) self.assertEqual(get_keys_to_not_convert(model), ["transformer.wte"]) def test_get_keys_to_not_convert(self): r""" Test the `get_keys_to_not_convert` function. """ from accelerate import init_empty_weights from transformers import AutoModelForMaskedLM, Blip2ForConditionalGeneration, MptForCausalLM, OPTForCausalLM from transformers.integrations.bitsandbytes import get_keys_to_not_convert model_id = "mosaicml/mpt-7b" config = AutoConfig.from_pretrained(model_id, revision="72e5f594ce36f9cabfa2a9fd8f58b491eb467ee7") with init_empty_weights(): model = MptForCausalLM(config) # The order of the keys does not matter, so we compare sorted copies before checking equality # (note that `list.sort()` returns `None`, so comparing `.sort()` results would always pass vacuously); same for the other tests. self.assertEqual(sorted(get_keys_to_not_convert(model)), sorted(["lm_head", "transformer.wte"])) model_id = "Salesforce/blip2-opt-2.7b" config = AutoConfig.from_pretrained(model_id, revision="1ef7f63a8f0a144c13fdca8103eb7b4691c74cec") with init_empty_weights(): model = Blip2ForConditionalGeneration(config) self.assertEqual( sorted(get_keys_to_not_convert(model)), sorted(["language_model.lm_head", "language_model.model.decoder.embed_tokens"]), ) model_id = "facebook/opt-350m" config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5") with init_empty_weights(): model = OPTForCausalLM(config) self.assertEqual(sorted(get_keys_to_not_convert(model)), sorted(["lm_head", "model.decoder.embed_tokens"])) model_id = "roberta-large" config = AutoConfig.from_pretrained(model_id, revision="716877d372b884cad6d419d828bac6c85b3b18d9") with init_empty_weights(): model = AutoModelForMaskedLM.from_config(config) self.assertEqual( sorted(get_keys_to_not_convert(model)), sorted(["roberta.embeddings.word_embeddings", "lm_head", "lm_head.decoder"]), ) def test_quantization_config_json_serialization(self): r""" A simple test to check if the quantization config is correctly serialized and deserialized """ config = self.model_8bit.config self.assertTrue(hasattr(config, "quantization_config")) _ = config.to_dict() _ = config.to_diff_dict() _ = config.to_json_string() def test_original_dtype(self): r""" A simple test to check if the model successfully stores the original dtype """ self.assertTrue(hasattr(self.model_8bit.config, "_pre_quantization_dtype")) self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype")) self.assertTrue(self.model_8bit.config._pre_quantization_dtype == torch.float16) def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from bitsandbytes.nn import Int8Params mem_fp16 = self.model_fp16.get_memory_footprint() mem_8bit = self.model_8bit.get_memory_footprint()
self.assertAlmostEqual(mem_fp16 / mem_8bit, self.EXPECTED_RELATIVE_DIFFERENCE) self.assertTrue(get_some_linear_layer(self.model_8bit).weight.__class__ == Int8Params) def test_linear_are_8bit(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from transformers import T5PreTrainedModel self.model_fp16.get_memory_footprint() self.model_8bit.get_memory_footprint() for name, module in self.model_8bit.named_modules(): if isinstance(module, torch.nn.Linear): if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules: self.assertTrue(module.weight.dtype == torch.int8) def test_llm_skip(self): r""" A simple test to check if `llm_int8_skip_modules` works as expected """ import bitsandbytes as bnb quantization_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_skip_modules=["classifier"]) seq_classification_model = AutoModelForSequenceClassification.from_pretrained( "roberta-large-mnli", quantization_config=quantization_config ) self.assertTrue(seq_classification_model.roberta.encoder.layer[0].output.dense.weight.dtype == torch.int8) self.assertTrue( isinstance(seq_classification_model.roberta.encoder.layer[0].output.dense, bnb.nn.Linear8bitLt) ) self.assertTrue(isinstance(seq_classification_model.classifier.dense, nn.Linear)) self.assertTrue(seq_classification_model.classifier.dense.weight.dtype != torch.int8) self.assertTrue(isinstance(seq_classification_model.classifier.out_proj, nn.Linear)) self.assertTrue(seq_classification_model.classifier.out_proj.weight.dtype != torch.int8) def test_generate_quality(self): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate a few tokens (5-10) and check their output. """ encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = self.model_8bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertEqual(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_generate_quality_config(self): r""" Test that loading the model with the config is equivalent """ bnb_config = BitsAndBytesConfig() bnb_config.load_in_8bit = True model_8bit_from_config = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_8bit_from_config.generate( input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10 ) self.assertEqual(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_raise_if_config_and_load_in_8bit(self): r""" Test that loading the model with the config and `load_in_8bit` raises an error """ bnb_config = BitsAndBytesConfig() with self.assertRaises(ValueError): _ = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, load_in_8bit=True, device_map="auto", llm_int8_enable_fp32_cpu_offload=True, ) def test_device_and_dtype_assignment(self): r""" Test whether trying to cast (or assigning a device to) a model after converting it in 8-bit will throw an error. Also checks that other models are cast correctly.
""" with self.assertRaises(ValueError): # Tries with `str` self.model_8bit.to("cpu") with self.assertRaises(ValueError): # Tries with a `dtype`` self.model_8bit.to(torch.float16) with self.assertRaises(ValueError): # Tries with a `device` self.model_8bit.to(torch.device("cuda:0")) with self.assertRaises(ValueError): # Tries with a `device` self.model_8bit.float() with self.assertRaises(ValueError): # Tries with a `device` self.model_8bit.half() # Test if we did not break anything encoded_input = self.tokenizer(self.input_text, return_tensors="pt") self.model_fp16 = self.model_fp16.to(torch.float32) _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Check this does not throw an error _ = self.model_fp16.to("cpu") # Check this does not throw an error _ = self.model_fp16.half() # Check this does not throw an error _ = self.model_fp16.float() def test_fp32_int8_conversion(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly. """ model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_8bit=True, device_map="auto") self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32) def test_int8_serialization(self): r""" Test whether it is possible to serialize a model in 8-bit. """ from bitsandbytes.nn import Int8Params with tempfile.TemporaryDirectory() as tmpdirname: self.model_8bit.save_pretrained(tmpdirname) # check that the file `quantization_config` is present config = AutoConfig.from_pretrained(tmpdirname) self.assertTrue(hasattr(config, "quantization_config")) model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, load_in_8bit=True, device_map="auto") linear = get_some_linear_layer(model_from_saved) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_from_saved.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertEqual( self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT ) def test_int8_serialization_regression(self): r""" Test whether it is possible to serialize a model in 8-bit - using not safetensors """ from bitsandbytes.nn import Int8Params with tempfile.TemporaryDirectory() as tmpdirname: self.model_8bit.save_pretrained(tmpdirname, safe_serialization=False) # check that the file `quantization_config` is present config = AutoConfig.from_pretrained(tmpdirname) self.assertTrue(hasattr(config, "quantization_config")) model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, load_in_8bit=True, device_map="auto") linear = get_some_linear_layer(model_from_saved) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_from_saved.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertEqual( self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT ) def test_int8_serialization_sharded(self): r""" Test whether it is possible to serialize a model in 8-bit - sharded version. 
""" from bitsandbytes.nn import Int8Params with tempfile.TemporaryDirectory() as tmpdirname: self.model_8bit.save_pretrained(tmpdirname, max_shard_size="200MB") # check that the file `quantization_config` is present config = AutoConfig.from_pretrained(tmpdirname) self.assertTrue(hasattr(config, "quantization_config")) model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname) linear = get_some_linear_layer(model_from_saved) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_from_saved.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertEqual( self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT ) def test_int8_from_pretrained(self): r""" Test whether loading a 8bit model from the Hub works as expected """ from bitsandbytes.nn import Int8Params model_id = "ybelkada/bloom-1b7-8bit" model = AutoModelForCausalLM.from_pretrained(model_id) linear = get_some_linear_layer(model) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertEqual(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class MixedInt8T5Test(unittest.TestCase): @classmethod def setUpClass(cls): cls.model_name = "t5-small" cls.dense_act_model_name = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.input_text = "Translate in German: Hello, my dog is cute" def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ gc.collect() torch.cuda.empty_cache() def test_inference_without_keep_in_fp32(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly. `flan-t5-small` uses `T5DenseGatedActDense` whereas `t5-small` uses `T5DenseReluDense`. We need to test both cases. """ from transformers import T5ForConditionalGeneration modules = T5ForConditionalGeneration._keep_in_fp32_modules T5ForConditionalGeneration._keep_in_fp32_modules = None # test with `t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_8bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) T5ForConditionalGeneration._keep_in_fp32_modules = modules def test_inference_with_keep_in_fp32(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly. `flan-t5-small` uses `T5DenseGatedActDense` whereas `t5-small` uses `T5DenseReluDense`. We need to test both cases. 
""" import bitsandbytes as bnb from transformers import T5ForConditionalGeneration # test with `t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear8bitLt)) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_8bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) def test_inference_with_keep_in_fp32_serialized(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly on a serialized model. `flan-t5-small` uses `T5DenseGatedActDense` whereas `t5-small` uses `T5DenseReluDense`. We need to test both cases. """ import bitsandbytes as bnb from transformers import T5ForConditionalGeneration # test with `t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) model = T5ForConditionalGeneration.from_pretrained(tmp_dir) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear8bitLt)) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_8bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) class MixedInt8ModelClassesTest(BaseMixedInt8Test): def setUp(self): super().setUp() # model_name self.model_name = "bigscience/bloom-560m" self.seq_to_seq_name = "t5-small" # Different types of model self.base_model = AutoModel.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") # Sequence classification model self.sequence_model = AutoModelForSequenceClassification.from_pretrained( self.model_name, load_in_8bit=True, device_map="auto" ) # CausalLM model self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") # Seq2seq model self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained( self.seq_to_seq_name, load_in_8bit=True, device_map="auto" ) def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.base_model del self.sequence_model del self.model_8bit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def test_correct_head_class(self): r""" A simple test to check if the last modules for some classes (AutoModelForCausalLM or SequenceClassification) are kept in their native class. 
""" from bitsandbytes.nn import Int8Params # last param of a base model should be a linear8bit module self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Int8Params) # Other heads should be nn.Parameter self.assertTrue(self.model_8bit.lm_head.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter) class MixedInt8TestPipeline(BaseMixedInt8Test): def setUp(self): super().setUp() def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.pipe gc.collect() torch.cuda.empty_cache() def test_pipeline(self): r""" The aim of this test is to verify that the mixed int8 is compatible with `pipeline` from transformers. Since we used pipline for inference speed benchmarking we want to make sure that this feature does not break anything on pipline. """ # self._clear_cuda_cache() self.pipe = pipeline( "text-generation", model=self.model_name, model_kwargs={"device_map": "auto", "load_in_8bit": True}, max_new_tokens=self.MAX_NEW_TOKENS, ) # Real second forward pass pipeline_output = self.pipe(self.input_text) self.assertEqual(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUT) @require_torch_multi_gpu class MixedInt8TestMultiGpu(BaseMixedInt8Test): def setUp(self): super().setUp() def test_multi_gpu_loading(self): r""" This tests that the model has been loaded and can be used correctly on a multi-GPU setup. Let's just try to load a model on 2 GPUs and see if it works. The model we test has ~2GB of total, 3GB should suffice """ model_parallel = AutoModelForCausalLM.from_pretrained( self.model_name, load_in_8bit=True, device_map="balanced" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1}) # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Second real batch output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertEqual(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) @require_torch_multi_gpu class MixedInt8TestCpuGpu(BaseMixedInt8Test): def setUp(self): super().setUp() def check_inference_correctness(self, model): # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_parallel = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation output_text = self.tokenizer.decode(output_parallel[0], skip_special_tokens=True) self.assertEqual(output_text, self.EXPECTED_OUTPUT) def test_cpu_gpu_loading_random_device_map(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a random `device_map`. 
""" device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": 0, "lm_head": 0, "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": 0, "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 1, "transformer.h.10": 0, "transformer.h.11": 1, "transformer.h.12": 0, "transformer.h.13": 0, "transformer.h.14": 1, "transformer.h.15": 0, "transformer.h.16": 0, "transformer.h.17": 1, "transformer.h.18": 1, "transformer.h.19": 0, "transformer.h.20": 1, "transformer.h.21": 1, "transformer.h.22": 0, "transformer.h.23": 0, "transformer.ln_f": 1, } bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True) model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, quantization_config=bnb_config, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu"}) self.check_inference_correctness(model_8bit) def test_cpu_gpu_loading_custom_device_map(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. This time the device map is more organized than the test above and uses the abstraction `transformer.h` to encapsulate all the decoder layers. """ device_map = { "transformer.word_embeddings": "cpu", "transformer.word_embeddings_layernorm": "cpu", "lm_head": "cpu", "transformer.h": 0, "transformer.ln_f": 1, } bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True) # Load model model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, quantization_config=bnb_config, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu"}) self.check_inference_correctness(model_8bit) def test_cpu_gpu_disk_loading_custom_device_map(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. This time we also add `disk` on the device_map. """ device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": "cpu", "lm_head": 0, "transformer.h": 1, "transformer.ln_f": "disk", } bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True) with tempfile.TemporaryDirectory() as tmpdirname: # Load model model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, quantization_config=bnb_config, offload_folder=tmpdirname, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu", "disk"}) self.check_inference_correctness(model_8bit) def test_cpu_gpu_disk_loading_custom_device_map_kwargs(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. This time we also add `disk` on the device_map - using the kwargs directly instead of the quantization config """ device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": "cpu", "lm_head": 0, "transformer.h": 1, "transformer.ln_f": "disk", } with tempfile.TemporaryDirectory() as tmpdirname: # Load model model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True, offload_folder=tmpdirname, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. 
self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu", "disk"}) self.check_inference_correctness(model_8bit) class MixedInt8TestTraining(BaseMixedInt8Test): def setUp(self): self.model_name = "facebook/opt-350m" super().setUp() def test_training(self): if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"): return # Step 1: freeze all parameters model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True) self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()}) for param in model.parameters(): param.requires_grad = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability param.data = param.data.to(torch.float32) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(module)): module.q_proj = LoRALayer(module.q_proj, rank=16) module.k_proj = LoRALayer(module.k_proj, rank=16) module.v_proj = LoRALayer(module.v_proj, rank=16) # Step 3: dummy batch batch = self.tokenizer("Test batch ", return_tensors="pt").to(0) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): out = model.forward(**batch) out.logits.norm().backward() for module in model.modules(): if isinstance(module, LoRALayer): self.assertTrue(module.adapter[1].weight.grad is not None) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0) elif isinstance(module, nn.Embedding): self.assertTrue(module.weight.grad is None) class MixedInt8GPT2Test(MixedInt8Test): model_name = "gpt2-xl" EXPECTED_RELATIVE_DIFFERENCE = 1.8720077507258357 EXPECTED_OUTPUT = "Hello my name is John Doe, and I'm a big fan of" def test_int8_from_pretrained(self): r""" Test whether loading an 8-bit model from the Hub works as expected """ from bitsandbytes.nn import Int8Params model_id = "ybelkada/gpt2-xl-8bit" model = AutoModelForCausalLM.from_pretrained(model_id) linear = get_some_linear_layer(model) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertEqual(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
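# ---------------------------------------------------------------------------
# Usage sketch (illustration only, not collected by pytest): the loading
# pattern exercised by the CPU/GPU offload tests above. Setting
# `llm_int8_enable_fp32_cpu_offload=True` lets a `device_map` place some
# modules on the CPU, where they are kept in fp32 instead of int8. The
# checkpoint name is only an example; any causal LM on the Hub works the same.
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

    demo_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True)
    demo_model = AutoModelForCausalLM.from_pretrained(
        "bigscience/bloom-1b7", device_map="auto", quantization_config=demo_config
    )
    demo_tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
    demo_inputs = demo_tokenizer("Hello my name is", return_tensors="pt").to(0)
    print(demo_tokenizer.decode(demo_model.generate(**demo_inputs, max_new_tokens=10)[0], skip_special_tokens=True))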
0
hf_public_repos/transformers/tests/quantization
hf_public_repos/transformers/tests/quantization/bnb/README.md
# Testing mixed int8 quantization ![HFxbitsandbytes.png](https://cdn-uploads.huggingface.co/production/uploads/1660567705337-62441d1d9fdefb55a0b7d12c.png) The following is the recipe on how to effectively debug `bitsandbytes` integration on Hugging Face `transformers`. ## Library requirements + `transformers>=4.22.0` + `accelerate>=0.12.0` + `bitsandbytes>=0.31.5` ## Hardware requirements The following instructions are tested with 2 NVIDIA-Tesla T4 GPUs. To run `bitsandbytes` successfully you need a GPU that supports 8-bit tensor cores. Note that Turing, Ampere or newer architectures (e.g. T4, RTX 20 series, RTX 30 series, A40-A100, A6000) should be supported. ## Virtual envs ```bash conda create --name int8-testing python==3.8 pip install bitsandbytes>=0.31.5 pip install accelerate>=0.12.0 pip install transformers>=4.23.0 ``` If `transformers>=4.23.0` is not released yet, then use: ``` pip install git+https://github.com/huggingface/transformers.git ``` ## Troubleshooting A list of common errors: ### Torch does not correctly do the operations on GPU First check that: ```py import torch vec = torch.randn(1, 2, 3).to(0) ``` works without any error. If not, install torch using `conda` like: ```bash conda create --name int8-testing python==3.8 conda install pytorch torchvision torchaudio cudatoolkit=11.6 -c pytorch -c conda-forge pip install bitsandbytes>=0.31.5 pip install accelerate>=0.12.0 pip install transformers>=4.23.0 ``` For the latest pytorch instructions please see [this](https://pytorch.org/get-started/locally/); the snippet above should then work. ### `bitsandbytes operations are not supported under CPU!` This happens when some Linear weights are set to the CPU when using `accelerate`. Please check `model.hf_device_map` carefully and make sure that no `Linear` module is assigned to the CPU. It is fine to have the last module (usually the `lm_head`) set on CPU. ### `To use the type as a Parameter, please correct the detach() semantics defined by __torch_dispatch__() implementation.` Use the latest version of `accelerate` with a command such as: `pip install -U accelerate` and the problem should be solved. ### `Parameter has no attribute .CB` Same solution as above. ### `RuntimeError: CUDA error: an illegal memory access was encountered ... consider passing CUDA_LAUNCH_BLOCKING=1` Run your script by prepending `CUDA_LAUNCH_BLOCKING=1` and you should observe an error as described in the next section. ### `CUDA illegal memory error: an illegal memory access at line...` Check the CUDA versions with: ``` nvcc --version ``` and confirm it is the same version as the one detected by `bitsandbytes`. If not, run: ``` ls -l $CONDA_PREFIX/lib/libcudart.so ``` or ``` ls -l $LD_LIBRARY_PATH ``` and check that the symlink for `libcudart.so` points to the correct CUDA file. Sometimes `nvcc` detects the correct CUDA version but `bitsandbytes` doesn't; you have to make sure that the symlink that is set for the file `libcudart.so` is redirected to the correct CUDA file. Here is an example of a badly configured CUDA installation: `nvcc --version` gives: ![Screenshot 2022-08-15 at 15.12.23.png](https://cdn-uploads.huggingface.co/production/uploads/1660569220888-62441d1d9fdefb55a0b7d12c.png) which means that the detected CUDA version is 11.3 but `bitsandbytes` outputs: ![image.png](https://cdn-uploads.huggingface.co/production/uploads/1660569284243-62441d1d9fdefb55a0b7d12c.png) First check: ```bash echo $LD_LIBRARY_PATH ``` If this contains multiple paths separated by `:`, you have to make sure that the correct CUDA version is set by running ```bash ls -l $path/libcudart.so ``` on each path (`$path`) in the list. If it contains a single path, simply run ```bash ls -l $LD_LIBRARY_PATH/libcudart.so ``` and you will see ![Screenshot 2022-08-15 at 15.12.33.png](https://cdn-uploads.huggingface.co/production/uploads/1660569176504-62441d1d9fdefb55a0b7d12c.png) If you see that the file is linked to the wrong CUDA version (here 10.2), find the correct location for `libcudart.so` (`find / -name libcudart.so`) and replace the environment variable `LD_LIBRARY_PATH` with the one containing the correct `libcudart.so` file.
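## End-to-end sanity check

If the environment is healthy, a short script such as the one below should run end to end. This is a minimal debugging sketch, not part of the test suite; the checkpoint (`bigscience/bloom-1b7`, the model the tests use) and the prompt are only examples.

```py
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "bigscience/bloom-1b7"  # example checkpoint; any causal LM should behave the same
tokenizer = AutoTokenizer.from_pretrained(model_id)
# `load_in_8bit=True` triggers the bitsandbytes integration that this recipe debugs
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_8bit=True)

inputs = tokenizer("Hello my name is", return_tensors="pt").to(0)
outputs = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

If this crashes, work through the sections above in order (GPU ops, device map, CUDA symlinks).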
0
hf_public_repos/transformers/tests/quantization
hf_public_repos/transformers/tests/quantization/bnb/test_4bit.py
# coding=utf-8 # Copyright 2022 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def get_some_linear_layer(model): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_4h_to_h if is_torch_available(): import torch import torch.nn as nn class LoRALayer(nn.Module): """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only""" def __init__(self, module: nn.Module, rank: int): super().__init__() self.module = module self.adapter = nn.Sequential( nn.Linear(module.in_features, rank, bias=False), nn.Linear(rank, module.out_features, bias=False), ) small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5 nn.init.normal_(self.adapter[0].weight, std=small_std) nn.init.zeros_(self.adapter[1].weight) self.adapter.to(module.weight.device) def forward(self, input, *args, **kwargs): return self.module(input, *args, **kwargs) + self.adapter(input) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class Base4bitTest(unittest.TestCase): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters, otherwise the quantization may not work as expected) # Therefore here we use only bloom-1b7 to test our module model_name = "bigscience/bloom-1b7" # Constant values EXPECTED_RELATIVE_DIFFERENCE = ( 2.109659552692574 # This was obtained on an RTX Titan so the number might slightly change ) input_text = "Hello my name is" EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I") EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n") EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University") MAX_NEW_TOKENS = 10 def setUp(self): # Models and tokenizer self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) class Bnb4BitTest(Base4bitTest): def setUp(self): super().setUp() # Models and tokenizer self.model_fp16 = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.float16, device_map="auto" ) self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto") def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. 
Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.model_fp16 del self.model_4bit gc.collect() torch.cuda.empty_cache() def test_quantization_num_parameters(self): r""" Test if the number of returned parameters is correct. See: https://github.com/huggingface/transformers/issues/25978 """ num_params_4bit = self.model_4bit.num_parameters() num_params_fp16 = self.model_fp16.num_parameters() self.assertEqual(num_params_4bit, num_params_fp16) def test_quantization_config_json_serialization(self): r""" A simple test to check if the quantization config is correctly serialized and deserialized """ config = self.model_4bit.config self.assertTrue(hasattr(config, "quantization_config")) _ = config.to_dict() _ = config.to_diff_dict() _ = config.to_json_string() def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from bitsandbytes.nn import Params4bit mem_fp16 = self.model_fp16.get_memory_footprint() mem_4bit = self.model_4bit.get_memory_footprint() self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE) linear = get_some_linear_layer(self.model_4bit) self.assertTrue(linear.weight.__class__ == Params4bit) def test_original_dtype(self): r""" A simple test to check if the model successfully stores the original dtype """ self.assertTrue(hasattr(self.model_4bit.config, "_pre_quantization_dtype")) self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype")) self.assertTrue(self.model_4bit.config._pre_quantization_dtype == torch.float16) def test_linear_are_4bit(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from transformers import T5PreTrainedModel self.model_fp16.get_memory_footprint() self.model_4bit.get_memory_footprint() for name, module in self.model_4bit.named_modules(): if isinstance(module, torch.nn.Linear): if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uint8) def test_rwkv_4bit(self): r""" A simple test to check if 4-bit RWKV inference works as expected. """ model_id = "RWKV/rwkv-4-169m-pile" quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_use_double_quant=True) model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config) tok = AutoTokenizer.from_pretrained(model_id) text = "Hello my name is" input_ids = tok.encode(text, return_tensors="pt").to(0) _ = model.generate(input_ids, max_new_tokens=30) def test_generate_quality(self): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate a few tokens (5-10) and check their output. 
""" encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality_config(self): r""" Test that loading the model with the config is equivalent """ bnb_config = BitsAndBytesConfig() bnb_config.load_in_4bit = True model_4bit_from_config = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_4bit_from_config.generate( input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_raise_on_save_pretrained(self): r""" Test whether trying to save a model after converting it in 8-bit will throw a warning. """ with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname: self.model_4bit.save_pretrained(tmpdirname) def test_raise_if_config_and_load_in_4bit(self): r""" Test that loading the model with the config and `load_in_4bit` raises an error """ bnb_config = BitsAndBytesConfig() with self.assertRaises(ValueError): _ = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, load_in_4bit=True, device_map="auto", bnb_4bit_quant_type="nf4", ) def test_device_and_dtype_assignment(self): r""" Test whether trying to cast (or assigning a device to) a model after converting it in 8-bit will throw an error. Checks also if other models are casted correctly. """ with self.assertRaises(ValueError): # Tries with `str` self.model_4bit.to("cpu") with self.assertRaises(ValueError): # Tries with a `dtype`` self.model_4bit.to(torch.float16) with self.assertRaises(ValueError): # Tries with a `device` self.model_4bit.to(torch.device("cuda:0")) with self.assertRaises(ValueError): # Tries with a `device` self.model_4bit.float() with self.assertRaises(ValueError): # Tries with a `device` self.model_4bit.half() # Test if we did not break anything encoded_input = self.tokenizer(self.input_text, return_tensors="pt") self.model_fp16 = self.model_fp16.to(torch.float32) _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Check this does not throw an error _ = self.model_fp16.to("cpu") # Check this does not throw an error _ = self.model_fp16.half() # Check this does not throw an error _ = self.model_fp16.float() def test_fp32_4bit_conversion(self): r""" Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly. """ model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto") self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class Bnb4BitT5Test(unittest.TestCase): @classmethod def setUpClass(cls): cls.model_name = "t5-small" cls.dense_act_model_name = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.input_text = "Translate in German: Hello, my dog is cute" def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. 
Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ gc.collect() torch.cuda.empty_cache() def test_inference_without_keep_in_fp32(self): r""" Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly. `flan-t5-small` uses `T5DenseGatedActDense` whereas `t5-small` uses `T5DenseReluDense`. We need to test both cases. """ from transformers import T5ForConditionalGeneration modules = T5ForConditionalGeneration._keep_in_fp32_modules T5ForConditionalGeneration._keep_in_fp32_modules = None # test with `t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto") encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_4bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) T5ForConditionalGeneration._keep_in_fp32_modules = modules def test_inference_with_keep_in_fp32(self): r""" Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly. `flan-t5-small` uses `T5DenseGatedActDense` whereas `t5-small` uses `T5DenseReluDense`. We need to test both cases. """ import bitsandbytes as bnb from transformers import T5ForConditionalGeneration # test with `t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto") # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit)) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_4bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) class Classes4BitModelTest(Base4bitTest): def setUp(self): super().setUp() # model_name self.model_name = "bigscience/bloom-560m" self.seq_to_seq_name = "t5-small" # Different types of model self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto") # Sequence classification model self.sequence_model = AutoModelForSequenceClassification.from_pretrained( self.model_name, load_in_4bit=True, device_map="auto" ) # CausalLM model self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto") # Seq2seq model self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained( self.seq_to_seq_name, load_in_4bit=True, device_map="auto" ) def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.base_model del self.sequence_model del self.model_4bit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def test_correct_head_class(self): r""" A simple test to check if the last modules for some classes (AutoModelForCausalLM or SequenceClassification) are kept in their native class. 
""" from bitsandbytes.nn import Params4bit self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit) # Other heads should be nn.Parameter self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter) class Pipeline4BitTest(Base4bitTest): def setUp(self): super().setUp() def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.pipe gc.collect() torch.cuda.empty_cache() def test_pipeline(self): r""" The aim of this test is to verify that the mixed 4bit is compatible with `pipeline` from transformers. Since we used pipline for inference speed benchmarking we want to make sure that this feature does not break anything on pipline. """ # self._clear_cuda_cache() self.pipe = pipeline( "text-generation", model=self.model_name, model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16}, max_new_tokens=self.MAX_NEW_TOKENS, ) # Real second forward pass pipeline_output = self.pipe(self.input_text) self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS) @require_torch_multi_gpu class Bnb4bitTestMultiGpu(Base4bitTest): def setUp(self): super().setUp() def test_multi_gpu_loading(self): r""" This tests that the model has been loaded and can be used correctly on a multi-GPU setup. Let's just try to load a model on 2 GPUs and see if it works. The model we test has ~2GB of total, 3GB should suffice """ model_parallel = AutoModelForCausalLM.from_pretrained( self.model_name, load_in_4bit=True, device_map="balanced" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1}) # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Second real batch output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) class Bnb4BitTestTraining(Base4bitTest): def setUp(self): self.model_name = "facebook/opt-350m" super().setUp() def test_training(self): if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"): return # Step 1: freeze all parameters model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True) self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()}) for param in model.parameters(): param.requires_grad = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability param.data = param.data.to(torch.float32) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(module)): module.q_proj = LoRALayer(module.q_proj, rank=16) module.k_proj = LoRALayer(module.k_proj, rank=16) module.v_proj = LoRALayer(module.v_proj, rank=16) # Step 3: dummy batch batch = self.tokenizer("Test batch ", return_tensors="pt").to(0) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): out = model.forward(**batch) out.logits.norm().backward() for module in model.modules(): if isinstance(module, LoRALayer): self.assertTrue(module.adapter[1].weight.grad is not None) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0) elif isinstance(module, nn.Embedding): self.assertTrue(module.weight.grad is None) class Bnb4BitGPT2Test(Bnb4BitTest): model_name = "gpt2-xl" EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
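# ---------------------------------------------------------------------------
# Usage sketch (illustration only, not collected by pytest): the main 4-bit
# options exercised in this file, combined in a single config. NF4
# quantization with nested ("double") quantization and a float16 compute
# dtype mirrors the settings the tests probe individually; the checkpoint
# name is only an example.
if __name__ == "__main__":
    demo_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=torch.float16,
    )
    demo_model = AutoModelForCausalLM.from_pretrained(
        "bigscience/bloom-1b7", device_map="auto", quantization_config=demo_config
    )
    demo_tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
    demo_inputs = demo_tokenizer("Hello my name is", return_tensors="pt").to(0)
    print(demo_tokenizer.decode(demo_model.generate(**demo_inputs, max_new_tokens=10)[0], skip_special_tokens=True))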
0
hf_public_repos/transformers/tests/quantization
hf_public_repos/transformers/tests/quantization/gptq/test_gptq.py
# coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import pytest from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig from transformers.testing_utils import ( is_torch_available, require_accelerate, require_auto_gptq, require_optimum, require_torch_gpu, require_torch_multi_gpu, slow, ) if is_torch_available(): import torch class GPTQConfigTest(unittest.TestCase): def test_bits(self): with self.assertRaises(ValueError): GPTQConfig(bits="") GPTQConfig(bits=1) GPTQConfig(bits=2) GPTQConfig(bits=4) def test_dataset(self): with self.assertRaises(ValueError): GPTQConfig(bits=2, dataset="auto_gpt") GPTQConfig(bits=2, dataset="c4") GPTQConfig(bits=2, dataset="ptb-new") def test_damp_percent(self): with self.assertRaises(ValueError): GPTQConfig(bits=2, damp_percent=10) GPTQConfig(bits=2, damp_percent=-1) GPTQConfig(bits=2, damp_percent="0") GPTQConfig(bits=2, damp_percent=0.01) def test_to_dict(self): quantization_config = GPTQConfig(bits=2) quantization_config.to_dict() def test_from_dict(self): dict = {"bits": 2} quantization_config = GPTQConfig.from_dict(dict) self.assertEqual(dict["bits"], quantization_config.bits) @require_optimum def test_optimum_config(self): from optimum.gptq import GPTQQuantizer config = GPTQConfig(bits=2) optimum_config = GPTQQuantizer.from_dict(config.to_dict_optimum()) self.assertEqual(optimum_config.bits, config.bits) new_config = GPTQConfig.from_dict_optimum(optimum_config.to_dict()) self.assertEqual(optimum_config.bits, new_config.bits) @slow @require_optimum @require_auto_gptq @require_torch_gpu class GPTQTest(unittest.TestCase): model_name = "bigscience/bloom-560m" input_text = "Hello my name is" EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I") EXPECTED_OUTPUTS.add("Hello my name is John, I am a professional photographer and I") EXPECTED_OUTPUTS.add("Hello my name is John, I am a student in the University of") EXPECTED_OUTPUTS.add("Hello my name is John and I am a very good looking man.") EXPECTED_OUTPUTS.add("Hello my name is Alyson, I am a student in the") EXPECTED_OUTPUTS.add("Hello my name is Alyson and I am a very sweet,") # this seems a little small considering that we are doing 4bit quant but we have a small model and we don't quantize the embeddings EXPECTED_RELATIVE_DIFFERENCE = 1.664253062 bits = 4 group_size = 128 desc_act = False use_exllama = False dataset = [ "auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm." 
] device_map = None # called only once for all tests in this class @classmethod def setUpClass(cls): """ Setup quantized model """ cls.model_fp16 = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.float16, device_map=cls.device_map ) cls.mem_fp16 = cls.model_fp16.get_memory_footprint() cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) quantization_config = GPTQConfig( bits=cls.bits, dataset=cls.dataset, tokenizer=cls.tokenizer, group_size=cls.group_size, desc_act=cls.desc_act, use_exllama=cls.use_exllama, ) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.float16, device_map=cls.device_map, quantization_config=quantization_config, ) def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model """ mem_quantized = self.quantized_model.get_memory_footprint() self.assertAlmostEqual(self.mem_fp16 / mem_quantized, self.EXPECTED_RELATIVE_DIFFERENCE) def test_device_and_dtype_assignment(self): r""" Test whether trying to cast (or assigning a device to) a model after quantization will throw an error. Also checks if other models are cast correctly. """ # This should work if self.device_map is None: _ = self.quantized_model.to(0) with self.assertRaises(ValueError): # Tries with a `dtype` self.quantized_model.to(torch.float16) def test_original_dtype(self): r""" A simple test to check if the model successfully stores the original dtype """ self.assertTrue(hasattr(self.quantized_model.config, "_pre_quantization_dtype")) self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype")) self.assertTrue(self.quantized_model.config._pre_quantization_dtype == torch.float16) def test_quantized_layers_class(self): """ Simple test to check if the model conversion has been done correctly by checking on the class type of the linear layers of the converted models """ from auto_gptq.utils.import_utils import dynamically_import_QuantLinear QuantLinear = dynamically_import_QuantLinear( use_triton=False, desc_act=self.desc_act, group_size=self.group_size, bits=self.bits, disable_exllama=not self.use_exllama, disable_exllamav2=True, ) self.assertTrue(self.quantized_model.transformer.h[0].mlp.dense_4h_to_h.__class__ == QuantLinear) def check_inference_correctness(self, model): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate a few tokens (5-10) and check their output. 
""" # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def check_quantized_layers_type(self, model, value): self.assertTrue(model.transformer.h[0].mlp.dense_4h_to_h.QUANT_TYPE == value) def test_generate_quality(self): """ Simple test to check the quality of the model by comparing the generated tokens with the expected tokens """ if self.device_map is None: self.check_inference_correctness(self.quantized_model.to(0)) else: self.check_inference_correctness(self.quantized_model) def test_serialization(self): """ Test the serialization of the model and the loading of the quantized weights works """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) if not self.use_exllama: quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname).to(0) self.check_quantized_layers_type(quantized_model_from_saved, "cuda-old") else: # we need to put it directly to the gpu. Otherwise, we won't be able to initialize the exllama kernel quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map={"": 0}) self.check_quantized_layers_type(quantized_model_from_saved, "exllama") self.check_inference_correctness(quantized_model_from_saved) @require_accelerate def test_serialization_big_model_inference(self): """ Test the serialization of the model and the loading of the quantized weights with big model inference """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map="auto") self.check_inference_correctness(quantized_model_from_saved) def test_change_loading_attributes(self): """ Test the serialization of the model and the loading of the quantized weights works with another config file """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) if not self.use_exllama: self.assertEqual(self.quantized_model.config.quantization_config.use_exllama, False) # we need to put it directly to the gpu. Otherwise, we won't be able to initialize the exllama kernel quantized_model_from_saved = AutoModelForCausalLM.from_pretrained( tmpdirname, quantization_config=GPTQConfig(use_exllama=True, bits=4), device_map={"": 0} ) self.assertEqual(quantized_model_from_saved.config.quantization_config.use_exllama, True) self.assertEqual(quantized_model_from_saved.config.quantization_config.bits, self.bits) self.check_quantized_layers_type(quantized_model_from_saved, "exllama") self.check_inference_correctness(quantized_model_from_saved) @require_accelerate @require_torch_multi_gpu class GPTQTestDeviceMap(GPTQTest): device_map = "auto" @require_accelerate @require_torch_multi_gpu class GPTQTestDeviceMapExllama(GPTQTest): device_map = "auto" use_exllama = True @slow @require_optimum @require_auto_gptq @require_torch_gpu @require_accelerate class GPTQTestActOrderExllama(unittest.TestCase): """ Test GPTQ model with exllama kernel and desc_act=True (also known as act-order). 
More information on those arguments here: https://huggingface.co/docs/transformers/main_classes/quantization#transformers.GPTQConfig """ EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is Katie and I am a 20 year") model_name = "hf-internal-testing/Llama-2-7B-GPTQ" revision = "gptq-4bit-128g-actorder_True" input_text = "Hello my name is" @classmethod def setUpClass(cls): """ Setup quantized model """ cls.quantization_config = GPTQConfig(bits=4, max_input_length=4028) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, revision=cls.revision, torch_dtype=torch.float16, device_map={"": 0}, quantization_config=cls.quantization_config, ) cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) def check_inference_correctness(self, model): """ Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate a few tokens (5-10) and check their output. """ # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_quantized_layers_type(self): self.assertTrue(self.quantized_model.model.layers[0].self_attn.k_proj.QUANT_TYPE == "exllama") def test_generate_quality(self): """ Simple test to check the quality of the model by comparing the generated tokens with the expected tokens """ self.check_inference_correctness(self.quantized_model) def test_max_input_length(self): """ Test if the max_input_length works. It modifies the maximum input length of the model that runs with the exllama backend. """ prompt = "I am in Paris and" * 1000 inp = self.tokenizer(prompt, return_tensors="pt").to(0) self.assertTrue(inp["input_ids"].shape[1] > 4028) with self.assertRaises(RuntimeError) as cm: self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3) self.assertTrue("temp_state buffer is too small" in str(cm.exception)) prompt = "I am in Paris and" * 500 inp = self.tokenizer(prompt, return_tensors="pt").to(0) self.assertTrue(inp["input_ids"].shape[1] < 4028) self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3) @slow @require_optimum @require_auto_gptq @require_torch_gpu @require_accelerate class GPTQTestExllamaV2(unittest.TestCase): """ Test GPTQ model with exllamav2 kernel and desc_act=True (also known as act-order). 
More information on those arguments here: https://huggingface.co/docs/transformers/main_classes/quantization#transformers.GPTQConfig """ EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is Katie and I am a 20 year") model_name = "hf-internal-testing/Llama-2-7B-GPTQ" revision = "gptq-4bit-128g-actorder_True" input_text = "Hello my name is" @classmethod def setUpClass(cls): """ Setup quantized model """ cls.quantization_config = GPTQConfig(bits=4, exllama_config={"version": 2}) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, revision=cls.revision, torch_dtype=torch.float16, device_map={"": 0}, quantization_config=cls.quantization_config, ) cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) def test_quantized_layers_type(self): self.assertTrue(self.quantized_model.model.layers[0].self_attn.k_proj.QUANT_TYPE == "exllamav2") def check_inference_correctness(self, model): """ Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate a few tokens (5-10) and check their output. """ # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality(self): """ Simple test to check the quality of the model by comparing the generated tokens with the expected tokens """ self.check_inference_correctness(self.quantized_model) # fails when run all together @pytest.mark.skip @require_accelerate @require_torch_multi_gpu class GPTQTestDeviceMapCPUOffload(GPTQTest): device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": 0, "lm_head": 0, "transformer.h.0": 0, "transformer.h.1": 0, "transformer.h.2": 0, "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 0, "transformer.h.10": 1, "transformer.h.11": 1, "transformer.h.12": 1, "transformer.h.13": 1, "transformer.h.14": 1, "transformer.h.15": 1, "transformer.h.16": 1, "transformer.h.17": 0, "transformer.h.18": "cpu", "transformer.h.19": "cpu", "transformer.h.20": "cpu", "transformer.h.21": "cpu", "transformer.h.22": "cpu", "transformer.h.23": 1, "transformer.ln_f": 0, }
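# ---------------------------------------------------------------------------
# Usage sketch (illustration only, not collected by pytest): quantizing a
# checkpoint with GPTQ the way `GPTQTest.setUpClass` does, then reloading the
# saved weights with the exllama kernel. The checkpoint and the output folder
# name are only examples.
if __name__ == "__main__":
    demo_tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m", use_fast=True)
    demo_config = GPTQConfig(bits=4, dataset="c4", tokenizer=demo_tokenizer, group_size=128, desc_act=False)
    demo_model = AutoModelForCausalLM.from_pretrained(
        "bigscience/bloom-560m", torch_dtype=torch.float16, device_map="auto", quantization_config=demo_config
    )
    demo_model.save_pretrained("bloom-560m-gptq")  # hypothetical output folder
    # Exllama kernels need the weights directly on the GPU, hence the explicit device_map
    reloaded = AutoModelForCausalLM.from_pretrained(
        "bloom-560m-gptq", quantization_config=GPTQConfig(bits=4, use_exllama=True), device_map={"": 0}
    )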
0
hf_public_repos/transformers/tests/quantization
hf_public_repos/transformers/tests/quantization/autoawq/test_awq.py
# coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import unittest from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, AwqConfig, OPTForCausalLM from transformers.testing_utils import ( require_accelerate, require_auto_awq, require_torch_gpu, require_torch_multi_gpu, slow, torch_device, ) from transformers.utils import is_accelerate_available, is_torch_available if is_torch_available(): import torch if is_accelerate_available(): from accelerate import init_empty_weights @require_torch_gpu class AwqConfigTest(unittest.TestCase): def test_wrong_backend(self): """ Simple test that checks if a user passes a wrong backend an error is raised """ # This should work fine _ = AwqConfig(bits=4) with self.assertRaises(ValueError): AwqConfig(bits=4, backend="") # These should work fine _ = AwqConfig(bits=4, version="GEMM") _ = AwqConfig(bits=4, version="gemm") with self.assertRaises(ValueError): AwqConfig(bits=4, backend="unexisting-backend") # LLMAWQ does not work on a T4 with self.assertRaises(ValueError): AwqConfig(bits=4, backend="llm-awq") def test_to_dict(self): """ Simple test that checks if one uses a config and converts it to a dict, the dict is the same as the config object """ quantization_config = AwqConfig(bits=4) config_to_dict = quantization_config.to_dict() for key in config_to_dict: self.assertEqual(getattr(quantization_config, key), config_to_dict[key]) def test_from_dict(self): """ Simple test that checks if one uses a dict and converts it to a config object, the config object is the same as the dict """ dict = {"bits": 2, "zero_point": False, "backend": "autoawq"} quantization_config = AwqConfig.from_dict(dict) self.assertEqual(dict["bits"], quantization_config.bits) self.assertEqual(dict["zero_point"], quantization_config.zero_point) self.assertEqual(dict["backend"], quantization_config.backend) @slow @require_torch_gpu @require_auto_awq @require_accelerate class AwqTest(unittest.TestCase): model_name = "TheBloke/Mistral-7B-v0.1-AWQ" dummy_transformers_model_name = "bigscience/bloom-560m" input_text = "Hello my name is" EXPECTED_OUTPUT = "Hello my name is Katie and I am a 20 year old student at the University of North Carolina at Chapel Hill. I am a junior and I am majoring in Journalism and minoring in Spanish" EXPECTED_OUTPUT_BF16 = "Hello my name is Katie and I am a 20 year old student at the University of North Carolina at Chapel Hill. 
I am a junior and I am majoring in Exercise and Sport Science with a" device_map = "cuda" # called only once for all tests in this class @classmethod def setUpClass(cls): """ Setup quantized model """ cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, device_map=cls.device_map, ) def tearDown(self): gc.collect() torch.cuda.empty_cache() gc.collect() def test_quantized_model_conversion(self): """ Simple test that checks if the quantized model has been converted properly """ from awq.modules.linear import WQLinear_GEMM, WQLinear_GEMV from transformers.integrations.awq import replace_with_awq_linear model_id = "facebook/opt-350m" config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5") quantization_config = AwqConfig(bits=4) with init_empty_weights(): model = OPTForCausalLM(config) nb_linears = 0 for module in model.modules(): if isinstance(module, torch.nn.Linear): nb_linears += 1 model, _ = replace_with_awq_linear(model, quantization_config=quantization_config) nb_awq_linear = 0 for module in model.modules(): if isinstance(module, (WQLinear_GEMM, WQLinear_GEMV)): nb_awq_linear += 1 self.assertEqual(nb_linears, nb_awq_linear) # Try with `modules_to_not_convert` with init_empty_weights(): model = OPTForCausalLM(config) model, _ = replace_with_awq_linear( model, quantization_config=quantization_config, modules_to_not_convert=["lm_head"] ) nb_awq_linear = 0 for module in model.modules(): if isinstance(module, (WQLinear_GEMM, WQLinear_GEMV)): nb_awq_linear += 1 self.assertEqual(nb_linears - 1, nb_awq_linear) def test_quantized_model(self): """ Simple test that checks if the quantized model is working properly """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = self.quantized_model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_raise_if_non_quantized(self): model_id = "facebook/opt-125m" quantization_config = AwqConfig(bits=4) with self.assertRaises(ValueError): _ = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config) def test_quantized_model_bf16(self): """ Simple test that checks if the quantized model is working properly with bf16 """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, torch_dtype=torch.bfloat16).to( torch_device ) output = quantized_model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT_BF16) def test_quantized_model_no_device_map(self): """ Simple test that checks if the quantized model is working properly """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name).to(torch_device) output = quantized_model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_save_pretrained(self): """ Simple test that checks if the quantized model is working properly after being saved and loaded """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map) input_ids = 
self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) @require_torch_multi_gpu def test_quantized_model_multi_gpu(self): """ Simple test that checks if the quantized model is working properly with multiple GPUs """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, device_map="auto") self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1, 2, 3}) output = quantized_model.generate(**input_ids, max_new_tokens=40) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) @slow @require_torch_gpu @require_auto_awq @require_accelerate class AwqFusedTest(unittest.TestCase): model_name = "TheBloke/Mistral-7B-OpenOrca-AWQ" model_revision = "7048b2af77d0dd1c81b000b19d73f9cc8950b510" custom_mapping_model_id = "TheBloke/Yi-34B-AWQ" custom_model_revision = "f1b2cd1b7459ceecfdc1fac5bb8725f13707c589" prompt = ( "You're standing on the surface of the Earth. " "You walk one mile south, one mile west and one mile north. " "You end up exactly where you started. Where are you?" ) EXPECTED_GENERATION = prompt + "\n\nThis is a classic puzzle that has been around for" EXPECTED_GENERATION_CUSTOM_MODEL = "HelloWorld.java:11)\r\n\tat org" def tearDown(self): gc.collect() torch.cuda.empty_cache() gc.collect() def _check_fused_modules(self, model): has_fused_modules = False fused_modules_name = ["QuantAttentionFused", "QuantFusedMLP", "FasterTransformerRMSNorm"] for _, module in model.named_modules(): if module.__class__.__name__ in fused_modules_name: has_fused_modules = True break self.assertTrue(has_fused_modules, "Modules fusing not performed correctly!") def test_raise_save_pretrained(self): """ Test that `save_pretrained` is effectively blocked for fused models """ quantization_config = AwqConfig(bits=4, fuse_max_seq_len=128, do_fuse=True) model = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=quantization_config, low_cpu_mem_usage=True, revision=self.model_revision, ).to(torch_device) self._check_fused_modules(model) with self.assertRaises(ValueError), tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) def test_generation_fused(self): """ Test generation quality for fused models - single batch case """ quantization_config = AwqConfig(bits=4, fuse_max_seq_len=128, do_fuse=True) model = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=quantization_config, low_cpu_mem_usage=True, revision=self.model_revision, ).to(torch_device) self._check_fused_modules(model) tokenizer = AutoTokenizer.from_pretrained(self.model_name, revision=self.model_revision) inputs = tokenizer(self.prompt, return_tensors="pt").to(torch_device) outputs = model.generate(**inputs, max_new_tokens=12) self.assertEqual(tokenizer.decode(outputs[0], skip_special_tokens=True), self.EXPECTED_GENERATION) def test_generation_fused_batched(self): """ Test generation quality for fused models - multi batch case """ quantization_config = AwqConfig(bits=4, fuse_max_seq_len=128, do_fuse=True) model = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=quantization_config, low_cpu_mem_usage=True, revision=self.model_revision, ).to(torch_device) self._check_fused_modules(model) tokenizer = 
AutoTokenizer.from_pretrained(self.model_name, revision=self.model_revision) tokenizer.pad_token_id = tokenizer.eos_token_id inputs = tokenizer([self.prompt, self.prompt], return_tensors="pt", padding=True).to(torch_device) outputs = model.generate(**inputs, max_new_tokens=12) self.assertEqual(tokenizer.decode(outputs[0], skip_special_tokens=True), self.EXPECTED_GENERATION) @require_torch_multi_gpu def test_generation_custom_model(self): """ Test generation quality for fused models using custom fused map. """ quantization_config = AwqConfig( bits=4, fuse_max_seq_len=512, modules_to_fuse={ "attention": ["q_proj", "k_proj", "v_proj", "o_proj"], "layernorm": ["ln1", "ln2", "norm"], "mlp": ["gate_proj", "up_proj", "down_proj"], "use_alibi": False, "num_attention_heads": 56, "num_key_value_heads": 8, "hidden_size": 7168, }, ) model = AutoModelForCausalLM.from_pretrained( self.custom_mapping_model_id, quantization_config=quantization_config, trust_remote_code=True, device_map="balanced", revision=self.custom_model_revision, ) self._check_fused_modules(model) tokenizer = AutoTokenizer.from_pretrained( self.custom_mapping_model_id, revision=self.custom_model_revision, trust_remote_code=True ) prompt = "Hello" inputs = tokenizer(prompt, return_tensors="pt").to(torch_device) outputs = model.generate(**inputs, max_new_tokens=12) self.assertEqual(tokenizer.decode(outputs[0], skip_special_tokens=True), self.EXPECTED_GENERATION_CUSTOM_MODEL)
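# ---------------------------------------------------------------------------
# Usage sketch (illustration only, not collected by pytest): loading a
# pre-quantized AWQ checkpoint with module fusing enabled, as `AwqFusedTest`
# does above. The checkpoint name is the one the tests use and is only an
# example.
if __name__ == "__main__":
    demo_config = AwqConfig(bits=4, fuse_max_seq_len=512, do_fuse=True)
    demo_model = AutoModelForCausalLM.from_pretrained(
        "TheBloke/Mistral-7B-OpenOrca-AWQ", quantization_config=demo_config, low_cpu_mem_usage=True
    ).to("cuda")
    demo_tokenizer = AutoTokenizer.from_pretrained("TheBloke/Mistral-7B-OpenOrca-AWQ")
    demo_inputs = demo_tokenizer("Hello my name is", return_tensors="pt").to("cuda")
    print(demo_tokenizer.decode(demo_model.generate(**demo_inputs, max_new_tokens=12)[0], skip_special_tokens=True))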
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/bettertransformer/test_integration.py
# coding=utf-8 # Copyright 2023 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from transformers import AutoModelForSeq2SeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class BetterTransformerIntegrationTest(unittest.TestCase): # refer to the full test suite in Optimum library: # https://github.com/huggingface/optimum/tree/main/tests/bettertransformer def test_transform_and_reverse(self): r""" Classic tests to simply check if the conversion has been successful. """ model_id = "hf-internal-testing/tiny-random-t5" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForSeq2SeqLM.from_pretrained(model_id) inp = tokenizer("This is me", return_tensors="pt") model = model.to_bettertransformer() self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules())) output = model.generate(**inp) model = model.reverse_bettertransformer() self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules())) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname) self.assertFalse( any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()) ) output_from_pretrained = model_reloaded.generate(**inp) self.assertTrue(torch.allclose(output, output_from_pretrained)) def test_error_save_pretrained(self): r""" The save_pretrained method should raise a ValueError if the model is in BetterTransformer mode. All should be good if the model is reversed. """ model_id = "hf-internal-testing/tiny-random-t5" model = AutoModelForSeq2SeqLM.from_pretrained(model_id) model = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(ValueError): model.save_pretrained(tmpdirname) model = model.reverse_bettertransformer() model.save_pretrained(tmpdirname)
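# ---------------------------------------------------------------------------
# Usage sketch (illustration only, not collected by pytest): the
# BetterTransformer round trip the tests above cover. `to_bettertransformer()`
# swaps in Optimum's fused kernels; `reverse_bettertransformer()` restores the
# canonical modeling code so that the checkpoint can be saved. The output
# folder name is only an example.
if __name__ == "__main__":
    model_id = "hf-internal-testing/tiny-random-t5"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
    model = model.to_bettertransformer()
    _ = model.generate(**tokenizer("This is me", return_tensors="pt"))
    model = model.reverse_bettertransformer()
    model.save_pretrained("tiny-t5-roundtrip")  # would raise ValueError before the reverse call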
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/repo_utils/test_get_test_info.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import unittest git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py") BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py") class GetTestInfoTester(unittest.TestCase): def test_get_test_to_tester_mapping(self): bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE) blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE) EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"} EXPECTED_BLIP_MAPPING = { "BlipModelTest": "BlipModelTester", "BlipTextImageModelTest": "BlipTextImageModelsModelTester", "BlipTextModelTest": "BlipTextModelTester", "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester", "BlipVQAModelTest": "BlipVQAModelTester", "BlipVisionModelTest": "BlipVisionModelTester", } self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING) self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING) def test_get_model_to_test_mapping(self): bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE) blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE) EXPECTED_BERT_MAPPING = { "BertForMaskedLM": ["BertModelTest"], "BertForMultipleChoice": ["BertModelTest"], "BertForNextSentencePrediction": ["BertModelTest"], "BertForPreTraining": ["BertModelTest"], "BertForQuestionAnswering": ["BertModelTest"], "BertForSequenceClassification": ["BertModelTest"], "BertForTokenClassification": ["BertModelTest"], "BertLMHeadModel": ["BertModelTest"], "BertModel": ["BertModelTest"], } EXPECTED_BLIP_MAPPING = { "BlipForConditionalGeneration": ["BlipTextImageModelTest"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"], "BlipForQuestionAnswering": ["BlipVQAModelTest"], "BlipModel": ["BlipModelTest"], "BlipTextModel": ["BlipTextModelTest"], "BlipVisionModel": ["BlipVisionModelTest"], } self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING) self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING) def test_get_model_to_tester_mapping(self): bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE) blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE) EXPECTED_BERT_MAPPING = { "BertForMaskedLM": ["BertModelTester"], "BertForMultipleChoice": ["BertModelTester"], "BertForNextSentencePrediction": ["BertModelTester"], "BertForPreTraining": ["BertModelTester"], "BertForQuestionAnswering": ["BertModelTester"], "BertForSequenceClassification": ["BertModelTester"], "BertForTokenClassification": ["BertModelTester"], 
"BertLMHeadModel": ["BertModelTester"], "BertModel": ["BertModelTester"], } EXPECTED_BLIP_MAPPING = { "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"], "BlipForQuestionAnswering": ["BlipVQAModelTester"], "BlipModel": ["BlipModelTester"], "BlipTextModel": ["BlipTextModelTester"], "BlipVisionModel": ["BlipVisionModelTester"], } self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING) self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
hf_public_repos/transformers/tests/repo_utils/test_check_copies.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import sys import tempfile import unittest from contextlib import contextmanager from pathlib import Path git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 from check_copies import convert_to_localized_md, find_code_in_transformers, is_copy_consistent # noqa: E402 # This is the reference code that will be used in the tests. # If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated. REFERENCE_CODE = """ def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states """ MOCK_BERT_CODE = """from ...modeling_utils import PreTrainedModel def bert_function(x): return x class BertAttention(nn.Module): def __init__(self, config): super().__init__() class BertModel(BertPreTrainedModel): def __init__(self, config): super().__init__() self.bert = BertEncoder(config) @add_docstring(BERT_DOCSTRING) def forward(self, x): return self.bert(x) """ MOCK_BERT_COPY_CODE = """from ...modeling_utils import PreTrainedModel # Copied from transformers.models.bert.modeling_bert.bert_function def bert_copy_function(x): return x # Copied from transformers.models.bert.modeling_bert.BertAttention class BertCopyAttention(nn.Module): def __init__(self, config): super().__init__() # Copied from transformers.models.bert.modeling_bert.BertModel with Bert->BertCopy all-casing class BertCopyModel(BertCopyPreTrainedModel): def __init__(self, config): super().__init__() self.bertcopy = BertCopyEncoder(config) @add_docstring(BERTCOPY_DOCSTRING) def forward(self, x): return self.bertcopy(x) """ MOCK_DUMMY_BERT_CODE_MATCH = """ class BertDummyModel: attr_1 = 1 attr_2 = 2 def __init__(self, a=1, b=2): self.a = a self.b = b # Copied from transformers.models.dummy_gpt2.modeling_dummy_gpt2.GPT2DummyModel.forward def forward(self, c): return 1 def existing_common(self, c): return 4 def existing_diff_to_be_ignored(self, c): return 9 """ MOCK_DUMMY_ROBERTA_CODE_MATCH = """ # Copied from transformers.models.dummy_bert_match.modeling_dummy_bert_match.BertDummyModel with BertDummy->RobertaBertDummy class RobertaBertDummyModel: attr_1 = 1 attr_2 = 2 def __init__(self, a=1, b=2): self.a = a self.b = b # Ignore copy def only_in_roberta_to_be_ignored(self, c): return 3 # Copied from 
transformers.models.dummy_gpt2.modeling_dummy_gpt2.GPT2DummyModel.forward def forward(self, c): return 1 def existing_common(self, c): return 4 # Ignore copy def existing_diff_to_be_ignored(self, c): return 6 """ MOCK_DUMMY_BERT_CODE_NO_MATCH = """ class BertDummyModel: attr_1 = 1 attr_2 = 2 def __init__(self, a=1, b=2): self.a = a self.b = b # Copied from transformers.models.dummy_gpt2.modeling_dummy_gpt2.GPT2DummyModel.forward def forward(self, c): return 1 def only_in_bert(self, c): return 7 def existing_common(self, c): return 4 def existing_diff_not_ignored(self, c): return 8 def existing_diff_to_be_ignored(self, c): return 9 """ MOCK_DUMMY_ROBERTA_CODE_NO_MATCH = """ # Copied from transformers.models.dummy_bert_no_match.modeling_dummy_bert_no_match.BertDummyModel with BertDummy->RobertaBertDummy class RobertaBertDummyModel: attr_1 = 1 attr_2 = 3 def __init__(self, a=1, b=2): self.a = a self.b = b # Ignore copy def only_in_roberta_to_be_ignored(self, c): return 3 # Copied from transformers.models.dummy_gpt2.modeling_dummy_gpt2.GPT2DummyModel.forward def forward(self, c): return 1 def only_in_roberta_not_ignored(self, c): return 2 def existing_common(self, c): return 4 def existing_diff_not_ignored(self, c): return 5 # Ignore copy def existing_diff_to_be_ignored(self, c): return 6 """ EXPECTED_REPLACED_CODE = """ # Copied from transformers.models.dummy_bert_no_match.modeling_dummy_bert_no_match.BertDummyModel with BertDummy->RobertaBertDummy class RobertaBertDummyModel: attr_1 = 1 attr_2 = 2 def __init__(self, a=1, b=2): self.a = a self.b = b # Copied from transformers.models.dummy_gpt2.modeling_dummy_gpt2.GPT2DummyModel.forward def forward(self, c): return 1 def only_in_bert(self, c): return 7 def existing_common(self, c): return 4 def existing_diff_not_ignored(self, c): return 8 # Ignore copy def existing_diff_to_be_ignored(self, c): return 6 # Ignore copy def only_in_roberta_to_be_ignored(self, c): return 3 """ def replace_in_file(filename, old, new): with open(filename, "r", encoding="utf-8") as f: content = f.read() content = content.replace(old, new) with open(filename, "w", encoding="utf-8", newline="\n") as f: f.write(content) def create_tmp_repo(tmp_dir): """ Creates a mock repository in a temporary folder for testing. """ tmp_dir = Path(tmp_dir) if tmp_dir.exists(): shutil.rmtree(tmp_dir) tmp_dir.mkdir(exist_ok=True) model_dir = tmp_dir / "src" / "transformers" / "models" model_dir.mkdir(parents=True, exist_ok=True) models = { "bert": MOCK_BERT_CODE, "bertcopy": MOCK_BERT_COPY_CODE, "dummy_bert_match": MOCK_DUMMY_BERT_CODE_MATCH, "dummy_roberta_match": MOCK_DUMMY_ROBERTA_CODE_MATCH, "dummy_bert_no_match": MOCK_DUMMY_BERT_CODE_NO_MATCH, "dummy_roberta_no_match": MOCK_DUMMY_ROBERTA_CODE_NO_MATCH, } for model, code in models.items(): model_subdir = model_dir / model model_subdir.mkdir(exist_ok=True) with open(model_subdir / f"modeling_{model}.py", "w", encoding="utf-8", newline="\n") as f: f.write(code) @contextmanager def patch_transformer_repo_path(new_folder): """ Temporarily patches the variables defines in `check_copies` to use a different location for the repo. 
""" old_repo_path = check_copies.REPO_PATH old_doc_path = check_copies.PATH_TO_DOCS old_transformer_path = check_copies.TRANSFORMERS_PATH repo_path = Path(new_folder).resolve() check_copies.REPO_PATH = str(repo_path) check_copies.PATH_TO_DOCS = str(repo_path / "docs" / "source" / "en") check_copies.TRANSFORMERS_PATH = str(repo_path / "src" / "transformers") try: yield finally: check_copies.REPO_PATH = old_repo_path check_copies.PATH_TO_DOCS = old_doc_path check_copies.TRANSFORMERS_PATH = old_transformer_path class CopyCheckTester(unittest.TestCase): def test_find_code_in_transformers(self): with tempfile.TemporaryDirectory() as tmp_folder: create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): code = find_code_in_transformers("models.bert.modeling_bert.BertAttention") reference_code = ( "class BertAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n" ) self.assertEqual(code, reference_code) def test_is_copy_consistent(self): path_to_check = ["src", "transformers", "models", "bertcopy", "modeling_bertcopy.py"] with tempfile.TemporaryDirectory() as tmp_folder: # Base check create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): file_to_check = os.path.join(tmp_folder, *path_to_check) diffs = is_copy_consistent(file_to_check) self.assertEqual(diffs, []) # Base check with an inconsistency create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): file_to_check = os.path.join(tmp_folder, *path_to_check) replace_in_file(file_to_check, "self.bertcopy(x)", "self.bert(x)") diffs = is_copy_consistent(file_to_check) self.assertEqual(diffs, [["models.bert.modeling_bert.BertModel", 22]]) _ = is_copy_consistent(file_to_check, overwrite=True) with open(file_to_check, "r", encoding="utf-8") as f: self.assertEqual(f.read(), MOCK_BERT_COPY_CODE) def test_is_copy_consistent_with_ignored_match(self): path_to_check = ["src", "transformers", "models", "dummy_roberta_match", "modeling_dummy_roberta_match.py"] with tempfile.TemporaryDirectory() as tmp_folder: # Base check create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): file_to_check = os.path.join(tmp_folder, *path_to_check) diffs = is_copy_consistent(file_to_check) self.assertEqual(diffs, []) def test_is_copy_consistent_with_ignored_no_match(self): path_to_check = [ "src", "transformers", "models", "dummy_roberta_no_match", "modeling_dummy_roberta_no_match.py", ] with tempfile.TemporaryDirectory() as tmp_folder: # Base check with an inconsistency create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): file_to_check = os.path.join(tmp_folder, *path_to_check) diffs = is_copy_consistent(file_to_check) # line 6: `attr_2 = 3` in `MOCK_DUMMY_ROBERTA_CODE_NO_MATCH`. # (which has a leading `\n`.) self.assertEqual( diffs, [["models.dummy_bert_no_match.modeling_dummy_bert_no_match.BertDummyModel", 6]] ) _ = is_copy_consistent(file_to_check, overwrite=True) with open(file_to_check, "r", encoding="utf-8") as f: self.assertEqual(f.read(), EXPECTED_REPLACED_CODE) def test_convert_to_localized_md(self): localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"] md_list = ( "1. 
**[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the" " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for" " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong" " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1." " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace)," " released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and" " lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same" " method has been applied to compress GPT2 into" " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into" " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation)," " Multilingual BERT into" " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German" " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**" " (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders" " as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang" " Luong, Quoc V. Le, Christopher D. Manning." ) localized_md_list = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the" " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n" ) converted_md_list_sample = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the" " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1." " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文" " [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and" " lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same" " method has been applied to compress GPT2 into" " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into" " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation)," " Multilingual BERT into" " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German" " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自" " Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather" " than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le," " Christopher D. 
Manning 发布。\n" ) num_models_equal, converted_md_list = convert_to_localized_md( md_list, localized_md_list, localized_readme["format_model_list"] ) self.assertFalse(num_models_equal) self.assertEqual(converted_md_list, converted_md_list_sample) num_models_equal, converted_md_list = convert_to_localized_md( md_list, converted_md_list, localized_readme["format_model_list"] ) # Check whether the number of models is equal to README.md after conversion. self.assertTrue(num_models_equal) link_changed_md_list = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the" " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for" " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong" " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut." ) link_unchanged_md_list = ( "1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and" " the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n" ) converted_md_list_sample = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the" " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n" ) num_models_equal, converted_md_list = convert_to_localized_md( link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"] ) # Check if the model link is synchronized. self.assertEqual(converted_md_list, converted_md_list_sample)
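

# Illustrative sketch, not part of the original test file: the essence of the
# `# Copied from ... with X->Y` mechanism checked above is "fetch the reference
# block, apply the declared replacements, diff against the annotated copy".
# `toy_copy_is_consistent` is a deliberately simplified stand-in -- the real
# `is_copy_consistent` in `utils/check_copies.py` also handles indentation,
# `# Ignore copy` markers, casing variants and optional overwriting.
def toy_copy_is_consistent(reference: str, copy: str, replacements: dict) -> bool:
    """Return True if `copy` equals `reference` after applying `replacements`."""
    expected = reference
    for old, new in replacements.items():
        expected = expected.replace(old, new)
    return expected == copy


if __name__ == "__main__":
    reference = "class BertAttention(nn.Module):\n    def __init__(self, config):\n        super().__init__()\n"
    copy = reference.replace("Bert", "BertCopy")
    assert toy_copy_is_consistent(reference, copy, {"Bert": "BertCopy"})
    assert not toy_copy_is_consistent(reference, copy.replace("config", "cfg"), {"Bert": "BertCopy"})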
hf_public_repos/transformers/tests/repo_utils/test_check_dummies.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import unittest git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers") DUMMY_CONSTANT = """ {0} = None """ DUMMY_CLASS = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) """ DUMMY_FUNCTION = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ class CheckDummiesTester(unittest.TestCase): def test_find_backend(self): no_backend = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")') self.assertIsNone(no_backend) simple_backend = find_backend(" if not is_tokenizers_available():") self.assertEqual(simple_backend, "tokenizers") backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") self.assertEqual(backend_with_underscore, "tensorflow_text") double_backend = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):") self.assertEqual(double_backend, "sentencepiece_and_tokenizers") double_backend_with_underscore = find_backend( " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" ) self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") triple_backend = find_backend( " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" ) self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision") def test_read_init(self): objects = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch", objects) self.assertIn("tensorflow_text", objects) self.assertIn("sentencepiece_and_tokenizers", objects) # Likewise, we can't assert on the exact content of a key self.assertIn("BertModel", objects["torch"]) self.assertIn("TFBertModel", objects["tf"]) self.assertIn("FlaxBertModel", objects["flax"]) self.assertIn("BertModel", objects["torch"]) self.assertIn("TFBertTokenizer", objects["tensorflow_text"]) self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"]) def test_create_dummy_object(self): dummy_constant = create_dummy_object("CONSTANT", "'torch'") self.assertEqual(dummy_constant, "\nCONSTANT = None\n") dummy_function = create_dummy_object("function", "'torch'") self.assertEqual( dummy_function, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" ) expected_dummy_class = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') """ dummy_class = 
create_dummy_object("FakeClass", "'torch'") self.assertEqual(dummy_class, expected_dummy_class) def test_create_dummy_files(self): expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) """ dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
hf_public_repos/transformers/tests/repo_utils/test_tests_fetcher.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import sys import tempfile import unittest from contextlib import contextmanager from pathlib import Path from git import Repo from transformers.testing_utils import CaptureStdout REPO_PATH = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(REPO_PATH, "utils")) import tests_fetcher # noqa: E402 from tests_fetcher import ( # noqa: E402 checkout_commit, clean_code, create_module_to_test_map, create_reverse_dependency_map, create_reverse_dependency_tree, diff_is_docstring_only, extract_imports, get_all_tests, get_diff, get_module_dependencies, get_tree_starting_at, infer_tests_to_run, init_test_examples_dependencies, parse_commit_message, print_tree_deps_of, ) BERT_MODELING_FILE = "src/transformers/models/bert/modeling_bert.py" BERT_MODEL_FILE = """from ...modeling_utils import PreTrainedModel from ...utils import is_torch_available from .configuration_bert import BertConfig class BertModel: ''' This is the docstring. ''' This is the code """ BERT_MODEL_FILE_NEW_DOCSTRING = """from ...modeling_utils import PreTrainedModel from ...utils import is_torch_available from .configuration_bert import BertConfig class BertModel: ''' This is the docstring. It has been updated. ''' This is the code """ BERT_MODEL_FILE_NEW_CODE = """from ...modeling_utils import PreTrainedModel from ...utils import is_torch_available from .configuration_bert import BertConfig class BertModel: ''' This is the docstring. ''' This is the code. It has been updated """ def create_tmp_repo(tmp_dir, models=None): """ Creates a repository in a temporary directory mimicking the structure of Transformers. Uses the list of models provided (which defaults to just `["bert"]`). 
""" tmp_dir = Path(tmp_dir) if tmp_dir.exists(): shutil.rmtree(tmp_dir) tmp_dir.mkdir(exist_ok=True) repo = Repo.init(tmp_dir) if models is None: models = ["bert"] class_names = [model[0].upper() + model[1:] for model in models] transformers_dir = tmp_dir / "src" / "transformers" transformers_dir.mkdir(parents=True, exist_ok=True) with open(transformers_dir / "__init__.py", "w") as f: init_lines = ["from .utils import cached_file, is_torch_available"] init_lines.extend( [f"from .models.{model} import {cls}Config, {cls}Model" for model, cls in zip(models, class_names)] ) f.write("\n".join(init_lines) + "\n") with open(transformers_dir / "configuration_utils.py", "w") as f: f.write("from .utils import cached_file\n\ncode") with open(transformers_dir / "modeling_utils.py", "w") as f: f.write("from .utils import cached_file\n\ncode") utils_dir = tmp_dir / "src" / "transformers" / "utils" utils_dir.mkdir(exist_ok=True) with open(utils_dir / "__init__.py", "w") as f: f.write("from .hub import cached_file\nfrom .imports import is_torch_available\n") with open(utils_dir / "hub.py", "w") as f: f.write("import huggingface_hub\n\ncode") with open(utils_dir / "imports.py", "w") as f: f.write("code") model_dir = tmp_dir / "src" / "transformers" / "models" model_dir.mkdir(parents=True, exist_ok=True) with open(model_dir / "__init__.py", "w") as f: f.write("\n".join([f"import {model}" for model in models])) for model, cls in zip(models, class_names): model_dir = tmp_dir / "src" / "transformers" / "models" / model model_dir.mkdir(parents=True, exist_ok=True) with open(model_dir / "__init__.py", "w") as f: f.write(f"from .configuration_{model} import {cls}Config\nfrom .modeling_{model} import {cls}Model\n") with open(model_dir / f"configuration_{model}.py", "w") as f: f.write("from ...configuration_utils import PretrainedConfig\ncode") with open(model_dir / f"modeling_{model}.py", "w") as f: modeling_code = BERT_MODEL_FILE.replace("bert", model).replace("Bert", cls) f.write(modeling_code) test_dir = tmp_dir / "tests" test_dir.mkdir(exist_ok=True) with open(test_dir / "test_modeling_common.py", "w") as f: f.write("from transformers.modeling_utils import PreTrainedModel\ncode") for model, cls in zip(models, class_names): test_model_dir = test_dir / "models" / model test_model_dir.mkdir(parents=True, exist_ok=True) (test_model_dir / "__init__.py").touch() with open(test_model_dir / f"test_modeling_{model}.py", "w") as f: f.write( f"from transformers import {cls}Config, {cls}Model\nfrom ...test_modeling_common import ModelTesterMixin\n\ncode" ) example_dir = tmp_dir / "examples" example_dir.mkdir(exist_ok=True) for framework in ["flax", "pytorch", "tensorflow"]: framework_dir = example_dir / framework framework_dir.mkdir(exist_ok=True) with open(framework_dir / f"test_{framework}_examples.py", "w") as f: f.write("""test_args = "run_glue.py"\n""") glue_dir = framework_dir / "text-classification" glue_dir.mkdir(exist_ok=True) with open(glue_dir / "run_glue.py", "w") as f: f.write("from transformers import BertModel\n\ncode") repo.index.add(["examples", "src", "tests"]) repo.index.commit("Initial commit") repo.create_head("main") repo.head.reference = repo.refs.main repo.delete_head("master") return repo @contextmanager def patch_transformer_repo_path(new_folder): """ Temporarily patches the variables defines in `tests_fetcher` to use a different location for the repo. 
""" old_repo_path = tests_fetcher.PATH_TO_REPO tests_fetcher.PATH_TO_REPO = Path(new_folder).resolve() tests_fetcher.PATH_TO_EXAMPLES = tests_fetcher.PATH_TO_REPO / "examples" tests_fetcher.PATH_TO_TRANFORMERS = tests_fetcher.PATH_TO_REPO / "src/transformers" tests_fetcher.PATH_TO_TESTS = tests_fetcher.PATH_TO_REPO / "tests" try: yield finally: tests_fetcher.PATH_TO_REPO = old_repo_path tests_fetcher.PATH_TO_EXAMPLES = tests_fetcher.PATH_TO_REPO / "examples" tests_fetcher.PATH_TO_TRANFORMERS = tests_fetcher.PATH_TO_REPO / "src/transformers" tests_fetcher.PATH_TO_TESTS = tests_fetcher.PATH_TO_REPO / "tests" def commit_changes(filenames, contents, repo, commit_message="Commit"): """ Commit new `contents` to `filenames` inside a given `repo`. """ if not isinstance(filenames, list): filenames = [filenames] if not isinstance(contents, list): contents = [contents] folder = Path(repo.working_dir) for filename, content in zip(filenames, contents): with open(folder / filename, "w") as f: f.write(content) repo.index.add(filenames) commit = repo.index.commit(commit_message) return commit.hexsha class TestFetcherTester(unittest.TestCase): def test_checkout_commit(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) repo = create_tmp_repo(tmp_folder) initial_sha = repo.head.commit.hexsha new_sha = commit_changes(BERT_MODELING_FILE, BERT_MODEL_FILE_NEW_DOCSTRING, repo) assert repo.head.commit.hexsha == new_sha with checkout_commit(repo, initial_sha): assert repo.head.commit.hexsha == initial_sha with open(tmp_folder / BERT_MODELING_FILE) as f: assert f.read() == BERT_MODEL_FILE assert repo.head.commit.hexsha == new_sha with open(tmp_folder / BERT_MODELING_FILE) as f: assert f.read() == BERT_MODEL_FILE_NEW_DOCSTRING def test_clean_code(self): # Clean code removes all strings in triple quotes assert clean_code('"""\nDocstring\n"""\ncode\n"""Long string"""\ncode\n') == "code\ncode" assert clean_code("'''\nDocstring\n'''\ncode\n'''Long string'''\ncode\n'''") == "code\ncode" # Clean code removes all comments assert clean_code("code\n# Comment\ncode") == "code\ncode" assert clean_code("code # inline comment\ncode") == "code \ncode" def test_get_all_tests(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): assert get_all_tests() == ["tests/models/bert", "tests/test_modeling_common.py"] def test_get_all_tests_on_full_repo(self): all_tests = get_all_tests() assert "tests/models/albert" in all_tests assert "tests/models/bert" in all_tests assert "tests/repo_utils" in all_tests assert "tests/test_pipeline_mixin.py" in all_tests assert "tests/models" not in all_tests assert "tests/__pycache__" not in all_tests assert "tests/models/albert/test_modeling_albert.py" not in all_tests assert "tests/repo_utils/test_tests_fetcher.py" not in all_tests def test_diff_is_docstring_only(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) repo = create_tmp_repo(tmp_folder) branching_point = repo.refs.main.commit bert_file = BERT_MODELING_FILE commit_changes(bert_file, BERT_MODEL_FILE_NEW_DOCSTRING, repo) assert diff_is_docstring_only(repo, branching_point, bert_file) commit_changes(bert_file, BERT_MODEL_FILE_NEW_CODE, repo) assert not diff_is_docstring_only(repo, branching_point, bert_file) def test_get_diff(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) repo = create_tmp_repo(tmp_folder) initial_commit = 
repo.refs.main.commit bert_file = BERT_MODELING_FILE commit_changes(bert_file, BERT_MODEL_FILE_NEW_DOCSTRING, repo) assert get_diff(repo, repo.head.commit, repo.head.commit.parents) == [] commit_changes(bert_file, BERT_MODEL_FILE_NEW_DOCSTRING + "\n# Adding a comment\n", repo) assert get_diff(repo, repo.head.commit, repo.head.commit.parents) == [] commit_changes(bert_file, BERT_MODEL_FILE_NEW_CODE, repo) assert get_diff(repo, repo.head.commit, repo.head.commit.parents) == [ "src/transformers/models/bert/modeling_bert.py" ] commit_changes("src/transformers/utils/hub.py", "import huggingface_hub\n\nnew code", repo) assert get_diff(repo, repo.head.commit, repo.head.commit.parents) == ["src/transformers/utils/hub.py"] assert get_diff(repo, repo.head.commit, [initial_commit]) == [ "src/transformers/models/bert/modeling_bert.py", "src/transformers/utils/hub.py", ] def test_extract_imports_relative(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) expected_bert_imports = [ ("src/transformers/modeling_utils.py", ["PreTrainedModel"]), ("src/transformers/utils/__init__.py", ["is_torch_available"]), ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), ] expected_utils_imports = [ ("src/transformers/utils/hub.py", ["cached_file"]), ("src/transformers/utils/imports.py", ["is_torch_available"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports assert extract_imports("src/transformers/utils/__init__.py") == expected_utils_imports with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from ...utils import cached_file, is_torch_available\nfrom .configuration_bert import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports # Test with multi-line imports with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from ...utils import (\n cached_file,\n is_torch_available\n)\nfrom .configuration_bert import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), ("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports def test_extract_imports_absolute(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from transformers.utils import cached_file, is_torch_available\nfrom transformers.models.bert.configuration_bert import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports # Test with multi-line imports with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from transformers.utils import (\n cached_file,\n is_torch_available\n)\nfrom transformers.models.bert.configuration_bert import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), 
("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports # Test with base imports with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from transformers.utils import (\n cached_file,\n is_torch_available\n)\nfrom transformers import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/__init__.py", ["BertConfig"]), ("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports def test_get_module_dependencies(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) expected_bert_dependencies = [ "src/transformers/modeling_utils.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/utils/imports.py", ] with patch_transformer_repo_path(tmp_folder): assert get_module_dependencies(BERT_MODELING_FILE) == expected_bert_dependencies expected_test_bert_dependencies = [ "tests/test_modeling_common.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", ] with patch_transformer_repo_path(tmp_folder): assert ( get_module_dependencies("tests/models/bert/test_modeling_bert.py") == expected_test_bert_dependencies ) # Test with a submodule (tmp_folder / "src/transformers/utils/logging.py").touch() with open(tmp_folder / BERT_MODELING_FILE, "a") as f: f.write("from ...utils import logging\n") expected_bert_dependencies = [ "src/transformers/modeling_utils.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/utils/logging.py", "src/transformers/utils/imports.py", ] with patch_transformer_repo_path(tmp_folder): assert get_module_dependencies(BERT_MODELING_FILE) == expected_bert_dependencies # Test with an object non-imported in the init create_tmp_repo(tmp_folder) with open(tmp_folder / BERT_MODELING_FILE, "a") as f: f.write("from ...utils import CONSTANT\n") expected_bert_dependencies = [ "src/transformers/modeling_utils.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/utils/__init__.py", "src/transformers/utils/imports.py", ] with patch_transformer_repo_path(tmp_folder): assert get_module_dependencies(BERT_MODELING_FILE) == expected_bert_dependencies # Test with an example create_tmp_repo(tmp_folder) expected_example_dependencies = ["src/transformers/models/bert/modeling_bert.py"] with patch_transformer_repo_path(tmp_folder): assert ( get_module_dependencies("examples/pytorch/text-classification/run_glue.py") == expected_example_dependencies ) def test_create_reverse_dependency_tree(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): tree = create_reverse_dependency_tree() init_edges = [ "src/transformers/utils/hub.py", "src/transformers/utils/imports.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", ] assert {f for f, g in tree if g == "src/transformers/__init__.py"} == set(init_edges) bert_edges = [ "src/transformers/modeling_utils.py", "src/transformers/utils/imports.py", "src/transformers/models/bert/configuration_bert.py", ] assert {f for f, g in tree if g == "src/transformers/models/bert/modeling_bert.py"} == set(bert_edges) test_bert_edges = [ 
"tests/test_modeling_common.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", ] assert {f for f, g in tree if g == "tests/models/bert/test_modeling_bert.py"} == set(test_bert_edges) def test_get_tree_starting_at(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): edges = create_reverse_dependency_tree() bert_tree = get_tree_starting_at("src/transformers/models/bert/modeling_bert.py", edges) config_utils_tree = get_tree_starting_at("src/transformers/configuration_utils.py", edges) expected_bert_tree = [ "src/transformers/models/bert/modeling_bert.py", [("src/transformers/models/bert/modeling_bert.py", "tests/models/bert/test_modeling_bert.py")], ] assert bert_tree == expected_bert_tree expected_config_tree = [ "src/transformers/configuration_utils.py", [("src/transformers/configuration_utils.py", "src/transformers/models/bert/configuration_bert.py")], [ ("src/transformers/models/bert/configuration_bert.py", "tests/models/bert/test_modeling_bert.py"), ( "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", ), ], ] # Order of the edges is random assert [set(v) for v in config_utils_tree] == [set(v) for v in expected_config_tree] def test_print_tree_deps_of(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) # There are two possible outputs since the order of the last two lines is non-deterministic. expected_std_out = """src/transformers/models/bert/modeling_bert.py tests/models/bert/test_modeling_bert.py src/transformers/configuration_utils.py src/transformers/models/bert/configuration_bert.py src/transformers/models/bert/modeling_bert.py tests/models/bert/test_modeling_bert.py""" expected_std_out_2 = """src/transformers/models/bert/modeling_bert.py tests/models/bert/test_modeling_bert.py src/transformers/configuration_utils.py src/transformers/models/bert/configuration_bert.py tests/models/bert/test_modeling_bert.py src/transformers/models/bert/modeling_bert.py""" with patch_transformer_repo_path(tmp_folder), CaptureStdout() as cs: print_tree_deps_of("src/transformers/models/bert/modeling_bert.py") print_tree_deps_of("src/transformers/configuration_utils.py") assert cs.out.strip() in [expected_std_out, expected_std_out_2] def test_init_test_examples_dependencies(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) expected_example_deps = { "examples/flax/test_flax_examples.py": [ "examples/flax/text-classification/run_glue.py", "examples/flax/test_flax_examples.py", ], "examples/pytorch/test_pytorch_examples.py": [ "examples/pytorch/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", ], "examples/tensorflow/test_tensorflow_examples.py": [ "examples/tensorflow/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", ], } expected_examples = { "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } with patch_transformer_repo_path(tmp_folder): example_deps, all_examples = init_test_examples_dependencies() assert example_deps == expected_example_deps assert 
{str(f.relative_to(tmp_folder)) for f in all_examples} == expected_examples def test_create_reverse_dependency_map(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): reverse_map = create_reverse_dependency_map() # impact of BERT modeling file (note that we stop at the inits and don't go down further) expected_bert_deps = { "src/transformers/__init__.py", "src/transformers/models/bert/__init__.py", "tests/models/bert/test_modeling_bert.py", "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } assert set(reverse_map["src/transformers/models/bert/modeling_bert.py"]) == expected_bert_deps # init gets the direct deps (and their recursive deps) expected_init_deps = { "src/transformers/utils/__init__.py", "src/transformers/utils/hub.py", "src/transformers/utils/imports.py", "src/transformers/models/bert/__init__.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", "src/transformers/configuration_utils.py", "src/transformers/modeling_utils.py", "tests/test_modeling_common.py", "tests/models/bert/test_modeling_bert.py", "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } assert set(reverse_map["src/transformers/__init__.py"]) == expected_init_deps expected_init_deps = { "src/transformers/__init__.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", "tests/models/bert/test_modeling_bert.py", "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } assert set(reverse_map["src/transformers/models/bert/__init__.py"]) == expected_init_deps # Test that with more models init of bert only gets deps to bert. 
create_tmp_repo(tmp_folder, models=["bert", "gpt2"]) with patch_transformer_repo_path(tmp_folder): reverse_map = create_reverse_dependency_map() # init gets the direct deps (and their recursive deps) expected_init_deps = { "src/transformers/__init__.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", "tests/models/bert/test_modeling_bert.py", "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } assert set(reverse_map["src/transformers/models/bert/__init__.py"]) == expected_init_deps def test_create_module_to_test_map(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) models = models = ["bert", "gpt2"] + [f"bert{i}" for i in range(10)] create_tmp_repo(tmp_folder, models=models) with patch_transformer_repo_path(tmp_folder): test_map = create_module_to_test_map(filter_models=True) expected_bert_tests = { "examples/flax/test_flax_examples.py", "examples/pytorch/test_pytorch_examples.py", "examples/tensorflow/test_tensorflow_examples.py", "tests/models/bert/test_modeling_bert.py", } for model in models: if model != "bert": assert test_map[f"src/transformers/models/{model}/modeling_{model}.py"] == [ f"tests/models/{model}/test_modeling_{model}.py" ] else: assert set(test_map[f"src/transformers/models/{model}/modeling_{model}.py"]) == expected_bert_tests # Init got filtered expected_init_tests = { "examples/flax/test_flax_examples.py", "examples/pytorch/test_pytorch_examples.py", "examples/tensorflow/test_tensorflow_examples.py", "tests/test_modeling_common.py", "tests/models/bert/test_modeling_bert.py", "tests/models/gpt2/test_modeling_gpt2.py", } assert set(test_map["src/transformers/__init__.py"]) == expected_init_tests def test_infer_tests_to_run(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) models = ["bert", "gpt2"] + [f"bert{i}" for i in range(10)] repo = create_tmp_repo(tmp_folder, models=models) commit_changes("src/transformers/models/bert/modeling_bert.py", BERT_MODEL_FILE_NEW_CODE, repo) example_tests = { "examples/flax/test_flax_examples.py", "examples/pytorch/test_pytorch_examples.py", "examples/tensorflow/test_tensorflow_examples.py", } with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", diff_with_last_commit=True) with open(tmp_folder / "test-output.txt", "r") as f: tests_to_run = f.read() with open(tmp_folder / "examples_test_list.txt", "r") as f: example_tests_to_run = f.read() assert tests_to_run == "tests/models/bert/test_modeling_bert.py" assert set(example_tests_to_run.split(" ")) == example_tests # Fake a new model addition repo = create_tmp_repo(tmp_folder, models=models) branch = repo.create_head("new_model") branch.checkout() with open(tmp_folder / "src/transformers/__init__.py", "a") as f: f.write("from .models.t5 import T5Config, T5Model\n") model_dir = tmp_folder / "src/transformers/models/t5" model_dir.mkdir(exist_ok=True) with open(model_dir / "__init__.py", "w") as f: f.write("from .configuration_t5 import T5Config\nfrom .modeling_t5 import T5Model\n") with open(model_dir / "configuration_t5.py", "w") as f: f.write("from ...configuration_utils import PretrainedConfig\ncode") with open(model_dir / "modeling_t5.py", "w") as f: modeling_code = 
BERT_MODEL_FILE.replace("bert", "t5").replace("Bert", "T5") f.write(modeling_code) test_dir = tmp_folder / "tests/models/t5" test_dir.mkdir(exist_ok=True) (test_dir / "__init__.py").touch() with open(test_dir / "test_modeling_t5.py", "w") as f: f.write( "from transformers import T5Config, T5Model\nfrom ...test_modeling_common import ModelTesterMixin\n\ncode" ) repo.index.add(["src", "tests"]) repo.index.commit("Add T5 model") with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt") with open(tmp_folder / "test-output.txt", "r") as f: tests_to_run = f.read() with open(tmp_folder / "examples_test_list.txt", "r") as f: example_tests_to_run = f.read() expected_tests = { "tests/models/bert/test_modeling_bert.py", "tests/models/gpt2/test_modeling_gpt2.py", "tests/models/t5/test_modeling_t5.py", "tests/test_modeling_common.py", } assert set(tests_to_run.split(" ")) == expected_tests assert set(example_tests_to_run.split(" ")) == example_tests with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", filter_models=False) with open(tmp_folder / "test-output.txt", "r") as f: tests_to_run = f.read() with open(tmp_folder / "examples_test_list.txt", "r") as f: example_tests_to_run = f.read() expected_tests = [f"tests/models/{name}/test_modeling_{name}.py" for name in models + ["t5"]] expected_tests = set(expected_tests + ["tests/test_modeling_common.py"]) assert set(tests_to_run.split(" ")) == expected_tests assert set(example_tests_to_run.split(" ")) == example_tests def test_infer_tests_to_run_with_test_modifs(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) models = ["bert", "gpt2"] + [f"bert{i}" for i in range(10)] repo = create_tmp_repo(tmp_folder, models=models) commit_changes( "tests/models/bert/test_modeling_bert.py", "from transformers import BertConfig, BertModel\nfrom ...test_modeling_common import ModelTesterMixin\n\ncode1", repo, ) with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", diff_with_last_commit=True) with open(tmp_folder / "test-output.txt", "r") as f: tests_to_run = f.read() assert tests_to_run == "tests/models/bert/test_modeling_bert.py" def test_infer_tests_to_run_with_examples_modifs(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) models = ["bert", "gpt2"] repo = create_tmp_repo(tmp_folder, models=models) # Modification in one example trigger the corresponding test commit_changes( "examples/pytorch/text-classification/run_glue.py", "from transformers import BertModeln\n\ncode1", repo, ) with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", diff_with_last_commit=True) with open(tmp_folder / "examples_test_list.txt", "r") as f: example_tests_to_run = f.read() assert example_tests_to_run == "examples/pytorch/test_pytorch_examples.py" # Modification in one test example file trigger that test repo = create_tmp_repo(tmp_folder, models=models) commit_changes( "examples/pytorch/test_pytorch_examples.py", """test_args = "run_glue.py"\nmore_code""", repo, ) with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", diff_with_last_commit=True) with open(tmp_folder / "examples_test_list.txt", "r") as f: example_tests_to_run = f.read() assert example_tests_to_run == "examples/pytorch/test_pytorch_examples.py" def test_parse_commit_message(self): assert parse_commit_message("Normal commit") == {"skip": False, 
"no_filter": False, "test_all": False} assert parse_commit_message("[skip ci] commit") == {"skip": True, "no_filter": False, "test_all": False} assert parse_commit_message("[ci skip] commit") == {"skip": True, "no_filter": False, "test_all": False} assert parse_commit_message("[skip-ci] commit") == {"skip": True, "no_filter": False, "test_all": False} assert parse_commit_message("[skip_ci] commit") == {"skip": True, "no_filter": False, "test_all": False} assert parse_commit_message("[no filter] commit") == {"skip": False, "no_filter": True, "test_all": False} assert parse_commit_message("[no-filter] commit") == {"skip": False, "no_filter": True, "test_all": False} assert parse_commit_message("[no_filter] commit") == {"skip": False, "no_filter": True, "test_all": False} assert parse_commit_message("[filter-no] commit") == {"skip": False, "no_filter": True, "test_all": False} assert parse_commit_message("[test all] commit") == {"skip": False, "no_filter": False, "test_all": True} assert parse_commit_message("[all test] commit") == {"skip": False, "no_filter": False, "test_all": True} assert parse_commit_message("[test-all] commit") == {"skip": False, "no_filter": False, "test_all": True} assert parse_commit_message("[all_test] commit") == {"skip": False, "no_filter": False, "test_all": True}
hf_public_repos/transformers/tests/repo_utils/test_check_docstrings.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

from check_docstrings import get_default_description, replace_default_in_arg_description  # noqa: E402


class CheckDocstringsTester(unittest.TestCase):
    def test_replace_default_in_arg_description(self):
        # Standard docstring with default.
        desc_with_default = "`float`, *optional*, defaults to 2.0"
        self.assertEqual(
            replace_default_in_arg_description(desc_with_default, 2.0), "`float`, *optional*, defaults to 2.0"
        )
        self.assertEqual(
            replace_default_in_arg_description(desc_with_default, 1.0), "`float`, *optional*, defaults to 1.0"
        )
        self.assertEqual(replace_default_in_arg_description(desc_with_default, inspect._empty), "`float`")

        # Standard docstring with default, but *optional* is not using the stars.
        desc_with_default_typo = "`float`, `optional`, defaults to 2.0"
        self.assertEqual(
            replace_default_in_arg_description(desc_with_default_typo, 2.0), "`float`, *optional*, defaults to 2.0"
        )
        self.assertEqual(
            replace_default_in_arg_description(desc_with_default_typo, 1.0), "`float`, *optional*, defaults to 1.0"
        )

        # If the default is None we do not erase the value in the docstring.
        self.assertEqual(
            replace_default_in_arg_description(desc_with_default, None), "`float`, *optional*, defaults to 2.0"
        )
        # If the default is None (and set as such in the docstring), we do not include it.
        desc_with_default = "`float`, *optional*, defaults to None"
        self.assertEqual(replace_default_in_arg_description(desc_with_default, None), "`float`, *optional*")
        desc_with_default = "`float`, *optional*, defaults to `None`"
        self.assertEqual(replace_default_in_arg_description(desc_with_default, None), "`float`, *optional*")

        # Operations are not replaced, but put in backticks.
        desc_with_default = "`float`, *optional*, defaults to 1/255"
        self.assertEqual(
            replace_default_in_arg_description(desc_with_default, 1 / 255), "`float`, *optional*, defaults to `1/255`"
        )
        desc_with_default = "`float`, *optional*, defaults to `1/255`"
        self.assertEqual(
            replace_default_in_arg_description(desc_with_default, 1 / 255), "`float`, *optional*, defaults to `1/255`"
        )

        desc_with_optional = "`float`, *optional*"
        self.assertEqual(
            replace_default_in_arg_description(desc_with_optional, 2.0), "`float`, *optional*, defaults to 2.0"
        )
        self.assertEqual(
            replace_default_in_arg_description(desc_with_optional, 1.0), "`float`, *optional*, defaults to 1.0"
        )
        self.assertEqual(replace_default_in_arg_description(desc_with_optional, None), "`float`, *optional*")
        self.assertEqual(replace_default_in_arg_description(desc_with_optional, inspect._empty), "`float`")

        desc_with_no_optional = "`float`"
        self.assertEqual(
            replace_default_in_arg_description(desc_with_no_optional, 2.0), "`float`, *optional*, defaults to 2.0"
        )
        self.assertEqual(
            replace_default_in_arg_description(desc_with_no_optional, 1.0), "`float`, *optional*, defaults to 1.0"
        )
        self.assertEqual(replace_default_in_arg_description(desc_with_no_optional, None), "`float`, *optional*")
        self.assertEqual(replace_default_in_arg_description(desc_with_no_optional, inspect._empty), "`float`")

    def test_get_default_description(self):
        # Fake function to have arguments to test.
        def _fake_function(a, b: int, c=1, d: float = 2.0, e: str = "blob"):
            pass

        params = inspect.signature(_fake_function).parameters

        assert get_default_description(params["a"]) == "`<fill_type>`"
        assert get_default_description(params["b"]) == "`int`"
        assert get_default_description(params["c"]) == "`<fill_type>`, *optional*, defaults to 1"
        assert get_default_description(params["d"]) == "`float`, *optional*, defaults to 2.0"
        assert get_default_description(params["e"]) == '`str`, *optional*, defaults to `"blob"`'
hf_public_repos/transformers/tests/extended/test_trainer_ext.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import os import re import sys from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, backend_device_count, execute_subprocess_async, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_accelerator, require_torch_non_multi_accelerator, slow, torch_device, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed bindir = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"): from run_translation import main # noqa set_seed(42) MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1" MBART_TINY = "sshleifer/tiny-mbart" @require_torch class TestTrainerExt(TestCasePlus): def run_seq2seq_quick( self, distributed=False, extra_args_str=None, predict_with_generate=True, do_train=True, do_eval=True, do_predict=True, ): output_dir = self.run_trainer( eval_steps=1, max_len=12, model_name=MBART_TINY, num_train_epochs=1, distributed=distributed, extra_args_str=extra_args_str, predict_with_generate=predict_with_generate, do_train=do_train, do_eval=do_eval, do_predict=do_predict, ) logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history if not do_eval: return eval_metrics = [log for log in logs if "eval_loss" in log.keys()] first_step_stats = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats last_step_stats = eval_metrics[-1] assert isinstance(last_step_stats["eval_bleu"], float) assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`" @require_torch_non_multi_accelerator def test_run_seq2seq_no_dist(self): self.run_seq2seq_quick() # verify that the trainer can handle non-distributed with n_gpu > 1 @require_torch_multi_accelerator def test_run_seq2seq_dp(self): self.run_seq2seq_quick(distributed=False) # verify that the trainer can handle distributed with n_gpu > 1 @require_torch_multi_accelerator def test_run_seq2seq_ddp(self): self.run_seq2seq_quick(distributed=True) @require_apex @require_torch_gpu def test_run_seq2seq_apex(self): # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. 
# self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex") # test a 2nd time - was getting `eval_loss: nan` # to reproduce the problem set distributed=False self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex") @parameterized.expand(["base", "low", "high", "mixed"]) @require_torch_multi_accelerator def test_trainer_log_level_replica(self, experiment_id): # as each sub-test is slow-ish, split into multiple sub-tests to avoid CI timeout experiments = { # test with the default log_level - should be info and thus log info once "base": {"extra_args_str": "", "n_matches": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1}, # test with high log_level and log_level_replica - should be quiet on all processes "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0}, } data = experiments[experiment_id] kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False} log_info_string = "Running training" with CaptureStderr() as cl: self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"]) n_matches = len(re.findall(log_info_string, cl.err)) self.assertEqual(n_matches, data["n_matches"]) @slow def test_run_seq2seq(self): output_dir = self.run_trainer( eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=10, distributed=False, ) # Check metrics logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history eval_metrics = [log for log in logs if "eval_loss" in log.keys()] first_step_stats = eval_metrics[0] last_step_stats = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["eval_bleu"], float) # test if do_predict saves generations and metrics contents = os.listdir(output_dir) contents = {os.path.basename(p) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def test_run_seq2seq_bnb(self): from transformers.training_args import OptimizerNames def train_and_return_metrics(optim: str) -> Tuple[int, int, float]: extra_args = "--skip_memory_metrics 0" output_dir = self.run_trainer( max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=1, optim=optim, distributed=True, # force run in a new process extra_args_str=extra_args, do_eval=False, do_predict=False, n_gpus_to_use=1, # to allow deterministic fixed memory usage ) # Check metrics logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20) gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20) loss = logs[0]["train_loss"] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value) gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value) gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb gpu_total_mem_orig =
gpu_peak_mem_orig + gpu_alloc_mem_orig gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are in `nn.Embedding`, which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # to 2 bytes, and the diff in optim memory usage is derived as follows: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate differences between gpus, let's check # that we have at least 120MB in savings expected_savings = 120 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB") self.assertGreater( gpu_alloc_mem_diff, expected_savings, "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got" f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and" f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB", ) self.assertGreater( gpu_total_mem_diff, expected_savings, "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got" f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and" f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB", ) self.assertEqual( loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" ) def run_trainer( self, max_len: int, model_name: str, num_train_epochs: int, learning_rate: float = 3e-3, optim: str = "adafactor", distributed: bool = False, extra_args_str: str = None, eval_steps: int = 0, predict_with_generate: bool = True, do_train: bool = True, do_eval: bool = True, do_predict: bool = True, n_gpus_to_use: int = None, ): data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro" output_dir = self.get_auto_remove_tmp_dir() args_train = f""" --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(num_train_epochs)} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(eval_steps)} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX """.split() args_eval = f""" --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(eval_steps)} """.split() args_predict = """ --do_predict """.split() args = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args +=
"--adafactor".split() else: args += f"--optim {optim}".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: n_gpus_to_use = backend_device_count(torch_device) master_port = get_torch_dist_unique_port() distributed_args = f""" -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py """.split() cmd = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(cmd, env=self.get_env()) else: testargs = ["run_translation.py"] + args with patch.object(sys, "argv", testargs): main() return output_dir
0
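The expected_savings = 120 constant in test_run_seq2seq_bnb above follows from the back-of-the-envelope arithmetic in its comment block; the same arithmetic as a standalone snippet:

# ~54M total params, ~29M of them in nn.Embedding (not quantized), leaving
# ~25M params whose Adam optimizer states shrink from 8 bytes per param
# (fp32 exp_avg + exp_avg_sq) to ~2 bytes per param with bnb's 8-bit optimizer.
quantized_params_m = 25  # millions of quantizable parameters
savings_mb = quantized_params_m * (8 - 2)  # MB, since params are counted in millions
print(f"expected optimizer-state savings: ~{savings_mb}MB")  # ~150MB
# the test asserts only >120MB, leaving headroom for GPU-to-GPU variance
assert savings_mb > 120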
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/fixtures/sample_text_no_unicode.txt
Text should be one-sentence-per-line, with empty lines between documents. This sample text is public domain and was randomly selected from Project Guttenberg. The rain had only ceased with the gray streaks of morning at Blazing Star, and the settlement awoke to a moral sense of cleanliness, and the finding of forgotten knives, tin cups, and smaller camp utensils, where the heavy showers had washed away the debris and dust heaps before the cabin doors. Indeed, it was recorded in Blazing Star that a fortunate early riser had once picked up on the highway a solid chunk of gold quartz which the rain had freed from its incumbering soil, and washed into immediate and glittering popularity. Possibly this may have been the reason why early risers in that locality, during the rainy season, adopted a thoughtful habit of body, and seldom lifted their eyes to the rifted or india-ink washed skies above them. "Cass" Beard had risen early that morning, but not with a view to discovery. A leak in his cabin roof,--quite consistent with his careless, improvident habits,--had roused him at 4 A. M., with a flooded "bunk" and wet blankets. The chips from his wood pile refused to kindle a fire to dry his bed-clothes, and he had recourse to a more provident neighbor's to supply the deficiency. This was nearly opposite. Mr. Cassius crossed the highway, and stopped suddenly. Something glittered in the nearest red pool before him. Gold, surely! But, wonderful to relate, not an irregular, shapeless fragment of crude ore, fresh from Nature's crucible, but a bit of jeweler's handicraft in the form of a plain gold ring. Looking at it more attentively, he saw that it bore the inscription, "May to Cass." Like most of his fellow gold-seekers, Cass was superstitious. The fountain of classic wisdom, Hypatia herself. As the ancient sage--the name is unimportant to a monk--pumped water nightly that he might study by day, so I, the guardian of cloaks and parasols, at the sacred doors of her lecture-room, imbibe celestial knowledge. From my youth I felt in me a soul above the matter-entangled herd. She revealed to me the glorious fact, that I am a spark of Divinity itself. A fallen star, I am, sir!' continued he, pensively, stroking his lean stomach--'a fallen star!--fallen, if the dignity of philosophy will allow of the simile, among the hogs of the lower world--indeed, even into the hog-bucket itself. Well, after all, I will show you the way to the Archbishop's. There is a philosophic pleasure in opening one's treasures to the modest young. Perhaps you will assist me by carrying this basket of fruit?' And the little man jumped up, put his basket on Philammon's head, and trotted off up a neighbouring street. Philammon followed, half contemptuous, half wondering at what this philosophy might be, which could feed the self-conceit of anything so abject as his ragged little apish guide; but the novel roar and whirl of the street, the perpetual stream of busy faces, the line of curricles, palanquins, laden asses, camels, elephants, which met and passed him, and squeezed him up steps and into doorways, as they threaded their way through the great Moon-gate into the ample street beyond, drove everything from his mind but wondering curiosity, and a vague, helpless dread of that great living wilderness, more terrible than any dead wilderness of sand which he had left behind. Already he longed for the repose, the silence of the Laura--for faces which knew him and smiled upon him; but it was too late to turn back now. 
His guide held on for more than a mile up the great main street, crossed in the centre of the city, at right angles, by one equally magnificent, at each end of which, miles away, appeared, dim and distant over the heads of the living stream of passengers, the yellow sand-hills of the desert; while at the end of the vista in front of them gleamed the blue harbour, through a network of countless masts. At last they reached the quay at the opposite end of the street; and there burst on Philammon's astonished eyes a vast semicircle of blue sea, ringed with palaces and towers. He stopped involuntarily; and his little guide stopped also, and looked askance at the young monk, to watch the effect which that grand panorama should produce on him.
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/fixtures/dummy_feature_extractor_config.json
{ "feature_extractor_type": "Wav2Vec2FeatureExtractor", "processor_class": "Wav2Vec2Processor" }
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/fixtures/dummy-config.json
{ "model_type": "roberta" }
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/fixtures/test_entity_vocab.json
{"[MASK]": 0, "[UNK]": 1, "[PAD]": 2, "DUMMY": 3, "DUMMY2": 4, "[MASK2]": 5}
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/fixtures/add_distilbert_like_config.json
{ "add_copied_from": true, "old_model_type": "distilbert", "new_model_patterns": { "model_name": "BERT New", "checkpoint": "huggingface/bert-new-base", "model_type": "bert-new", "model_lower_cased": "bert_new", "model_camel_cased": "BertNew", "model_upper_cased": "BERT_NEW", "config_class": "BertNewConfig", "tokenizer_class": "DistilBertTokenizer" }, "frameworks": [ "pt", "tf", "flax" ] }
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/fixtures/sample_text.txt
This text is included to make sure Unicode is handled properly: 力加勝北区ᴵᴺᵀᵃছজটডণত Text should be one-sentence-per-line, with empty lines between documents. This sample text is public domain and was randomly selected from Project Guttenberg. The rain had only ceased with the gray streaks of morning at Blazing Star, and the settlement awoke to a moral sense of cleanliness, and the finding of forgotten knives, tin cups, and smaller camp utensils, where the heavy showers had washed away the debris and dust heaps before the cabin doors. Indeed, it was recorded in Blazing Star that a fortunate early riser had once picked up on the highway a solid chunk of gold quartz which the rain had freed from its incumbering soil, and washed into immediate and glittering popularity. Possibly this may have been the reason why early risers in that locality, during the rainy season, adopted a thoughtful habit of body, and seldom lifted their eyes to the rifted or india-ink washed skies above them. "Cass" Beard had risen early that morning, but not with a view to discovery. A leak in his cabin roof,--quite consistent with his careless, improvident habits,--had roused him at 4 A. M., with a flooded "bunk" and wet blankets. The chips from his wood pile refused to kindle a fire to dry his bed-clothes, and he had recourse to a more provident neighbor's to supply the deficiency. This was nearly opposite. Mr. Cassius crossed the highway, and stopped suddenly. Something glittered in the nearest red pool before him. Gold, surely! But, wonderful to relate, not an irregular, shapeless fragment of crude ore, fresh from Nature's crucible, but a bit of jeweler's handicraft in the form of a plain gold ring. Looking at it more attentively, he saw that it bore the inscription, "May to Cass." Like most of his fellow gold-seekers, Cass was superstitious. The fountain of classic wisdom, Hypatia herself. As the ancient sage--the name is unimportant to a monk--pumped water nightly that he might study by day, so I, the guardian of cloaks and parasols, at the sacred doors of her lecture-room, imbibe celestial knowledge. From my youth I felt in me a soul above the matter-entangled herd. She revealed to me the glorious fact, that I am a spark of Divinity itself. A fallen star, I am, sir!' continued he, pensively, stroking his lean stomach--'a fallen star!--fallen, if the dignity of philosophy will allow of the simile, among the hogs of the lower world--indeed, even into the hog-bucket itself. Well, after all, I will show you the way to the Archbishop's. There is a philosophic pleasure in opening one's treasures to the modest young. Perhaps you will assist me by carrying this basket of fruit?' And the little man jumped up, put his basket on Philammon's head, and trotted off up a neighbouring street. Philammon followed, half contemptuous, half wondering at what this philosophy might be, which could feed the self-conceit of anything so abject as his ragged little apish guide; but the novel roar and whirl of the street, the perpetual stream of busy faces, the line of curricles, palanquins, laden asses, camels, elephants, which met and passed him, and squeezed him up steps and into doorways, as they threaded their way through the great Moon-gate into the ample street beyond, drove everything from his mind but wondering curiosity, and a vague, helpless dread of that great living wilderness, more terrible than any dead wilderness of sand which he had left behind. 
Already he longed for the repose, the silence of the Laura--for faces which knew him and smiled upon him; but it was too late to turn back now. His guide held on for more than a mile up the great main street, crossed in the centre of the city, at right angles, by one equally magnificent, at each end of which, miles away, appeared, dim and distant over the heads of the living stream of passengers, the yellow sand-hills of the desert; while at the end of the vista in front of them gleamed the blue harbour, through a network of countless masts. At last they reached the quay at the opposite end of the street; and there burst on Philammon's astonished eyes a vast semicircle of blue sea, ringed with palaces and towers. He stopped involuntarily; and his little guide stopped also, and looked askance at the young monk, to watch the effect which that grand panorama should produce on him.
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/fixtures/merges.txt
#version: 0.2 Ġ l Ġl o Ġlo w e r
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/fixtures/input.txt
Who was Jim Henson ? ||| Jim Henson was a puppeteer
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/fixtures/vocab.txt
[PAD] [SEP] [MASK] [CLS] [unused3] [unused4] [unused5] [unused6] [unused7] [unused8]
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/fixtures/vocab.json
{"l": 0, "o": 1, "w": 2, "e": 3, "r": 4, "s": 5, "t": 6, "i": 7, "d": 8, "n": 9, "Ġ": 10, "Ġl": 11, "Ġn": 12, "Ġlo": 13, "Ġlow": 14, "er": 15, "Ġlowest": 16, "Ġnewer": 17, "Ġwider": 18, "<unk>": 19, "<|endoftext|>": 20}
0
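merges.txt and vocab.json above together define a tiny byte-level BPE tokenizer. A usage sketch, assuming the standard fixture paths and that the pair is loaded with GPT2Tokenizer (the class that consumes this vocab/merges format):

from transformers import GPT2Tokenizer

tok = GPT2Tokenizer(
    vocab_file="tests/fixtures/vocab.json",
    merges_file="tests/fixtures/merges.txt",
    unk_token="<unk>",
)
# " lower" byte-encodes to "Ġlower"; the merges then build "Ġlow" + "er"
print(tok.tokenize(" lower"))                     # ["Ġlow", "er"]
print(tok.convert_tokens_to_ids(["Ġlow", "er"]))  # [14, 15]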
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/fixtures/preprocessor_config.json
{ "feature_extractor_type": "Wav2Vec2FeatureExtractor", "processor_class": "Wav2Vec2Processor" }
0
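The filename matters here: the Auto classes read preprocessor_config.json from a directory and dispatch on its "feature_extractor_type" key (the "processor_class" key plays the same role for AutoProcessor). A sketch, with the fixtures path given as an assumption:

from transformers import AutoFeatureExtractor

# Resolves to Wav2Vec2FeatureExtractor via the "feature_extractor_type" key.
feature_extractor = AutoFeatureExtractor.from_pretrained("tests/fixtures")
print(type(feature_extractor).__name__)  # Wav2Vec2FeatureExtractor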
hf_public_repos/transformers/tests/fixtures/tests_samples
hf_public_repos/transformers/tests/fixtures/tests_samples/GermEval/dev.txt
Gleich O darauf O entwirft O er O seine O Selbstdarstellung O " O Ecce B-OTH homo I-OTH " O in O enger O Auseinandersetzung O mit O diesem O Bild O Jesu B-PER . O 1980 O kam O der O Crown B-OTH als O Versuch O von O Toyota B-ORG , O sich O in O der O Oberen O Mittelklasse O zu O etablieren O , O auch O nach O Deutschland B-LOC . O – O 4:26 O # O Sometime B-OTH Ago/La I-OTH Fiesta I-OTH – O 23:18 O Alle O Stücke O wurden O von O Corea B-PER komponiert O mit O Ausnahme O der O einleitenden O Improvisation O zu O Sometime B-OTH Ago I-OTH . O Bis O 2013 O steigen O die O Mittel O aus O dem O EU-Budget B-ORGpart auf O rund O 120 O Millionen O Euro B-OTH . O Daraus O entwickelte O sich O im O Rokoko B-OTH die O Sitte O des O gemeinsamen O Weinens O im O Theater O , O das O die O Standesgrenzen O innerhalb O des O Publikums O überbrücken O sollte O . O Die O Spinne O hatte O sie O mit O Seidenfäden O an O ihrem O Schwanz O gefesselt O und O nach O oben O gezogen O . O In O Deutschland B-LOC ist O nach O StGB O eine O Anwerbung O für O die O Fremdenlegion O strafbar O . O Am O Donnerstag O wird O sich O zeigen O , O ob O die O Idee O der O DLR-Forscher B-ORGpart funktioniert O . O Der O sechste O Lauf O der O ADAC B-ORG GT I-ORG Masters I-ORG stand O ganz O klar O im O Mittelpunkt O des O Motorsport-Wochenendes O auf O dem O Eurospeedway B-ORG Lausitz I-ORG . O Nach O den O schwächeren O Vorgaben O der O Wall B-ORG Street I-ORG vom O Vortag O setzten O die O deutschen B-LOCderiv Standardwerte O ihren O Konsolidierungskurs O fort O . O Kolb B-PER war O seit O 1986 O im O Turnverein O als O Leiter O tätig O , O darunter O elf O Jahre O als O Hauptleiter O in O der O Männerriege O . O
0
hf_public_repos/transformers/tests/fixtures/tests_samples
hf_public_repos/transformers/tests/fixtures/tests_samples/GermEval/train.txt
Schartau B-PER sagte O dem O " O Tagesspiegel B-ORG " O vom O Freitag O , O Fischer B-PER sei O " O in O einer O Weise O aufgetreten O , O die O alles O andere O als O überzeugend O war O " O . O Firmengründer O Wolf B-PER Peter I-PER Bree I-PER arbeitete O Anfang O der O siebziger O Jahre O als O Möbelvertreter O , O als O er O einen O fliegenden O Händler O aus O dem O Libanon B-LOC traf O . O Ob O sie O dabei O nach O dem O Runden O Tisch O am O 23. O April O in O Berlin B-LOC durch O ein O pädagogisches O Konzept O unterstützt O wird O , O ist O allerdings O zu O bezweifeln O . O Bayern B-ORG München I-ORG ist O wieder O alleiniger O Top- O Favorit O auf O den O Gewinn O der O deutschen B-LOCderiv Fußball-Meisterschaft O . O Dabei O hätte O der O tapfere O Schlussmann O allen O Grund O gehabt O , O sich O viel O früher O aufzuregen O . O ARD-Programmchef B-ORGpart Günter B-PER Struve I-PER war O wegen O eines O vierwöchigen O Urlaubs O für O eine O Stellungnahme O nicht O erreichbar O . O Alternativ O sollten O sich O die O Restaurantbetreiber O aus O Sicht O der O Solingerin B-LOCderiv zu O längeren O Öffnungszeiten O verpflichten O , O um O wartende O Kunden O aufzunehmen O . O Die O Deutsche B-ORG Flugsicherung I-ORG ( O DFS B-ORG ) O beschloss O ein O Flugverbot O für O alle O internationalen O Flughäfen O mit O Ausnahme O der O beiden O Berliner B-LOCderiv Flughäfen O bis O 2.00 O Uhr O nachts O . O New O Small O Family O mit O E-Motor O : O Studie O E-Up O ! O Eine O Schwachstelle O war O beispielsweise O der O Spiegelkasten O . O Denn O durch O den O Einsatz O moderner O Fahrzeugtechnik O ( O Dieseltriebwagen O ) O und O schalldämmender O Fenster O entsteht O keine O Einschränkung O der O Wohnqualität O . O
0
hf_public_repos/transformers/tests/fixtures/tests_samples
hf_public_repos/transformers/tests/fixtures/tests_samples/GermEval/labels.txt
B-LOC B-LOCderiv B-LOCpart B-ORG B-ORGderiv B-ORGpart B-OTH B-OTHderiv B-OTHpart B-PER B-PERderiv B-PERpart I-LOC I-LOCderiv I-LOCpart I-ORG I-ORGderiv I-ORGpart I-OTH I-OTHderiv I-OTHpart I-PER I-PERderiv I-PERpart O
0
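The three GermEval fixtures above use the CoNLL-style layout: one space-separated "token tag" pair per line, a blank line between sentences, and labels.txt listing the tag inventory. A minimal reader sketch (not the examples-script implementation):

def read_conll(path):
    """Yield (tokens, tags) per sentence from space-separated token/tag lines."""
    tokens, tags = [], []
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:  # a blank line ends the current sentence
                if tokens:
                    yield tokens, tags
                    tokens, tags = [], []
                continue
            token, tag = line.rsplit(" ", 1)
            tokens.append(token)
            tags.append(tag)
    if tokens:  # flush a trailing sentence with no final blank line
        yield tokens, tags

# e.g. the first sentence of dev.txt yields (["Gleich", "darauf", ...], ["O", "O", ...])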
hf_public_repos/transformers/tests/fixtures/tests_samples
hf_public_repos/transformers/tests/fixtures/tests_samples/wmt_en_ro/test.json
{ "translation": { "en": "UN Chief Says There Is No Military Solution in Syria Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people. The U.N. chief again urged all parties, including the divided U.N. Security Council, to unite and support inclusive negotiations to find a political solution. Ban told a news conference Wednesday that he plans to meet with foreign ministers of the five permanent council nations - the U.S., Russia, China, Britain and France - on the sidelines of the General Assembly's ministerial session later this month to discuss Syria.", "ro": "Șeful ONU declară că nu există soluții militare în Siria Secretarul General Ban Ki-moon afirmă că răspunsul său la suportul militar al Rusiei pentru Siria este că „nu există o soluție militară” la conflictul care durează de aproape cinci ani iar mai multe arme nu ar face decât să agraveze violența și suferința a milioane de oameni. Șeful ONU a solicitat din nou tuturor părților, inclusiv Consiliului de securitate ONU divizat să se unifice și să susțină negocierile pentru a găsi o soluție politică. Ban a declarat miercuri în cadrul unei conferințe că intenționează să se întâlnească luna aceasta cu miniștrii de externe din cinci țări permanent prezente în consiliu - SUA, Rusia, China, Anglia și Franța - pe marginea sesiunii ministeriale a Adunării Generale pentru a discuta despre Siria." } } { "translation": { "en": "He expressed regret that divisions in the council and among the Syrian people and regional powers \"made this situation unsolvable.\" Ban urged the five permanent members to show the solidarity and unity they did in achieving an Iran nuclear deal in addressing the Syria crisis. 8 Poll Numbers That Show Donald Trump Is For Real Some have tried to label him a flip-flopper. Others have dismissed him as a joke. And some are holding out for an implosion. But no matter how some Republicans are trying to drag Donald Trump down from atop the polls, it hasn't worked (yet).", "ro": "Ban și-a exprimat regretul că divizările în consiliu și între poporul sirian și puterile regionale „au făcut această situație de nerezolvat”. Ban le-a cerut celor cinci membri permanenți să dea dovadă de solidaritatea și unitatea arătate atunci când au reușit să încheie un acord referitor la armele nucleare ale Iranului, abordând astfel criza din Siria. 8 cifre din sondaje care arată că Donald Trump are șanse reale Unii au încercat să îl eticheteze ca politician „flip-flop”. Alții l-au numit o glumă. Iar alții așteaptă implozia. Însă indiferent de modul în care unii republicani încearcă să îl dărâme pe Donald Trump din vârful sondajelor, nu a funcționat (încă)." } } { "translation": { "en": "Ten of the last 11 national polls have shown Donald Trump's lead at double digits, and some are starting to ask seriously what it means for the real estate mogul's nomination chances. Of course, it's still early in the election cycle. None of this is to say that Trump is likely to win the Republican nomination. Pundits point out that at this time in 2011, Rick Perry's lead was giving way to a rising Herman Cain, neither of whom won even one state in the nomination process. And there are many reasons he would struggle in a general election. 
But outside groups like Jeb Bush's Super PAC and the economic conservative group Club for Growth are recognizing Trump's staying power and beginning to unload their dollars to topple him.", "ro": "Zece din ultimele 11 sondaje naționale au arătat că Donald Trump conduce cu un procent din două cifre iar unele voci încep să se întrebe serios ce înseamnă acest lucru pentru șansele de numire ale mogulului imobiliar. Desigur, este încă prematur. Nimic din toate acestea nu spune că Trump va câștiga cursa pentru nominalizarea republicanilor. Pundits arată că, în aceeași perioadă a anului 2011, avansul lui Rick Perry îi făcea loc lui Herman Cain în sondaje, dar niciunul dintre ei nu a câștigat în vreun stat în cursa de nominalizare. Iar motivele pentru care s-ar lupta din greu la alegerile generale sunt numeroase. Însă grupurile din exterior precum Super PAC al lui Jeb Bush și grupul conservator economic Club for Growth admit puterea lui Trump și încep să îl susțină cu bani." } } { "translation": { "en": "Here are some recent poll numbers that suggest that the real estate mogul isn't just a passing phase: Trump's favorability ratings have turned 180 degrees. Right before Donald Trump announced his candidacy in mid-June, a Monmouth University poll showed only two in 10 Republicans had a positive view of the real estate mogul. By mid-July, it was 40 percent. In early August, it was 52 percent. Now, six in 10 Republicans have a favorable view of Donald Trump. Roughly three in 10 say they have a negative view. And these numbers hold up in early states. A Quinnipiac poll in Iowa last week found that 60 percent of Republicans there had a favorable view of Trump.", "ro": "În continuare vă prezentăm câteva cifre din sondaje recente care sugerează că mogulul imobiliar nu este doar ceva trecător: Cifrele care indică susținerea față de Trump s-au întors la 180 grade. Chiar înainte ca Donald Trump să își anunțe candidatura, la mijlocul lui iunie, un sondaj realizat de Universitatea din Monmouth arăta că doar doi din 10 republicani aveau o părere pozitivă despre mogulul imobiliar. Până la mijlocul lui iulie, procentul a urcat la 40%. La începutul lui august, era 52%. În prezent, șase din 10 republicani au o părere favorabilă despre Donald Trump. Aproximativ trei din 10 declară că au o părere negativă. Aceste cifre se mențin. Un sondaj realizat săptămâna trecută de Quinnipiac în Iowa a concluzionat că 60% dintre republicanii din regiune au o părere favorabilă despre Trump." } } { "translation": { "en": "Two-thirds of GOP voters would be happy with Trump as the nominee. In a CNN/ORC poll last week, 67 percent of Republicans said they would be either \"enthusiastic\" or \"satisfied\" if Trump were the nominee. Only two in 10 say they would be \"upset\" if he were the nominee. Only Ben Carson generates roughly the same level of enthusiasm as Trump (43 percent say they would be \"enthusiastic\" vs. 40 percent who say the same of Trump). The next closest in enthusiasm? Marco Rubio with only 21 percent.", "ro": "Două treimi dintre alegătorii GOP ar fi fericiți dacă Trump ar câștiga cursa pentru nominalizare. Într-un sondaj realizat săptămâna trecută de CNN/ORC, 67% dintre republicani au declarat că ar fi „entuziasmați” sau „mulțumiți” dacă Trump ar câștiga cursa pentru nominalizare. Doar doi din 10 declară că ar fi „supărați” dacă Trump ar câștiga cursa pentru nominalizare. 
Doar Ben Carson generează aproximativ același nivel de entuziasm ca Trump (43% declară că ar fi „entuziasmați” față de 40% care declară același lucru despre Trump). Cel mai aproape în ceea ce privește entuziasmul? Marco Rubio, cu doar 21%." } } { "translation": { "en": "On the flip side, 47 percent of Republican voters say they would be \"dissatisfied\" or \"upset\" if establishment favorite Jeb Bush becomes the nominee. A majority of Republicans don't see Trump's temperament as a problem. While Donald Trump has been widely criticized for his bombast and insults, 52 percent of leaned Republican voters nationwide think that the real estate mogul has the right temperament to be president, according to Monday's ABC News/Washington Post poll. The same number holds in the first-in-the-nation caucus state of Iowa, where the same 52 percent of Republicans think he has the personality to be commander in chief, according to Quinnipiac last week.", "ro": "De partea cealaltă, 47% dintre alegătorii republicani afirmă că ar fi „nemulțumiți” sau „supărați” dacă favoritul Jeb Bush câștigă cursa pentru nominalizare. Majoritatea republicanilor nu consideră temperamentul lui Trump o problemă. Deși Donald Trump a fost puternic criticat pentru insultele aduse și stilul său bombastic, 52% dintre alegătorii republicani la nivel național consideră că mogulul imobiliar are temperamentul potrivit pentru a fi președinte, conform sondajului realizat luni de ABC News/Washington Post. Regăsim aceleași cifre în statul Iowa, unde tot 52% dintre republicani cred că Trump are personalitatea potrivită pentru a fi conducător, conform sondajului realizat săptămâna trecută de Quinnipiac." } } { "translation": { "en": "Still, 44 percent think he doesn't have the personality to serve effectively, and almost six in 10 independents say his temperament does not belong in the White House, according to ABC/Post. Republican voters are getting used to the idea. When they put on their pundit hats, Republican voters think Trump is for real. When asked who is most likely to win the GOP nomination, four in 10 said Trump was the best bet, according to a CNN/ORC poll out last week. That's a change from when four in 10 placed their money on Jeb Bush in late July. Full disclosure: GOP voters haven't had the clearest crystal ball in the past.", "ro": "Totuși, 44% sunt de părere că nu are personalitatea necesară pentru a acționa eficient și aproape șase din 10 independenți afirmă că temperamentul său nu are ce căuta la Casa Albă, conform ABC/Post. Alegătorii republicani se obișnuiesc cu ideea. Atunci când iau atitudinea de intelectuali, alegătorii republicani consideră că Trump este autentic. Conform unui sondaj realizat săptămâna trecută de CNN/ORC, la întrebarea cine are cele mai multe șanse să câștige cursa pentru nominalizare GOP, patru din 10 au declarat că Trump. Situația s-a schimbat față de finalul lui iulie, când patru din 10 ar fi pariat pe Jeb Bush. Informare completă: în trecut, alegătorii GOP nu au citit foarte bine viitorul." } } { "translation": { "en": "At this time last cycle, four in 10 Republicans picked Rick Perry to win the nomination, vs. only 28 percent for eventual nominee Mitt Romney. Still, it shows that a plurality of GOP voters see Trump's campaign as plausible. Even if Republicans rallied around another candidate, Trump still beats almost everyone. 
Some pundits point out that the splintered field is likely contributing to Trump's lead, while anti-Trump support is be spread diffusely among more than a dozen other candidates. But a Monmouth University poll in early September shows that, in a hypothetical head-to-head matchup between Trump and most other Republican candidates, Trump almost always garners majority support.", "ro": "În aceeași perioadă a ultimelor alegeri, patru din 10 republicani l-au ales pe Rick Perry în cursa pentru nominalizare, față de doar 28% pentru Mitt Romney. Însă, aceste cifre arată că majoritatea alegătorilor GOP consideră plauzibilă campania lui Trump. Chiar dacă republicanii sau repliat spre un alt candidat. Trump încă se află în fruntea tuturor. Unele voci spun că situația divizată va contribui probabil la victoria lui Trump, în timp ce susținerea contra lui Trump se va împărți la mai mult de doisprezece candidați. Însă un sondaj derulat la începutul lui septembrie de Universitatea din Monmouth arată că, în situația ipotetică a unei colaborări între Trump și majoritatea celorlalți candidați republicani, aproape întotdeauna Trump va beneficia de susținerea majoritară." } } { "translation": { "en": "He leads Carly Fiorina by 13 points, Marco Rubio by 14 points, Walker by 15 points, Jeb Bush by 19 points, and, finally, Rand Paul, John Kasich and Chris Christie by 33 points each. He's in a dead heat with Ted Cruz. The only candidate who beats him? Ben Carson would lead the businessman by a wide 19 points in a hypothetical head-to-head. A bare majority of Donald Trump's supporters say they've made up their minds. A new CBS/NYT poll out on Tuesday shows that just more than half of voters who support Trump say they have locked in their votes. Obviously, a lot can happen to change that, and no one can really say they would never change their mind.", "ro": "Trump se află la distanță de 13 puncte de Carly Fiorina, la 14 puncte de Marco Rubio, la 15 puncte de Walker, la 19 puncte de Jeb Bush și, în cele din urmă, la câte 33 de puncte față de Rand Paul, John Kasich și Chris Christie. Este aproape la egalitate cu Ted Cruz. Singurul candidat care îl învinge? Ben Carson l-ar învinge pe omul de afaceri cu 19 puncte într-o confruntare ipotetică de unu la unu. Majoritatea susținătorilor lui Donald Trump declară că s-au decis. Un nou sondaj realizat marți de CBS/NYT arată că peste jumătate dintre alegătorii care îl susțin pe Trump declară că nu își schimbă opțiunea de vot. Evident, se pot întâmpla multe în acest sens și nimeni nu poate spune că aceștia nu se vor răzgândi niciodată." } } { "translation": { "en": "46 percent said they are leaving the door open to switching candidates. Still, Trump's strongest competition at the moment is from fellow outsider neurosurgeon Ben Carson, but voters who say they have made up their minds are twice as likely to go for Trump. Six in 10 Republicans say they agree with Trump on immigration. Even since Donald Trump called immigrants from Mexico \"rapists\" in his campaign announcement speech two months ago, immigration has been front and center in the 2016 conversation. Some are worried that Trump's bombast will drive crucial Hispanic voters away from the Republican Party and damage rebranding efforts.", "ro": "46% afirmă că lasă portița deschisă posibilității de a-și schimba opțiunea. Cu toate acestea, cel mai important adversar al lui Trump este în prezent neurochirurgul Ben Carson, însă este de două ori mai probabil ca alegătorii care declară că s-au decis să voteze cu Trump. 
Șase din 10 republicani afirmă că sunt de acord cu Trump în problema imigrării. De când Donald Trump i-a numit pe imigranții din Mexic „violatori” în discursul de deschidere a campaniei sale, în urmă cu două luni, imigrarea a fost subiectul central în campania pentru 2016. Unii sunt îngrijorați că stilul bombastic al lui Trump va duce la o scindare între alegătorii hispanici importanți și Partidul Republican și va prejudicia eforturile de rebranding." } } { "translation": { "en": "But according to Monday's new ABC/Post poll, six in 10 Republicans say they agree with Trump on immigration issues. So as long as immigration remains in the spotlight, it seems Donald Trump will remain too. Frustration with government is climbing to new highs. Donald Trump and Ben Carson now account for roughly half of the support from Republican voters, largely due to their outsider status. Six in 10 Republicans in Monday's new ABC/Post poll say they want a political outsider over someone with government experience. And they are angry at Washington, too.", "ro": "Însă, conform sondajului realizat luni de ABC/Post, șase din 10 republicani afirmă că sunt de acord cu Trump în problema imigrării. Așa că, se pare că atâta timp cât problema imigrării rămâne în lumina reflectoarelor, la fel va rămâne și Doland Trump. Frustrarea față de autorități atinge noi culmi. Donald Trump și Ben Carson sunt acum susținuți de aproape jumătate dintre alegătorii republicani, în mare parte datorită statutului lor de outsideri. Conform sondajului realizat luni de ABC/Post, șase din 10 republicani afirmă că preferă un outsider politic în detrimentul cuiva cu experiență în guvernare. Oamenii sunt de asemenea supărați pe autoritățile de la Washington." } } { "translation": { "en": "A Des Moines Register/Bloomberg poll in Iowa from two weeks ago shows that three in four Iowa Republicans are frustrated with Republicans in Congress, with 54 percent \"unsatisfied\" and 21 percent \"mad as hell.\" Jeremy Corbyn to make debut at Prime Minister's Questions Since his election, Mr Corbyn's debut at PMQs has been keenly awaited New Labour leader Jeremy Corbyn is to make his debut at Prime Minister's Questions later, taking on David Cameron for the first time.", "ro": "Un sondaj derulat în urmă cu două săptămâni în Iowa de către Des Moines Register/Bloomberg arată că trei din patru republicani din Iowa sunt frustrați de prestația republicanilor din COngres, 54% declarându-se „nemulțumiți” iar 21% „nervoși la culme”. Jeremy Corbyn își face debutul la Prime Minister's Questions Încă de la alegerea sa, debutul domnului Corbyn la PMQs a fost îndelung așteptat Noul lider al Partidului Laburist, Jeremy Corbyn, își va face mai târziu debutul la Prime Minister's Questions, confruntându-se pentru prima dată cu David Cameron." } } { "translation": { "en": "Mr Corbyn will rise to ask the first of his six allotted questions shortly after midday, with his performance likely to be closely scrutinised by the media and Labour MPs. He has called for \"less theatre and more facts\" at the weekly showpiece. He has also said he could skip some sessions, leaving them to colleagues. The encounter will be the first parliamentary test of Mr Corbyn's leadership, coming after his appointment of a shadow cabinet and his speech to the TUC annual congress on Tuesday.", "ro": "Dl Corbyn va adresa primele dintre cele șase întrebări la care are dreptul la scurt timp după prânz; prestația sa va fi probabil analizată îndeaproape de mass-media și parlamentarii laburiști. 
În cadrul aparițiilor săptămânale, el a cerut „mai puțin teatru și mai multe fapte”. A declarat de asemenea că poate renunța la câteva participări și că le cedează colegilor săi. Confruntarea va fi primul test parlamentar al Dl Corbyn în poziție de lider, venind după ce a numit un „cabinet fantomă” și după discursul pe care l-a ținut marți la congresul anual TUC." } } { "translation": { "en": "Meanwhile, the Labour leader's decision to stand in silence during the singing of the national anthem at a service on Tuesday to mark the 75th anniversary of the Battle of Britain has attracted criticism from a number of Tory MPs and is the focus of several front page stories in the newspapers. Mr Corbyn's decision not to sing the national anthem has attracted attention A spokesman for Mr Corbyn said he had \"stood in respectful silence\" and did recognise the \"heroism of the Royal Air Force in the Battle of Britain.\"", "ro": "Între timp, decizia liderului Partidului laburist de a păstra tăcerea la rostirea imnului național în cadrul unei slujbe ținute marți cu ocazia aniversării a 75 de ani de la Bătălia Angliei a atras critici din partea unor parlamentari conservatori și a ținut prima pagină a ziarelor. Decizia domnului Corbyn de a nu cânta imnul național a atras atenția Un purtător de cuvânt al Dl Corbyn a declarat că acesta „a păstrat tăcerea în mod respectuos” și a recunoscut „eroismul Forțelor aeriene britanice în Bătălia Angliei.”" } } { "translation": { "en": "But a member of Mr Corbyn's shadow cabinet, Owen Smith, told BBC Two's Newsnight programme he would have advised the Labour leader to sing the national anthem \"irrespective\" of his belief that the monarchy should be abolished. Nearly a dozen shadow ministers have refused to serve in Mr Corbyn's top team, citing differences over the economy, defence and foreign affairs, while less than a sixth of the parliamentary party originally backed him as leader. BBC political correspondent Robin Brant says policy differences are also \"stacking up\" within Labour following Mr Corbyn's appointment over its position on the European Union and the government's cap on benefits.", "ro": "Însă un membru al cabinetului fantomă al Dl Corbyn, Owen Smith, a declarat pentru emisiunea Two's Newsnight transmisă de BBC că i-ar fi recomandat liderului laburist să cânte imnul național „indiferent” de credința sa că monarhia ar trebui abolită. În jur de doisprezece miniștri din cabinetul fantomă au refuzat să facă parte din echipa de frunte a Dl Corbyn, argumentând prin diferențe de opinie legate de economie, apărare și externe, în timp ce mai puțin de o șesime din partidul parlamentar l-a susținut ca lider. Corespondentul politic al BBC, Robin Brant, declară că diferențele de politică „se cumulează” în Partidul Laburist după numirea domnului Corbyn referitor la poziția sa față de Uniunea Europeană și limita de beneficii." } } { "translation": { "en": "Mr Corbyn told the TUC conference Labour was putting forward amendments to remove the whole idea of a cap altogether. Hours later Mr Smith, the shadow work and pensions secretary, said the party was \"very clear\" that it was only opposing government plans to reduce the level of cap from £26,000 to £23,000. Mr Corbyn will be the fifth Labour leader that David Cameron has faced across the despatch box over the past decade since he became Tory leader. 
The Labour leader, who has promised a different approach to politics, says he has \"crowd sourced\" ideas for questions to ask Mr Cameron and has been given more than 30,000 suggestions.", "ro": "Dl Corbyn a declarat la conferința TUC că Partidul Laburist va aduce modificări prin care se va elimina integral ideea limitării. Câteva ore mai târziu, Dl Smith, Ministrul Muncii și Pensiilor, a declarat că partidul „este foarte clar” în opoziția exclusivă față de planurile guvernului de a reduce nivelul „cap” de la 26.000 lire la 23.000 lire. Dl Corbyn va fi al cincilea lider laburist cu care se confruntă David Cameron la tribună în ultimul deceniu, de când a preluat conducerea Partidului Conservator. Liderul laburist, care a promis o abordare diferită a politicii, spune că are idei „din surse externe” pentru întrebări pe care să i le adreseze Domnului Cameron și că a primit peste 30.000 de sugestii." } } { "translation": { "en": "The Islington North MP has said PMQs is too confrontational and that he will refrain from both \"repartee\" and trading barbs, instead vowing to focus on serious issues such as poverty, inequality and the challenges facing young people. Mr Corbyn has said that Angela Eagle, the shadow business secretary, will deputise for him at PMQs when he does not attend - for instance when Mr Cameron is travelling abroad. He has also floated the idea of allowing other colleagues to take the floor on occasion, saying he had approached the Commons Speaker John Bercow to discuss the issue.", "ro": "Parlamentarul Islington North a afirmat că PMQs implică un nivel de confruntare prea înalt și că se va abține de la replici și atacuri, angajându-se să se concentreze în schimb pe probleme serioase precum sărăcia, inegalitatea și provocările cu care se confruntă tinerii. Dl Corbyn a declarat că Angela Eagle, Ministrul de finanțe, îi va ține locul la PMQs atunci când el nu poate participa - de exemplu atunci când Dl Cameron se deplasează în străinătate. A exprimat de asemenea ideea că va permite altor colegi să ia cuvântul ocazional, spunând că l-a abordat pe Președintele Camerei Deputaților, John Bercow, pentru a discuta acest aspect." } } { "translation": { "en": "When he became leader in 2005, Mr Cameron said he wanted to move away from the \"Punch and Judy\" style of politics often associated with PMQs but admitted some years later that he had failed. Since it was first televised in 1990, PMQs has been seen as a key barometer of a leader's judgement, their command of the Commons and their standing among their fellow MPs although critics have argued it has become a caricature and is in need of far-reaching reforms. 'Shot in Joburg': Homeless youth trained as photographers Downtown Johannesburg is a tough place to be homeless.", "ro": "În 2005, când a preluat conducerea, Dl Cameron a declarat că dorește să renunțe la stilul politic „Punch and Judy” asociat adesea cu PMQs însă a recunoscut câțiva ani mai târziu că nu a reușit în demersul său. De la prima transmisie, în 1990, PMQs a fost considerată un barometru cheie al raționamentului unui lider, al modului în care acesta conduce Camera Deputaților și a poziției sale în rândul colegilor parlamentari, deși criticii afirmă a ca devenit o caricatură și că are nevoie de o reformare profundă. „Cadru în Joburg”: Tineri fără adăpost beneficiază de cursuri de fotografie Este dificil să fii un om fără adăpost în Johannesburg." } } { "translation": { "en": "But one group of former street children have found a way to learn a skill and make a living. 
\"I was shot in Joburg\" is a non-profit studio that teaches homeless youngsters how to take photographs of their neighbourhood and make a profit from it. BBC News went to meet one of the project's first graduates. JD Sports boss says higher wages could hurt expansion JD Sports Executive Chairman Peter Cowgill says a higher minimum wage for UK workers could mean \"more spending power in the pockets of potential consumers.\" But that spending power is unlikely to outweigh the higher labour costs at his firm, he says.", "ro": "Însă un grup de oameni care au trăit pe străzi în copilărie au găsit un mod de a învăța o meserie și de a-și câștiga traiul. „I was shot în Joburg” este un studio non-profit care îi învață pe tinerii fără adăpost să facă fotografii ale zonelor în care trăiesc și să câștige bani din asta. BBC News s-a întâlnit cu unul dintre primii absolvenți ai proiectului. Șeful JD Sports spune că salariile mai mari ar putea dăuna extinderii Președintele JD Sports, Peter Cowgill, declară că o creștere a salariului minim în Marea Britanie ar putea însemna „o putere de cumpărare mai mare în buzunarele potențialilor consumatori.” Este însă puțin probabil ca respectiva putere de cumpărare să depășească costurile mai mari pentru forța de muncă în cadrul firmei, afirmă el." } } { "translation": { "en": "The costs could hit JD Sports' expansion plans, he added, which could mean fewer extra jobs. Thanasi Kokkinakis backed by Tennis Australia president Steve Healy Thanasi Kokkinakis deserves kudos rather than criticism for his behaviour. Thanasi Kokkinakis has been the collateral damage in the recent storm around his friend Nick Kyrgios and deserves kudos rather than criticism for his own behaviour, according to Tennis Australia president Steve Healy.", "ro": "Costurile ar putea avea impact asupra planurilor de extindere ale JD Sports, a adăugat el, ceea ce ar putea însemna mai puține locuri de muncă noi. Thanasi Kokkinakis susținut de președintele Tennis Australia, Steve Healy Thanasi Kokkinakis ar merita să fie lăudat și nu criticat pentru comportamentul său. Thanasi Kokkinakis a fost victimă colaterală în „furtuna” creată în jurul prietenului său, Nick Kyrgios, iar comportamentul său merită mai degrabă cuvinte de laudă și nu critică, în opinia președintelui Tennis Australia, Steve Healy." } }
0
hf_public_repos/transformers/tests/fixtures/tests_samples
hf_public_repos/transformers/tests/fixtures/tests_samples/wmt_en_ro/train.json
{ "translation": { "en": "Corrections to votes and voting intentions: see Minutes Assignment conferred on a Member: see Minutes Membership of committees and delegations: see Minutes Decisions concerning certain documents: see Minutes Forwarding of texts adopted during the sitting: see Minutes Dates for next sittings: see Minutes", "ro": "Corectările voturilor şi intenţiile de vot: a se vedea procesul-verbal Misiune încredinţată unui deputat: consultaţi procesul-verbal Componenţa comisiilor şi a delegaţiilor: a se vedea procesul-verbal Decizii privind anumite documente: a se vedea procesul-verbal Transmiterea textelor adoptate în cursul prezentei şedinţe: a se vedea procesul-verbal Calendarul următoarelor şedinţe: a se vedea procesul-verbal" } } { "translation": { "en": "Membership of Parliament: see Minutes Approval of Minutes of previous sitting: see Minutes Membership of Parliament: see Minutes Verification of credentials: see Minutes Documents received: see Minutes Written statements and oral questions (tabling): see Minutes Petitions: see Minutes Texts of agreements forwarded by the Council: see Minutes Action taken on Parliament's resolutions: see Minutes Agenda for next sitting: see Minutes Closure of sitting (The sitting was closed at 7.45 p.m.)", "ro": "Componenţa Parlamentului: a se vedea procesul-verbal Aprobarea procesului-verbal al şedinţei precedente: a se vedea procesul-verbal Componenţa Parlamentului: a se vedea procesul-verbal Verificarea prerogativelor: a se vedea procesul-verbal Depunere de documente: a se vedea procesul-verbal Declaraţii scrise şi întrebări orale (depunere): consultaţi procesul-verbal Petiţii: a se vedea procesul-verbal Transmiterea de către Consiliu a textelor acordurilor: a se vedea procesul-verbal Cursul dat rezoluţiilor Parlamentului: a se vedea procesul-verbal Ordinea de zi a următoarei şedinţe: a se vedea procesul-verbal Ridicarea şedinţei (Se levanta la sesión a las 19.45 horas)" } } { "translation": { "en": "Election of Vice-Presidents of the European Parliament (deadline for submitting nominations): see Minutes (The sitting was suspended at 12.40 p.m. and resumed at 3.00 p.m.) Election of Quaestors of the European Parliament (deadline for submitting nominations): see Minutes (The sitting was suspended at 3.25 p.m. and resumed at 6.00 p.m.) Agenda for next sitting: see Minutes Closure of sitting (The sitting was closed at 6.15 p.m.) Opening of the sitting (The sitting was opened at 9.35 a.m.) Documents received: see Minutes Approval of Minutes of previous sitting: see Minutes Membership of Parliament: see Minutes", "ro": "Alegerea vicepreşedinţilor Parlamentului European (termenul de depunere a candidaturilor): consultaţi procesul-verbal (Die Sitzung wird um 12.40 Uhr unterbrochen und um 15.00 Uhr wiederaufgenommen). Alegerea chestorilor Parlamentului European (termenul de depunere a candidaturilor): consultaţi procesul-verbal (Die Sitzung wird um 15.25 Uhr unterbrochen und um 18.00 Uhr wiederaufgenommen). Ordinea de zi a următoarei şedinţe: a se vedea procesul-verbal Ridicarea şedinţei (Die Sitzung wird um 18.15 Uhr geschlossen.) Deschiderea şedinţei (Die Sitzung wird um 9.35 Uhr eröffnet.) Depunerea documentelor: a se vedea procesul-verbal Aprobarea procesului-verbal al şedinţei precedente: a se vedea procesul-verbal Componenţa Parlamentului: a se vedea procesul-verbal" } } { "translation": { "en": "Membership of committees (deadline for tabling amendments): see Minutes (The sitting was suspended at 7 p.m. and resumed at 9 p.m.) 
Agenda for next sitting: see Minutes Closure of sitting (The sitting was suspended at 23.25 p.m.) Documents received: see Minutes Communication of Council common positions: see Minutes (The sitting was suspended at 11.35 a.m. and resumed for voting time at noon) Approval of Minutes of previous sitting: see Minutes Committee of Inquiry into the crisis of the Equitable Life Assurance Society (extension of mandate): see Minutes", "ro": "Componenţa comisiilor (termenul de depunere a amendamentelor): consultaţi procesul-verbal (La seduta, sospesa alle 19.00, è ripresa alle 21.00) Ordinea de zi a următoarei şedinţe: a se vedea procesul-verbal Ridicarea şedinţei (Die Sitzung wird um 23.25 Uhr geschlossen.) Depunerea documentelor: a se vedea procesul-verbal Comunicarea poziţiilor comune ale Parlamentului: a se vedea procesul-verbal (La séance, suspendue à 11h35 dans l'attente de l'Heure des votes, est reprise à midi) Aprobarea procesului-verbal al şedinţei precedente: a se vedea procesul-verbal Comisia de anchetă privind criza societăţii de asigurări \"Equitable Life” (prelungirea mandatului): consultaţi procesul-verbal" } } { "translation": { "en": "Announcement by the President: see Minutes 1. Membership of committees (vote) 2. Amendment of the ACP-EC Partnership Agreement (vote) 4. Certification of train drivers operating locomotives and trains on the railway system in the Community (vote) 6. Law applicable to non-contractual obligations (\"ROME II\") (vote) 8. Seventh and eighth annual reports on arms exports (vote) Corrections to votes and voting intentions: see Minutes Membership of committees and delegations: see Minutes Request for waiver of parliamentary immunity: see Minutes Decisions concerning certain documents: see Minutes", "ro": "Comunicarea Preşedintelui: consultaţi procesul-verbal 1. Componenţa comisiilor (vot) 2. Modificarea Acordului de parteneriat ACP-CE (\"Acordul de la Cotonou”) (vot) 4. Certificarea mecanicilor de locomotivă care conduc locomotive şi trenuri în sistemul feroviar comunitar (vot) 6. Legea aplicabilă obligaţiilor necontractuale (\"Roma II”) (vot) 8. Al şaptelea şi al optulea raport anual privind exportul de armament (vot) Corectările voturilor şi intenţiile de vot: a se vedea procesul-verbal Componenţa comisiilor şi a delegaţiilor: a se vedea procesul-verbal Cerere de ridicare a imunităţii parlamentare: consultaţi procesul-verbal Decizii privind anumite documente: a se vedea procesul-verbal" } } { "translation": { "en": "Written statements for entry", "ro": "Declaraţii scrise înscrise" } } { "translation": { "en": "Written statements for entry in the register (Rule 116): see Minutes Forwarding of texts adopted during the sitting: see Minutes Dates for next sittings: see Minutes Adjournment of the session I declare the session of the European Parliament adjourned. (The sitting was closed at 1 p.m.) Approval of Minutes of previous sitting: see Minutes Membership of Parliament: see Minutes Request for the defence of parliamentary immunity: see Minutes Appointments to committees (proposal by the Conference of Presidents): see Minutes Documents received: see Minutes Texts of agreements forwarded by the Council: see Minutes", "ro": "Declaraţii scrise înscrise în registru (articolul 116 din Regulamentul de procedură): a se vedea procesul-verbal Transmiterea textelor adoptate în cursul prezentei şedinţe: a se vedea procesul-verbal Calendarul următoarelor şedinţe: a se vedea procesul-verbal Întreruperea sesiunii Dichiaro interrotta la sessione del Parlamento europeo. 
(La seduta è tolta alle 13.00) Aprobarea procesului-verbal al şedinţei precedente: a se vedea procesul-verbal Componenţa Parlamentului: a se vedea procesul-verbal Cerere de apărare a imunităţii parlamentare: consultaţi procesul-verbal Numiri în comisii (propunerea Conferinţei preşedinţilor): consultaţi procesul-verbal Depunerea documentelor: a se vedea procesul-verbal Transmiterea de către Consiliu a textelor acordurilor: a se vedea procesul-verbal" } } { "translation": { "en": "Action taken on Parliament's resolutions: see Minutes Oral questions and written statements (tabling): see Minutes Written statements (Rule 116): see Minutes Agenda: see Minutes 1. Appointments to parliamentary committees (vote): see Minutes Voting time Agenda for next sitting: see Minutes Closure of sitting (The sitting was closed at 12 midnight) Opening of the sitting (The sitting was opened at 09.05) Documents received: see Minutes Approval of Minutes of previous sitting: see Minutes 1. Protection of passengers against displaced luggage (vote) 2.", "ro": "Continuări ale rezoluţiilor Parlamentului: consultaţi procesul-verbal Declaraţii scrise şi întrebări orale (depunere): consultaţi procesul-verbal Declaraţii scrise (articolul 116 din Regulamentul de procedură) Ordinea de zi: a se vedea procesul-verbal 1. Numiri în comisiile parlamentare (vot): consultaţi procesul-verbal Timpul afectat votului Ordinea de zi a următoarei şedinţe: a se vedea procesul-verbal Ridicarea şedinţei (La seduta è tolta alle 24.00) Deschiderea şedinţei (The sitting was opened at 09.05) Depunerea documentelor: a se vedea procesul-verbal Aprobarea procesului-verbal al şedinţei precedente: a se vedea procesul-verbal 1. Protecţia pasagerilor împotriva deplasării bagajelor (vot) 2." } } { "translation": { "en": "Approval of motor vehicles with regard to the forward field of vision of the driver (vote) 3. EC-Korea Agreement on scientific and technological cooperation (vote) 4. Mainstreaming sustainability in development cooperation policies (vote) 5. Draft Amending Budget No 1/2007 (vote) 7. EC-Gabon Fisheries Partnership (vote) 10. Limitation periods in cross-border disputes involving personal injuries and fatal accidents (vote) 12. Strategy for a strengthened partnership with the Pacific Islands (vote) 13. The European private company statute (vote) That concludes the vote.", "ro": "Omologarea vehiculelor cu motor cu privire la câmpul de vizibilitate înainte al conducătorului auto (vot) 3. Acordul CE-Coreea de cooperare ştiinţifică şi tehnologică (vot) 4. Integrarea durabilităţii în politicile de cooperare pentru dezvoltare (vot) 5. Proiect de buget rectificativ nr.1/2007 (vot) 7. Acordul de parteneriat în domeniul pescuitului între Comunitatea Europeană şi Republica Gaboneză (vot) 10. Termenele de prescripţie aplicabile în cadrul litigiilor transfrontaliere cu privire la vătămările corporale şi accidentele mortale (vot) 12. Relaţiile UE cu insulele din Pacific: Strategie pentru un parteneriat consolidat (vot) 13. Statutul societăţii private europene (vot) Damit ist die Abstimmungsstunde beendet." 
} } { "translation": { "en": "Corrections to votes and voting intentions: see Minutes Assignment conferred on a Member: see Minutes Membership of committees and delegations: see Minutes Decisions concerning certain documents: see Minutes Forwarding of texts adopted during the sitting: see Minutes Dates for next sittings: see Minutes", "ro": "Corectările voturilor şi intenţiile de vot: a se vedea procesul-verbal Misiune încredinţată unui deputat: consultaţi procesul-verbal Componenţa comisiilor şi a delegaţiilor: a se vedea procesul-verbal Decizii privind anumite documente: a se vedea procesul-verbal Transmiterea textelor adoptate în cursul prezentei şedinţe: a se vedea procesul-verbal Calendarul următoarelor şedinţe: a se vedea procesul-verbal" } } { "translation": { "en": "Written statements for entry", "ro": "Declaraţii scrise înscrise" } }
0
hf_public_repos/transformers/tests/fixtures/tests_samples
hf_public_repos/transformers/tests/fixtures/tests_samples/wmt_en_ro/val.json
{ "translation": { "en": "Brazil's Former Presidential Chief-of-Staff to Stand Trial A federal judge on Tuesday accepted the charges filed against Brazil's former presidential chief of staff for his alleged involvement in a massive corruption scheme at state-owned oil company Petrobras. The federal prosecutor's office said Jose Dirceu will face trial on the corruption, racketeering and money laundering charges filed earlier this month. Fourteen other people will also be tried, including Joao Vaccari Neto, the former treasurer of Brazil's governing Workers' Party and Renato de Souza Duque, Petrobras' former head of corporate services.", "ro": "Fostul șef al cabinetului prezidențial brazilian este adus în fața instanței Marți, un judecător federal a acceptat acuzațiile aduse împotriva fostului șef al cabinetului prezidențial brazilian pentru presupusa implicare a acestuia într-o schemă masivă de corupție privind compania petrolieră de stat Petrobras. Biroul procurorului federal a declarat că Jose Dirceu va fi trimis în judecată pentru acuzațiile de corupție, înșelătorie și spălare de bani aduse în această lună. Alte paisprezece persoane vor fi judecate, printre acestea numărându-se Joao Vaccari Neto, fostul trezorier al Partidului Muncitorilor, aflat la putere în Brazilia, și Renato de Souza Duque, fostul președinte al serviciilor pentru întreprinderi ale Petrobras." } } { "translation": { "en": "Dirceu is the most senior member of the ruling Workers' Party to be taken into custody in connection with the scheme. Dirceu served as former President Luiz Inacio Lula da Silva's chief of staff between 2003 and 2005. He was arrested early August in his home, where he already was under house arrest serving an 11-year sentence for his involvement in a cash-for-votes scheme in Congress more than 10 years ago. Prosecutors have said that Dirceu masterminded the kickback scheme at Petrobras, accepted bribes while in office and continued to receive payments from contractors after he was jailed in late 2013 for the vote-buying scandal.", "ro": "Dirceu este cel mai vechi membru al Partidului Muncitorilor aflat la guvernare luat în custodie pentru legăturile cu această schemă. Dirceu a servit ca șef de cabinet al fostului președinte Luiz Inacio Lula da Silva între 2003 și 2005. A fost arestat la începutul lui august de acasă, unde deja se afla sub arest la domiciliu, cu o pedeapsă de 11 ani pentru implicarea într-o schemă de cumpărare a voturilor în Congres cu peste 10 ani în urmă. Procurorii au declarat că Dirceu a dezvoltat schema de luare de mită de la Petrobras, a acceptat mită în timp ce se afla în funcție și a continuat să primească plăți de la antreprenori după ce a fost închis la sfârșitul lui 2013 pentru scandalul voturilor cumpărate." } } { "translation": { "en": "According to prosecutors, the scheme at Petrobras involved roughly $2 billion in bribes and other illegal funds. Some of that money was allegedly funneled back to campaign coffers of the ruling party and its allies. It also allegedly included the payment of bribes to Petrobras executives in return for inflated contracts. 'Miraculous' recovery for Peshawar massacre schoolboy A teenager paralysed after being shot four times in Pakistan's deadliest terror attack has made a \"miraculous\" recovery following treatment in the UK. 
Muhammad Ibrahim Khan, 13, had been told by doctors in Pakistan that he would never walk again.", "ro": "Conform procurorilor, schema de la Petrobras a implicat aproximativ 2 miliarde de dolari sub formă de mită și alte fonduri ilegale. O parte din acei bani s-ar fi întors în fondul de campanie al partidului aflat la guvernare și al aliaților acestora. De asemenea, ar fi inclus mită către directorii Petrobras în schimbul unor contracte umflate. Recuperarea „miraculoasă” a unui elev supraviețuitor al masacrului de la Peshawar Un adolescent paralizat după ce fusese împușcat de patru ori în cel mai cumplit atac terorist din Pakistan a reușit o recuperare „miraculoasă” după ce a urmat un tratament în Regatul Unit. Lui Mohamed Ibrahim Khan, în vârstă de 13 ani, doctorii din Pakistan îi spuseseră că nu va mai putea să meargă niciodată." } } { "translation": { "en": "At least 140 people, mostly children, were killed when gunmen stormed Peshawar's Army Public School last December. Muhammad, who arrived in London last month for surgery, is being discharged from hospital later. Exactly nine months ago, on an ordinary Tuesday morning, Muhammad sat in his first aid class listening to his teachers intently. At the same time seven gunmen disguised in security uniforms were entering the Army Public School. They were strapped with explosives and had one simple mission in mind: Kill every man, woman and child they came across. \"I can't forget what happened that day,\" Muhammad says with a severe stare.", "ro": "Cel puțin 140 de persoane, majoritatea copii, au fost ucise când bărbați înarmați au atacat școala publică a armatei din Peshawar în luna decembrie a anului trecut. Mohamed, care a sosit la Londra luna trecută pentru operație, va fi externat mai târziu din spital. Exact cu nouă luni în urmă, într-o dimineață obișnuită de marți, Mohamed stătea la ora de primul ajutor și își asculta atent profesorii. Chiar atunci, șapte bărbați înarmați deghizați în uniformele agenților de pază intrau în școala publică a armatei. Purtau centuri cu explozivi și aveau de îndeplinit o misiune simplă: să îi ucidă pe toți bărbații, femeile și copiii care le ieșeau în cale. „Nu pot uita ce s-a întâmplat în acea zi”, spune Mohamed cu o privire aspră." } } { "translation": { "en": "We were sitting in the auditorium, we were asking questions... and then we heard heavy gunfire outside. The terrorists moved inside and they started killing - our teacher was burned alive. Muhammad described pulling four other pupils out of the auditorium as the carnage unfolded. He said he then heard his friend, Hamza calling to him. He said, 'oh brother save me'. I held his hand. That's when I was shot in the back, and he was shot in the head. Most of the people killed in the attack were pupils Hamza died in Muhammad's arms. Muhammad recalled blacking out after that, and the next thing he knew he was in a hospital bed, paralysed from the waist down.", "ro": "Stăteam în amfiteatru, puneam întrebări... apoi am auzit focuri de armă afară. Teroriștii au intrat înăuntru și au început să ucidă. Profesorul nostru a fost ars de viu. Mohamed descrie cum a scos patru elevi din amfiteatru în timp ce se desfășura carnagiul. Apoi spune că și-a auzit prietenul, pe Hamza, strigându-l. Spunea „oh, frate, salvează-mă”. L-am ținut de mână. Atunci eu am fost împușcat în spate, iar el în cap. Cei mai mulți dintre cei uciși în atac erau elevi Hamza a murit în brațele lui Mohamed. 
Mohamed își amintește că imediat după asta a leșinat și că următorul lucru pe care l-a știut a fost că se afla pe un pat de spital, paralizat de la brâu în jos." } } { "translation": { "en": "Doctors in Peshawar in northern Pakistan, and then Rawalpindi, close to the capital, told his family there was no treatment, and he would never walk again. \"Seeing him I felt like my soul had left my body,\" says Muhammad's father, Sher Khan Those nine months were the hardest in my life. But Mr Khan and his wife, Sherbano, refused to believe that their cricket-mad son would never be able to use his legs again. They campaigned, and appealed for help on Pakistani TV, gaining the support of high profile people such as cricketer turned politician Imran Khan.", "ro": "Doctorii din Peshawar din nordul Pakistanului, apoi cei din Rawalpindi, aproape de capitală, i-au spus familiei sale că nu exista tratament și că nu va mai putea merge niciodată. „Când l-am văzut, am simțit cum îmi iese sufletul”, spune Sher Khan, tatăl lui Mohamed. Acele nouă luni au fost cele mai grele din viața mea. Însă Khan și soția lui, Sherbano, au refuzat să creadă că fiul lor atât de pasionat de crichet nu-și va mai putea folosi vreodată picioarele. Au făcut o campanie și au cerut ajutor de la televiziunea pakistaneză, atrăgând sprijinul unor oameni faimoși precum Imran Khan, jucător de crichet devenit politician." } } { "translation": { "en": "Finally, they were able to raise the funds to bring Muhammad to the UK and provide him with treatment at London's private Harley Street Clinic. Consultant neurosurgeon Irfan Malik described Muhammad as \"terrified\" when he first arrived at the hospital. \"He'd spent the last [few] months lying on a bed, unable to move side to side,\" says Mr Malik. He was weak, he had a pressure sore on his back. He wasn't in great shape. A vertebra at the base of Muhammad's spine was destroyed Muhammad was shot in his shoulder, his hip, and his back during the attack, damaging his lower spine - leading to paralysis.", "ro": "Într-un final, au reușit să strângă fonduri pentru a-l duce pe Mohamed în Regatul Unit și a-i oferi tratament la clinica privată Harley Street din Londra. Neurochirurgul consultant Irfan Malik l-a descris pe Mohamed drept „înspăimântat” când acesta a ajuns la spital. „Își petrecuse ultimele [câteva] luni zăcând în pat, fără să se poată mișca de pe o parte pe alta, spune Malik. Era slăbit, se pusese multă presiune pe spatele lui. Nu era într-o formă prea bună. O vertebră de la baza coloanei vertebrale a lui Mohamed fusese distrusă Mohamed fusese împușcat în umăr, în șold și în spate în timpul atacului, iar coloana vertebrală inferioară îi fusese distrusă, ducând la paralizie." } } { "translation": { "en": "But during six hours of surgery, Mr Malik and his team were able to reattach nerve endings and reconstruct the damaged part of the spine. Even Mr Malik was surprised at what happened next. Exactly one week after the surgery Muhammad stood up and started taking steps and walking. We were not expecting to get that sort of excellent result. That was miraculous,\" he says. Less than two weeks after his operation, Muhammad is ready to leave hospital and start the long road to recovery. Muhammad has defied the odds and started to walk again He says he wants to build his strength and continue his education in the UK. 
But he says he is determined to return to Pakistan, join the army and help fight terrorism.", "ro": "Însă, în timpul unei operații care a durat șase ore, Malik și echipa lui au reușit să lege din nou terminațiile nervoase și să reconstruiască partea distrusă a coloanei. Chiar și Malik a fost surprins de ceea ce s-a întâmplat în continuare. Exact la o săptămână după operație, Mohamed s-a ridicat și a început să facă pași și să meargă. Nu ne așteptam la un rezultat atât de bun. A fost un miracol”, spune acesta. În mai puțin de două săptămâni de la operație, Mohamed este gata să părăsească spitalul și să înceapă procesul lung de recuperare. Mohamed a sfidat soarta și a început să meargă din nou Vrea să devină puternic și să își continue studiile în Regatul Unit. Însă este hotărât să revină în Pakistan, să se înroleze în armată și să lupte împotriva terorismului." } } { "translation": { "en": "\"I feel like I have a second chance at life,\" he says as he shows off pictures he's drawn of guns scribbled out next to school books and pens Muhammad grows physically stronger every day but the psychological trauma he continues to endure is unimaginable. \"My anger is not diminishing\" he says. In my school little kids were killed. What was their crime? His mother, wiping a tear from her eye, caressed his head and said: \"I can see my son walking again.\" He'll be able to get on with his normal life. 'Super Voice' 4G service from Three offers better signal Three is making use of a lower frequency 4G spectrum that can travel more widely", "ro": "„Simt că am încă o șansă la viață” spune el, arătând imaginile cu arme desenate de el lângă manuale școlare și stilouri Fizic, Mohamed devine tot mai puternic în fiecare zi, însă trauma psihologică prin care trece și acum este de neimaginat. „Furia mea nu a scăzut”, mărturisește el. În școala mea au fost uciși copii mici. Ce crimă au comis ei? Mama lui își șterge o lacrimă, îl mângâie pe creștet și spune: „Îmi văd fiul mergând din nou”. Va putea să-și continue firesc viața. Serviciul 4G „Super Voice” de la Three oferă semnal mai bun Three folosește un spectru 4G cu o frecvență mai joasă, care poate acoperi o zonă mai extinsă" } } { "translation": { "en": "Mobile phone provider Three has launched a UK service it says will improve reception inside buildings and in rural black spots. Its 4G Super Voice enables customers to make calls and send texts using a lower frequency spectrum. Other networks are looking into introducing the technology, known as Voice Over Long-Term Evolution (VoLTE). It currently works on only the Samsung Galaxy S5, but recent iPhone handsets will be added in the coming months. Three said up to 5.5 million customers would have access to the service by 2017.", "ro": "Furnizorul de telefonie mobilă Three a lansat în Regatul Unit un serviciu despre care spune că va îmbunătăți recepția în interiorul clădirilor și în zonele rurale fără semnal. Serviciul 4G Super Voice le permite clienților să efectueze apeluri și să trimită mesaje text folosind un spectru cu o frecvență mai joasă. Și alte rețele intenționează să introducă aceeași tehnologie, cunoscută ca „Voice Over Long-Term Evolution (VoLTE)”. Aceasta funcționează momentan doar cu Samsung Galaxy S5, însă telefoanele iPhone recente vor beneficia de ea în lunile următoare. Three menționează că până la 5,5 milioane de clienți vor avea acces la serviciu până în 2017." 
} } { "translation": { "en": "Chief technology officer Bryn Jones said: \"By the end of the year, one million of our customers will have access to better indoor coverage and be able to use their phones in more places than ever before.\" Stars prepare for panto season Pantomime season is big business for theatres up and down the UK, with many getting ready for this year's season now. Some of the biggest names in showbusiness now take part in the yuletide theatre. Matthew Kelly and Hayley Mills will be appearing in Cinderella - one as an ugly sister, the other as fairy godmother. They reveal their panto secrets to BBC Breakfast. Steven Wilson: 'If I don't do anything, I feel this creeping guilt'", "ro": "Responsabilul șef pentru tehnologie, Bryn Jones a declarat: „Până la sfârșitul anului, un milion dintre clienții noștri vor avea acces la o acoperire mai bună în interior și își vor putea folosi telefoanele în mai multe locuri ca până acum”. Vedetele se pregătesc pentru stagiunea de pantomimă Stagiunea de pantomimă este foarte importantă pentru teatrele din tot Regatul Unit, multe dintre ele pregătindu-se acum pentru stagiunea din acest an. Acum, la teatrul de Crăciun participă unele dintre numele cele mai mari din showbusiness. Matthew Kelly și Hayley Mills vor apărea în Cenușăreasa - primul în rolul uneia dintre surorile rele, iar a doua în rolul zânei. Aceștia dezvăluie secretele pantomimei lor la BBC Breakfast. Steven Wilson: „Dacă nu fac nimic, mă simt vinovat”" } } { "translation": { "en": "Steven Wilson was recently the big winner at the Progressive Music Awards Steven Wilson is often dubbed the hardest working musician in the world of progressive rock. The multi-talented musician won three prizes at this month's Progressive Music Awards in London, including album of the year for Hand. The Guardian's five-star review called it \"a smart, soulful and immersive work of art.\" Since the 1980s, Wilson has been the driving force in a number of musical projects, the best known of which is the rock band Porcupine Tree. Now, ahead of two sell-out shows at the Royal Albert Hall, Wilson is releasing a vinyl-only double LP, Transience, to showcase the \"more accessible\" side of his solo output.", "ro": "Steven Wilson a fost desemnat recent drept marele câștigător al Progressive Music Awards Steven Wilson a fost numit de multe ori drept cel mai muncitor muzician din lumea rockului progresiv. Talentatul muzician a câștigat trei premii la Progressive Music Awards, care a avut loc luna aceasta la Londra, printre care și premiul pentru cel mai bun album al anului pentru Hand. În recenzia sa de cinci stele, The Guardian a numit albumul „o operă de artă inteligentă, expresivă și captivantă”. Încă din anii 1980, Wilson este motorul mai multor proiecte muzicale, cel mai cunoscut dintre acestea fiind trupa de rock Porcupine Tree. Acum, înainte de două spectacole cu casa închisă la Royal Albert Hall, Wilson lansează un dublu LP doar în format vinil, Transience, pentru a arăta latura „mai accesibilă” a activității sale solo." } } { "translation": { "en": "He tells the BBC about his love of vinyl, his busy schedule and explains how comic actor Matt Berry came to be his support act. What does vinyl mean to you? I grew up at the very tail end of the vinyl era, and at the time, I remember, we couldn't wait for CD to come along because vinyl was so frustrating. You would buy the record, take it home, and it would have a scratch, and you would have to take it back again. 
I love CDs, and for some kinds of music - classical for example - it is better than vinyl. But the problem with the CD and digital downloads is that there's nothing you can really cherish or treasure. Owning vinyl is like having a beautiful painting hanging in your living room.", "ro": "A povestit pentru BBC despre dragostea lui pentru viniluri și despre programul său încărcat și a explicat cum a ajuns actorul de comedie Matt Berry să îi deschidă spectacolele. Ce înseamnă vinil pentru tine? Am crescut chiar în perioada de sfârșit a erei vinilurilor și îmi amintesc că atunci abia așteptam apariția CD-ului, căci vinilul era atât de enervant. Cumpărai un disc, mergeai cu el acasă, avea o zgârietură și trebuia să îl aduci înapoi. Iubesc CD-urile, iar pentru anumite tipuri de muzică, de exemplu cea clasică, sunt mai bune decât vinilurile. Însă problema cu CD-urile și cu descărcările digitale este aceea că nu mai există nimic pe care să îl prețuiești cu adevărat. Să ai un vinil e ca și cum ai avea un tablou frumos agățat în sufragerie." } } { "translation": { "en": "It's something you can hold, pore over the lyrics and immerse yourself in the art work. I thought it was just a nostalgic thing, but it can't be if kids too young to remember vinyl are enjoying that kind of experience. Do you have a piece of vinyl that you treasure? The truth is I got rid of 100% of my vinyl in the 90s. All the vinyl I have is re-bought. I started off from the perspective that I wanted to recreate the collection I had when I was 15, but it's gone beyond that. The first record which I persuaded my parents to buy for me was Electric Light Orchestra's Out of the Blue.", "ro": "E ceva ce poți ține în mână, în timp ce te lași absorbit de versuri și copleșit de actul artistic. Am crezut că e doar o chestie nostalgică, însă nu are cum să fie așa dacă unor puști prea tineri să-și amintească de viniluri le place acest gen de experiență. Ai vreun vinil la care ții în mod special? Recunosc că am scăpat de toate vinilurile în anii '90. Toate vinilurile pe care le am sunt cumpărate din nou. Am pornit de la ideea de a reface colecția pe care o aveam la 15 ani, însă am trecut de limita aceea. Primul disc pe care mi-am convins părinții să mi-l cumpere a fost Out of the Blue de la Electric Light Orchestra." } } { "translation": { "en": "If I still had my original copy, it would have sentimental value, but, alas, it's in a charity shop somewhere. Steven Wilson hopes the album will be a doorway for potential new fans Why release your new compilation Transience on vinyl? It was originally conceived as an idea for Record Store Day, but we missed the boat on that. My record company had suggested I put together some of my shorter, more accessible songs. I got a bit obsessed by the idea to make something like \"an introduction to Steven Wilson,\" and I was committed to it being a vinyl-only release. Anyone who buys the vinyl does also get a high-resolution download.", "ro": "Dacă aș mai fi avut încă exemplarul inițial, acesta ar fi avut valoare sentimentală, însă, din păcate, se află pe undeva printr-un magazin de caritate. Steven Wilson speră că albumul va fi o poartă către posibili fani noi De ce ți-ai lansat noua compilație Transience pe vinil? Aceasta a fost concepută inițial ca idee pentru Ziua magazinelor de discuri, însă am ratat ocazia. Casa mea de discuri sugerase să adun câteva dintre melodiile mele mai scurte și mai accesibile. 
Am ajuns să fiu ușor obsedat de ideea de a face ceva gen „introducere în muzica lui Steven Wilson” și am ținut neapărat ca proiectul să fie lansat doar pe vinil. Cine cumpără vinilul primește, de asemenea, și o variantă descărcată la rezoluție înaltă." } } { "translation": { "en": "Do you have a concern that the album won't show your work in a true light?", "ro": "Ești îngrijorat că albumul nu va arăta muzica ta în adevărata ei lumină?" } }
0
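The two wmt_en_ro fixtures above store one {"translation": {"en": ..., "ro": ...}} object per line (JSON Lines). A minimal loading sketch, assuming only the standard library; the relative path and the helper name are illustrative, not part of the test suite:

import json

# Hedged sketch: parse a JSON-lines translation fixture into (source, target)
# string pairs. load_translation_pairs is a made-up helper for illustration.
def load_translation_pairs(path, src_lang="en", tgt_lang="ro"):
    pairs = []
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue  # tolerate blank lines
            record = json.loads(line)["translation"]
            pairs.append((record[src_lang], record[tgt_lang]))
    return pairs

pairs = load_translation_pairs("tests/fixtures/tests_samples/wmt_en_ro/val.json")
print(len(pairs), pairs[0][0][:60])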
hf_public_repos/transformers/tests/fixtures/tests_samples
hf_public_repos/transformers/tests/fixtures/tests_samples/wmt16/sample.json
{"translation": {"en": "Membership of Parliament: see Minutes", "ro": "Componenţa Parlamentului: a se vedea procesul-verbal"}} {"translation": {"en": "Approval of Minutes of previous sitting: see Minutes", "ro": "Aprobarea procesului-verbal al şedinţei precedente: a se vedea procesul-verbal"}} {"translation": {"en": "Membership of Parliament: see Minutes", "ro": "Componenţa Parlamentului: a se vedea procesul-verbal"}} {"translation": {"en": "Verification of credentials: see Minutes", "ro": "Verificarea prerogativelor: a se vedea procesul-verbal"}} {"translation": {"en": "Documents received: see Minutes", "ro": "Depunere de documente: a se vedea procesul-verbal"}} {"translation": {"en": "Written statements and oral questions (tabling): see Minutes", "ro": "Declaraţii scrise şi întrebări orale (depunere): consultaţi procesul-verbal"}} {"translation": {"en": "Petitions: see Minutes", "ro": "Petiţii: a se vedea procesul-verbal"}} {"translation": {"en": "Texts of agreements forwarded by the Council: see Minutes", "ro": "Transmiterea de către Consiliu a textelor acordurilor: a se vedea procesul-verbal"}} {"translation": {"en": "Action taken on Parliament's resolutions: see Minutes", "ro": "Cursul dat rezoluţiilor Parlamentului: a se vedea procesul-verbal"}} {"translation": {"en": "Agenda for next sitting: see Minutes", "ro": "Ordinea de zi a următoarei şedinţe: a se vedea procesul-verbal"}}
0
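The wmt16 sample uses the same one-object-per-line layout. Assuming the separate "datasets" package is available, its generic "json" builder can load the file without hand parsing (a sketch, not the tests' own loading code):

from datasets import load_dataset  # assumes the `datasets` package is installed

ds = load_dataset(
    "json",
    data_files="tests/fixtures/tests_samples/wmt16/sample.json",
    split="train",
)
print(ds[0]["translation"]["en"])  # "Membership of Parliament: see Minutes"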
hf_public_repos/transformers/tests/fixtures/tests_samples
hf_public_repos/transformers/tests/fixtures/tests_samples/conll/sample.json
{"words": ["He", "was", "the", "27th", "pitcher", "used", "by", "the", "Angels", "this", "season", ",", "tying", "a", "major-league", "record", "."], "ner": ["O", "O", "O", "O", "O", "O", "O", "O", "B-ORG", "O", "O", "O", "O", "O", "O", "O", "O"]} {"words": ["CHICAGO", "AT", "ATLANTA"], "ner": ["B-ORG", "O", "B-LOC"]} {"words": ["President", "Bill", "Clinton", "earlier", "this", "month", "invoked", "special", "powers", "to", "appoint", "Fowler", "during", "the", "congressional", "recess", "because", "the", "Senate", "delayed", "confirming", "his", "nomination", "."], "ner": ["O", "B-PER", "I-PER", "O", "O", "O", "O", "O", "O", "O", "O", "B-PER", "O", "O", "O", "O", "O", "O", "B-ORG", "O", "O", "O", "O", "O"]} {"words": ["goals", "for", ",", "goals", "against", ",", "points", ")", "."], "ner": ["O", "O", "O", "O", "O", "O", "O", "O", "O"]} {"words": ["\"", "It", "is", "one", "step", "short", "of", "an", "emergency", "situation", ",", "\"", "a", "police", "spokesman", "said", "via", "telephone", "from", "a", "command", "post", "in", "the", "bush", "."], "ner": ["O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O"]} {"words": ["U.S.", "Ambassador", "Myles", "Frechette", "applauded", "the", "move", ",", "saying", "it", "could", "prompt", "the", "Clinton", "administration", "to", "remove", "Colombia", "from", "a", "list", "of", "outcast", "nations", "that", "have", "failed", "to", "cooperate", "in", "U.S.", "counternarcotics", "efforts", "."], "ner": ["B-LOC", "O", "B-PER", "I-PER", "O", "O", "O", "O", "O", "O", "O", "O", "O", "B-PER", "O", "O", "O", "B-LOC", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "B-LOC", "O", "O", "O"]} {"words": ["Halftime"], "ner": ["O"]} {"words": ["It", "has", "manufacturing", "plants", "in", "San", "Diego", ";", "Creedmoor", ",", "N.C.", ";", "Hampshire", ",", "England", ";", "and", "Tijuana", ",", "Mexico", ",", "and", "distributes", "its", "prodcuts", "in", "more", "than", "120", "countries", "."], "ner": ["O", "O", "O", "O", "O", "B-LOC", "I-LOC", "O", "B-LOC", "O", "B-LOC", "O", "B-LOC", "O", "B-LOC", "O", "O", "B-LOC", "O", "B-LOC", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O"]} {"words": ["Scotland", "manager", "Craig", "Brown", "said", "on", "Thursday", ":", "\"", "I", "'ve", "watched", "Duncan", "Ferguson", "in", "action", "twice", "recently", "and", "he", "'s", "bang", "in", "form", "."], "ner": ["B-LOC", "O", "B-PER", "I-PER", "O", "O", "O", "O", "O", "O", "O", "O", "B-PER", "I-PER", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O"]} {"words": ["Clinton", "flew", "in", "by", "helicopter", "from", "Michigan", "City", ",", "Indiana", ",", "after", "ending", "a", "four-day", ",", "559-mile", "trip", "aboard", "a", "campaign", "train", "from", "Washington", "."], "ner": ["B-PER", "O", "O", "O", "O", "O", "B-LOC", "I-LOC", "O", "B-LOC", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "B-LOC", "O"]}
0
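Each line of the CoNLL-style sample is a JSON object whose "words" and "ner" lists are parallel, one BIO tag per token. A minimal sketch that recovers token/tag pairs; the generator name is made up for this example:

import json

def read_ner_examples(path):
    # Illustrative helper: yield [(token, tag), ...] per example.
    with open(path, encoding="utf-8") as f:
        for line in f:
            if line.strip():
                ex = json.loads(line)
                assert len(ex["words"]) == len(ex["ner"])  # tags align 1:1 with tokens
                yield list(zip(ex["words"], ex["ner"]))

first = next(read_ner_examples("tests/fixtures/tests_samples/conll/sample.json"))
print([pair for pair in first if pair[1] != "O"])  # e.g. [('Angels', 'B-ORG')]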
hf_public_repos/transformers/tests/fixtures/tests_samples
hf_public_repos/transformers/tests/fixtures/tests_samples/MRPC/train.tsv
Quality #1 ID #2 ID #1 String #2 String
1 1355540 1355592 He said the foodservice pie business doesn 't fit the company 's long-term growth strategy . " The foodservice pie business does not fit our long-term growth strategy .
0 2029631 2029565 Magnarelli said Racicot hated the Iraqi regime and looked forward to using his long years of training in the war . His wife said he was " 100 percent behind George Bush " and looked forward to using his years of training in the war .
0 487993 487952 The dollar was at 116.92 yen against the yen , flat on the session , and at 1.2891 against the Swiss franc , also flat . The dollar was at 116.78 yen JPY = , virtually flat on the session , and at 1.2871 against the Swiss franc CHF = , down 0.1 percent .
1 1989515 1989458 The AFL-CIO is waiting until October to decide if it will endorse a candidate . The AFL-CIO announced Wednesday that it will decide in October whether to endorse a candidate before the primaries .
0 1783137 1782659 No dates have been set for the civil or the criminal trial . No dates have been set for the criminal or civil cases , but Shanley has pleaded not guilty .
1 3039165 3039036 Wal-Mart said it would check all of its million-plus domestic workers to ensure they were legally employed . It has also said it would review all of its domestic employees more than 1 million to ensure they have legal status .
0
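The MRPC .tsv fixtures follow the GLUE layout: a header of Quality / #1 ID / #2 ID / #1 String / #2 String, with Quality as the 0/1 paraphrase label. A reading sketch; csv.QUOTE_NONE matters because GLUE-style TSVs contain unescaped quote characters:

import csv

with open("tests/fixtures/tests_samples/MRPC/train.tsv", encoding="utf-8") as f:
    reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
    for row in reader:
        label = int(row["Quality"])  # 1 = paraphrase, 0 = not
        print(label, row["#1 String"][:40], "|", row["#2 String"][:40])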
hf_public_repos/transformers/tests/fixtures/tests_samples
hf_public_repos/transformers/tests/fixtures/tests_samples/MRPC/dev.tsv
Quality #1 ID #2 ID #1 String #2 String
1 1355540 1355592 He said the foodservice pie business doesn 't fit the company 's long-term growth strategy . " The foodservice pie business does not fit our long-term growth strategy .
0 2029631 2029565 Magnarelli said Racicot hated the Iraqi regime and looked forward to using his long years of training in the war . His wife said he was " 100 percent behind George Bush " and looked forward to using his years of training in the war .
0 487993 487952 The dollar was at 116.92 yen against the yen , flat on the session , and at 1.2891 against the Swiss franc , also flat . The dollar was at 116.78 yen JPY = , virtually flat on the session , and at 1.2871 against the Swiss franc CHF = , down 0.1 percent .
1 1989515 1989458 The AFL-CIO is waiting until October to decide if it will endorse a candidate . The AFL-CIO announced Wednesday that it will decide in October whether to endorse a candidate before the primaries .
0 1783137 1782659 No dates have been set for the civil or the criminal trial . No dates have been set for the criminal or civil cases , but Shanley has pleaded not guilty .
1 3039165 3039036 Wal-Mart said it would check all of its million-plus domestic workers to ensure they were legally employed . It has also said it would review all of its domestic employees more than 1 million to ensure they have legal status .
0
hf_public_repos/transformers/tests/fixtures/tests_samples
hf_public_repos/transformers/tests/fixtures/tests_samples/MRPC/dev.csv
label,sentence1,sentence2
equivalent,He said the foodservice pie business doesn 't fit the company 's long-term growth strategy .,""" The foodservice pie business does not fit our long-term growth strategy ."
not_equivalent,Magnarelli said Racicot hated the Iraqi regime and looked forward to using his long years of training in the war .,"His wife said he was "" 100 percent behind George Bush "" and looked forward to using his years of training in the war ."
not_equivalent,"The dollar was at 116.92 yen against the yen , flat on the session , and at 1.2891 against the Swiss franc , also flat .","The dollar was at 116.78 yen JPY = , virtually flat on the session , and at 1.2871 against the Swiss franc CHF = , down 0.1 percent ."
equivalent,The AFL-CIO is waiting until October to decide if it will endorse a candidate .,The AFL-CIO announced Wednesday that it will decide in October whether to endorse a candidate before the primaries .
not_equivalent,No dates have been set for the civil or the criminal trial .,"No dates have been set for the criminal or civil cases , but Shanley has pleaded not guilty ."
equivalent,Wal-Mart said it would check all of its million-plus domestic workers to ensure they were legally employed .,It has also said it would review all of its domestic employees more than 1 million to ensure they have legal status .
0
hf_public_repos/transformers/tests/fixtures/tests_samples
hf_public_repos/transformers/tests/fixtures/tests_samples/MRPC/train.csv
label,sentence1,sentence2
equivalent,He said the foodservice pie business doesn 't fit the company 's long-term growth strategy .,""" The foodservice pie business does not fit our long-term growth strategy ."
not_equivalent,Magnarelli said Racicot hated the Iraqi regime and looked forward to using his long years of training in the war .,"His wife said he was "" 100 percent behind George Bush "" and looked forward to using his years of training in the war ."
not_equivalent,"The dollar was at 116.92 yen against the yen , flat on the session , and at 1.2891 against the Swiss franc , also flat .","The dollar was at 116.78 yen JPY = , virtually flat on the session , and at 1.2871 against the Swiss franc CHF = , down 0.1 percent ."
equivalent,The AFL-CIO is waiting until October to decide if it will endorse a candidate .,The AFL-CIO announced Wednesday that it will decide in October whether to endorse a candidate before the primaries .
not_equivalent,No dates have been set for the civil or the criminal trial .,"No dates have been set for the criminal or civil cases , but Shanley has pleaded not guilty ."
equivalent,Wal-Mart said it would check all of its million-plus domestic workers to ensure they were legally employed .,It has also said it would review all of its domestic employees more than 1 million to ensure they have legal status .
0
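The .csv variants carry the same six pairs but with string labels and standard CSV quoting (doubled "" inside quoted fields), so the default csv dialect applies. A sketch; the label-to-integer mapping is this example's own convention:

import csv

LABELS = {"not_equivalent": 0, "equivalent": 1}  # illustrative mapping

with open("tests/fixtures/tests_samples/MRPC/train.csv", encoding="utf-8") as f:
    for row in csv.DictReader(f):
        print(LABELS[row["label"]], row["sentence1"][:40])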
hf_public_repos/transformers/tests/fixtures/tests_samples
hf_public_repos/transformers/tests/fixtures/tests_samples/xsum/sample.json
{"document": "The warning begins at 22:00 GMT on Saturday and ends at 10:00 on Sunday.\nThe ice could lead to difficult driving conditions on untreated roads and slippery conditions on pavements, the weather service warned.\nOnly the southernmost counties and parts of the most westerly counties are expected to escape.\nCounties expected to be affected are Carmarthenshire, Powys, Ceredigion, Pembrokeshire, Denbighshire, Gwynedd, Wrexham, Conwy, Flintshire, Anglesey, Monmouthshire, Blaenau Gwent, Caerphilly, Merthyr Tydfil, Neath Port Talbot, Rhondda Cynon Taff and Torfaen.", "summary": "The Met Office has issued a yellow weather warning for ice across most of Wales."} {"document": "You can see highlights of Sunderland v Arsenal on Match of the Day at 22:20 BST on Saturday on BBC One and the BBC Sport website.\nStoke and West Ham, for example, have started to climb away from the relegation zone but the biggest worry for Sunderland fans is that their side do not look remotely capable of doing the same.\nI know the Black Cats have got out of trouble before having found themselves in a similar situation but this time, after picking up only two points from their first nine games, things look really desperate for the only top-flight team without a win.\nAt least one element of their struggles seems to be self-inflicted, with everyone at the club feeling sorry for themselves - and not just because they have lost some players to injury and conceded some costly late goals.\nThere is a negative feeling about the place with the manager David Moyes and his players talking about how they have gone backwards since last season, when they should be searching for any kind of spark that could change things around.\nFrom the outside, looking at the way they play and their lack of creativity, it is hard to see what that spark might be or what could fundamentally change under Moyes until the January transfer window opens.\nIf they can get one win under their belt then they will get a bit of belief back but, the longer this winless run goes on, the more negativity there will be.\nMedia playback is not supported on this device\nSunderland finished last season on a high under Sam Allardyce, with a run of just one defeat in their last 11 games securing their safety.\nIn the space of five months, all of that confidence and momentum seems to have been sucked out of the club, despite them effectively having the same group of players who, not so long ago, looked inspired.\nThat is not all down to Moyes, but he has to take some responsibility for it.\nI am yet to see a defined style of play from Sunderland since he took charge at the end of July.\nThat is in contrast to Allardyce's time as manager, when they were resolute and difficult to beat and, at the end of his stint at the Stadium of Light, also played with a purpose when they went forward.\nOff the pitch, Moyes has not helped himself much either.\nThere was no need for him to be so pessimistic when he came out after the second game of the season and announced they would be in a relegation fight, which did not send out the right message to his players or the fans.\nWhen he took charge, he had actually started out by being unrealistically positive - talking about Sunderland becoming a club that regularly finished in the top half of the Premier League - but his expectations went downhill very quickly.\nI know you can argue that he has been proved right, because Sunderland are now battling the drop, but it meant there was a cloud over from them almost as soon as the 
season had started.\nIt seems to be a case that if you stop Jermain Defoe, you stop Sunderland. His statistics stand up well in comparison to last season, but the rest of their team are not doing enough in attack.\nThey were reliant on Defoe last season too, but others did chip in - in their first nine league games of 2015-16, five players found the net. This time around, only Defoe and Patrick van Aanholt have scored in the same period.\nIt is going to be a massive struggle for them to stay up from the position they are now in anyway, but they badly need a win and quickly. I don't see it coming at home to Arsenal on Saturday, though.\nDo they even look capable of holding out for a draw against the Gunners, the way another struggling team Middlesbrough did at Emirates Stadium last weekend? No.\nIf you struggle to make chances and score goals, as Sunderland do, that puts more pressure on your defence because you know if you concede then you are in big trouble.\nAnd the Black Cats have problems at the back as well - their only clean sheet in 12 matches under Moyes was against League One side Shrewsbury Town in the EFL Cup.\nIt does not bode well against an Arsenal side that are averaging more than two goals a game this season.\nIt is hard to find any positives from Sunderland's situation but at least they have not been cut adrift at the bottom - yet.\nUnless they win soon, that could happen. I think Hull are also in for a very tough season but when I look at the other two teams immediately above them, Boro and Swansea, they definitely have more about them than the Black Cats do.\nMedia playback is not supported on this device\nChanging manager has clearly not helped Sunderland and comparisons with his predecessor do not help Moyes much either.\nYou cannot tell me that, if Allardyce was still in charge, Sunderland would have only picked up two points so far. It just would not have happened.\nMoyes replaced him relatively late in the summer, which is difficult in itself, but he can only complain about the things that have gone against him up to a point. He should be doing much better than he is.\nHe is still the manager and he is capable of turning things around, so it is right there is no suggestion of him getting the sack.\nBut that will not last forever. This industry is results-driven and Moyes' results are not good enough.\nThat clearly has to change soon and, looking at Sunderland's next few fixtures, the one that stands out as a must-win is their home game against Hull on 19 November.\nIf they fail to beat Arsenal and Bournemouth, then the visit of the Tigers will be the game to define Moyes' tenure. 
If Sunderland are still without a win after that, things will become extremely difficult for him.\nChris Sutton was speaking to BBC Sport's Chris Bevan.", "summary": "We are exactly a quarter of the way through the Premier League season and some teams at the bottom of the table seem to be turning things around after making a bad start."} {"document": "The win keeps the Candystripes two points behind leaders Dundalk who won 2-0 away to Shamrock Rovers.\nFormer Plymouth striker Patterson scored his sixth goal of the season in the 14th minute at the Brandywell.\nHe shot into an empty net after the ball broke to him when keeper Dean Delany thwarted Barry McNamee.\nKurtis Byrne should have netted a speedy equaliser but the son of former Celtic player Paul Byrne completely missed his kick in front of goal.\nThat was the one big scare for Kenny Shiels' men on a night when both keepers had a quiet night.\nDerry City have won six and drawn two in the eight games they have played since losing to Finn Harps on the first day of the season.", "summary": "Rory Patterson's early goal proved enough to give second-placed Derry City a home victory over Bohemians in Friday night's Premier Division clash."} {"document": "The centre-right coalition led by Mr Passos Coelho won the most seats in the election on 4 October.\nBut Socialist leader Antonio Costa has been working to build a coalition with far-left parties.\nMany believe that Mr Passos Coelho will fail to pass the test of a vote of no confidence in Portugal's parliament.\nPresident Anibal Cavaco Silva would then be expected to ask the left to form a government.\nThere are fears that weeks of uncertainty could harm Portugal's economic recovery, more than a year after it exited the strict terms of its €78bn (£57bn) international bailout.\nEU officials have threatened to take action against Portugal for missing a 15 October deadline to present its draft 2016 budget.\nPortugal is still running one of the highest budget deficits in the eurozone.\n12%\nof the workforce is unemployed\n20%\nof people live below the poverty line\n485,000 emigrated from Portugal between 2011 and 2014\n125% debt to GDP - the second highest rate in the European Union\nMr Passos Coelho's Social Democrats have promised to present a budget, but the two left-wing parties campaigned strongly against his outgoing government's record of harsh austerity.\nThe Left Bloc is seen as allied to the anti-austerity Syriza party in Greece, which for months tried to renegotiate the terms of Greece's eurozone bailout.\nPortugal's Communist Party is regarded as anti-euro and anti-Nato, although it is thought to have moderated its eurozone policies in recent weeks.\nIf Mr Costa's Socialists are eventually chosen to lead a left-wing coalition, it would be the first time since the fall of Portugal's dictatorship in 1974 that a right-wing president appointed a government backed by communists.\nAfter his re-appointment as prime minister leading a right-of-centre coalition, Pedro Passos Coelho has 10 days to appoint ministers and secure parliamentary approval.\nThat may prove impossible, since his coalition lost its majority in the 4 October election and the Socialists have pledged to reject his programme if their talks with other parties succeed.\nTogether, the Socialists, Left Bloc and Communist Party have a majority. 
All wanted the president to appoint Mr Costa - arguing that anything else was a waste of time.\nIf Mr Passos Coelho does fail, the president could then appoint Mr Costa or keep the incumbent on as caretaker.\nFresh legislative elections may only take place from June, after voters have elected a new president early next year.", "summary": "The Portuguese president has invited incumbent Prime Minister Pedro Passos Coelho to form the next government, despite him having lost his majority."} {"document": "Nev Edwards scored an early try for Sale, before Castres' Florian Vialelle went over, but Julien Dumora's penalty put the hosts 10-7 ahead at the break.\nJoe Ford sent over a penalty before Castres' Marc-Antoine Rallier and Sales' Will Addison were sin-binned.\nJulien Caminati's late attempt to stop Charlie Ingall saw Sale awarded the decisive penalty try.\nThe win moves the English Premiership side to within one point of Pool Two leaders Newport Gwent Dragons after three games.\nSale got off to the ideal start, Edwards sprinting away for the game's opening points from an Andrei Ostrikov kick, but Castres heaped the pressure on in search of a reply, which came through Vialelle on eight minutes.\nSharks flanker Magnus Lund was forced off with a head injury before the television match official denied Castres a second try, with replays showing that the Sharks defence did enough to force full-back Caminati into touch.\nFord had a chance to put Sale ahead again, but his penalty on 27 minutes drifted wide. Dumora, however, made no mistake soon after, slotting over to give the French side the lead on 33 minutes.\nA combination of probing grubber kicks and scrappy play eventually led to Ford teeing up his second penalty attempt, with the fly-half this time booting the three points to make it 10-10.\nRallier's yellow card following a scuffle saw Ford opt for the posts soon after, but he was off target again before Sales' one-man advantage was lost as Addison was sin-binned.\nSharks pushed for the breakthrough as Ingall went close to touching down, and the video referee eventually gave the penalty try after deciding that Caminati's attempt to stop the winger was illegal.\nCastres: Caminati; Martial, Vialelle, Combezou, Decrop; Dumora, Dupont; Taumoepeau, Rallier, Montes; Samson, Moreaux, Caballero, Diarra, Beattie.\nReplacements: Beziat, Tichit, Martinez, Desroche, Babillot, Fontaine, Lamerat, Seron.\nSale: Arscott; Edwards, Addison, Jennings, Ingall; Ford, Mitchell, Lewis-Roberts, Briggs, Mujati, Mills, Ostrikov, Lund, Seymour (capt), Easter.\nReplacements: Taylor, Flynn, Parker, Beaumont, Neild, Jeffers, James, Haley.\nReferee: David Wilkinson (Ireland)", "summary": "A late penalty try gave Sale victory over Castres at Stade Pierre-Antoine in their European Challenge Cup clash."} {"document": "The 33-year-old was released by Norwich this summer after five years at the club, during which time he made 75 Canaries first-team appearances.\nTurner also had spells on loan at Fulham and Sheffield Wednesday during his time at Carrow Road.\nIn total, the centre-back has made 436 senior career appearances for eight different clubs.\nFind all the latest football transfers on our dedicated page.", "summary": "League One side Southend United have signed former Hull and Norwich defender Michael Turner on a one-year deal."} {"document": "United contacted St Johnstone this week with a view to speaking to 52-year-old Wright about the job but this approach was rejected by the Saints board.\nThe Tannadice club - bottom 
of the Premiership - are seeking to replace Jackie McNamara, who left last month.\nDave Bowman took the first team for Saturday's loss to Partick Thistle.\nThe Tangerines have won only once this season and prop up the table with five points from 10 games.\nFormer Northern Ireland goalkeeper Wright, who replaced Steve Lomas at McDiarmid Park in 2013, led St Johnstone to Scottish Cup success in his first season in charge.\nHe has also secured two successive top-six finishes for the Perth side and previously managed in his homeland.", "summary": "St Johnstone boss Tommy Wright is no longer under consideration for the Dundee United manager's job, BBC Scotland has learned."} {"document": "Media playback is unsupported on your device\n2 November 2014 Last updated at 17:20 GMT\nHomes and businesses were damaged in the storm, but weather experts were not able to confirm it was a tornado.\nNavtej Johal reports.", "summary": "Residents in Coalville in Leicestershire are cleaning up after high winds hit the town."} {"document": "5 August 2015 Last updated at 06:36 BST\nShe's now 84 and has been telling Newsround the inspiring story of her life before and after that devastating and world-changing event.\nThis animation contains some sad moments that you might find upsetting.\nYou can find out more about what happened in Hiroshima here.\nWatch 'Hiroshima: A Newsround Special' - Thursday 6 August at 5.30pm on the CBBC channel and on the Newsround website.", "summary": "Bun Hashizume was 14 years old and lived in Hiroshima, in Japan, when a nuclear bomb was dropped on the city 70 years ago, at the end of World War Two."} {"document": "But what has been your moment of the year?\nFrom Ben Stokes' 258 off 198 balls against South Africa to Stuart Broad's 6-17 against the same opponents, and Alastair Cook being the first Englishman to reach 10,000 Test runs, there are lots of highlights.\nOr perhaps you revelled in Australia being skittled for just 85? Or the dog that invaded the pitch at Vizag?\nThe cricket brains of BBC Sport and BBC Radio 5 live asked you to rank your top 10, and your shortlist will be revealed on Tuesday's Tuffers and Vaughan Cricket Show (20:30 GMT, BBC Radio 5 live and online).\nVotes will no longer count but you can still pick your top 10 and share with friends.\nWhat are your top 10 cricketing moments from this year?", "summary": "It's been topsy-turvy for the England side but eventful and entertaining nonetheless."}
0
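Each line of the XSum sample pairs a long "document" with a one-sentence "summary", which makes it a convenient smoke test for summarization code. A minimal sketch, path illustrative:

import json

with open("tests/fixtures/tests_samples/xsum/sample.json", encoding="utf-8") as f:
    examples = [json.loads(line) for line in f if line.strip()]

for ex in examples[:2]:
    # The word-count ratio gives a quick sanity check on the pairs.
    print(len(ex["document"].split()), "words ->", ex["summary"])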
hf_public_repos/transformers/tests/fixtures/tests_samples
hf_public_repos/transformers/tests/fixtures/tests_samples/COCO/coco_annotations.txt
[{"segmentation": [[333.96, 175.14, 338.26, 134.33, 342.55, 95.67, 348.99, 79.57, 368.32, 80.64, 371.54, 91.38, 364.03, 106.41, 356.51, 145.07, 351.14, 166.55, 350.07, 184.8, 345.77, 185.88, 332.89, 178.36, 332.89, 172.99]], "area": 2120.991099999999, "iscrowd": 0, "image_id": 39769, "bbox": [332.89, 79.57, 38.65, 106.31], "category_id": 75, "id": 1108446}, {"segmentation": [[44.03, 86.01, 112.75, 74.2, 173.96, 77.42, 175.03, 89.23, 170.74, 98.9, 147.11, 102.12, 54.77, 119.3, 53.69, 119.3, 44.03, 113.93, 41.88, 94.6, 41.88, 94.6]], "area": 4052.607, "iscrowd": 0, "image_id": 39769, "bbox": [41.88, 74.2, 133.15, 45.1], "category_id": 75, "id": 1110067}, {"segmentation": [[1.08, 473.53, 633.17, 473.53, 557.66, 376.45, 535.01, 366.74, 489.71, 305.26, 470.29, 318.2, 456.27, 351.64, 413.12, 363.51, 376.45, 358.11, 348.4, 350.56, 363.51, 331.15, 357.03, 288.0, 353.8, 257.8, 344.09, 190.92, 333.3, 177.98, 345.17, 79.82, 284.76, 130.52, 265.35, 151.01, 308.49, 189.84, 317.12, 215.73, 293.39, 243.78, 269.66, 212.49, 235.15, 199.55, 214.65, 193.08, 187.69, 217.89, 159.64, 278.29, 135.91, 313.89, 169.35, 292.31, 203.87, 281.53, 220.04, 292.31, 220.04, 307.42, 175.82, 345.17, 155.33, 360.27, 105.71, 363.51, 85.21, 374.29, 74.43, 366.74, 70.11, 465.98, 42.07, 471.37, 33.44, 457.35, 34.52, 414.2, 29.12, 368.9, 9.71, 291.24, 46.38, 209.26, 99.24, 128.36, 131.6, 107.87, 50.7, 117.57, 40.99, 103.55, 40.99, 85.21, 60.4, 77.66, 141.3, 70.11, 173.66, 72.27, 174.74, 92.76, 204.94, 72.27, 225.44, 62.56, 262.11, 56.09, 292.31, 53.93, 282.61, 81.98, 298.79, 96.0, 310.65, 102.47, 348.4, 74.43, 373.21, 81.98, 430.38, 35.6, 484.31, 23.73, 540.4, 46.38, 593.26, 66.88, 638.56, 80.9, 632.09, 145.62, 581.39, 118.65, 543.64, 130.52, 533.93, 167.19, 512.36, 197.39, 498.34, 218.97, 529.62, 253.48, 549.03, 273.98, 584.63, 276.13, 587.87, 293.39, 566.29, 305.26, 531.78, 298.79, 549.03, 319.28, 576.0, 358.11, 560.9, 376.45, 639.64, 471.37, 639.64, 2.16, 1.08, 0.0]], "area": 176277.55269999994, "iscrowd": 0, "image_id": 39769, "bbox": [1.08, 0.0, 638.56, 473.53], "category_id": 63, "id": 1605237}, {"segmentation": [[1.07, 1.18, 640.0, 3.33, 638.93, 472.59, 4.3, 479.03]], "area": 301552.6694999999, "iscrowd": 0, "image_id": 39769, "bbox": [1.07, 1.18, 638.93, 477.85], "category_id": 65, "id": 1612051}, {"segmentation": [[138.75, 319.38, 148.75, 294.38, 165.0, 246.87, 197.5, 205.63, 247.5, 203.13, 268.75, 216.88, 280.0, 239.38, 293.75, 244.38, 303.75, 241.88, 307.5, 228.13, 318.75, 220.63, 315.0, 200.63, 291.25, 171.88, 265.0, 156.88, 258.75, 148.13, 262.5, 135.63, 282.5, 123.13, 292.5, 115.63, 311.25, 108.13, 313.75, 106.88, 296.25, 93.13, 282.5, 84.38, 292.5, 64.38, 288.75, 60.63, 266.25, 54.38, 232.5, 63.12, 206.25, 70.63, 170.0, 100.63, 136.25, 114.38, 101.25, 138.13, 56.25, 194.38, 27.5, 259.38, 17.5, 299.38, 32.5, 378.13, 31.25, 448.13, 41.25, 469.38, 66.25, 466.88, 70.0, 419.38, 71.25, 391.88, 77.5, 365.63, 113.75, 364.38, 145.0, 360.63, 168.75, 349.38, 191.25, 330.63, 212.5, 319.38, 223.75, 305.63, 206.25, 286.88, 172.5, 288.13]], "area": 53301.618749999994, "iscrowd": 0, "image_id": 39769, "bbox": [17.5, 54.38, 301.25, 415.0], "category_id": 17, "id": 2190839}, {"segmentation": [[543.75, 136.88, 570.0, 114.38, 591.25, 123.13, 616.25, 140.63, 640.0, 143.13, 636.25, 124.37, 605.0, 103.13, 640.0, 103.13, 633.75, 86.88, 587.5, 73.13, 548.75, 49.38, 505.0, 35.63, 462.5, 25.63, 405.0, 48.13, 362.5, 111.88, 347.5, 179.38, 355.0, 220.63, 356.25, 230.63, 365.0, 264.38, 358.75, 266.88, 358.75, 270.63, 356.25, 291.88, 356.25, 
325.63, 355.0, 338.13, 350.0, 348.13, 365.0, 354.38, 396.25, 351.88, 423.75, 355.63, 446.25, 350.63, 460.0, 345.63, 462.5, 321.88, 468.75, 306.88, 481.25, 299.38, 516.25, 341.88, 536.25, 368.13, 570.0, 369.38, 578.75, 359.38, 555.0, 330.63, 532.5, 298.13, 563.75, 299.38, 582.5, 298.13, 586.25, 286.88, 578.75, 278.13, 548.75, 269.38, 525.0, 256.88, 505.0, 206.88, 536.25, 161.88, 540.0, 149.38]], "area": 59700.95625, "iscrowd": 0, "image_id": 39769, "bbox": [347.5, 25.63, 292.5, 343.75], "category_id": 17, "id": 2190842}]
0
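Despite the .txt extension, the COCO fixture above is a JSON list of object annotations in the standard COCO detection format: "bbox" is [x, y, width, height] in pixels and "segmentation" holds lists of polygon vertices. A parsing sketch:

import json

with open("tests/fixtures/tests_samples/COCO/coco_annotations.txt", encoding="utf-8") as f:
    annotations = json.load(f)

for ann in annotations:
    x, y, w, h = ann["bbox"]  # COCO boxes are [x, y, width, height]
    print(f"category {ann['category_id']}: ({x}, {y}) {w}x{h}, area {ann['area']:.0f}")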
hf_public_repos/transformers/tests/fixtures/tests_samples
hf_public_repos/transformers/tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt
[{"id": 8222595, "category_id": 17, "iscrowd": 0, "bbox": [18, 54, 301, 415], "area": 53306}, {"id": 8225432, "category_id": 17, "iscrowd": 0, "bbox": [349, 26, 291, 343], "area": 59627}, {"id": 8798150, "category_id": 63, "iscrowd": 0, "bbox": [1, 0, 639, 474], "area": 174579}, {"id": 14466198, "category_id": 75, "iscrowd": 0, "bbox": [42, 74, 133, 45], "area": 4068}, {"id": 12821912, "category_id": 75, "iscrowd": 0, "bbox": [333, 80, 38, 106], "area": 2118}, {"id": 10898909, "category_id": 93, "iscrowd": 0, "bbox": [0, 0, 640, 480], "area": 2750}]
0
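In COCO's panoptic format, each "id" names a segment of an accompanying PNG mask whose pixel colour encodes the id as R + 256*G + 256**2*B. A sketch decoding the ids back to RGB triples (no image file is needed for the arithmetic):

import json

with open("tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", encoding="utf-8") as f:
    segments = json.load(f)

for seg in segments:
    sid = seg["id"]
    r, g, b = sid % 256, (sid // 256) % 256, (sid // 256**2) % 256
    print(f"segment {sid} -> RGB ({r}, {g}, {b}), category {seg['category_id']}")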
hf_public_repos/transformers/tests/fixtures/tests_samples
hf_public_repos/transformers/tests/fixtures/tests_samples/STS-B/train.tsv
index genre filename year old_index source1 source2 sentence1 sentence2 score
0 main-captions MSRvid 2012test 0001 none none A plane is taking off. An air plane is taking off. 5.000
1 main-captions MSRvid 2012test 0004 none none A man is playing a large flute. A man is playing a flute. 3.800
2 main-captions MSRvid 2012test 0005 none none A man is spreading shreded cheese on a pizza. A man is spreading shredded cheese on an uncooked pizza. 3.800
3 main-captions MSRvid 2012test 0006 none none Three men are playing chess. Two men are playing chess. 2.600
4 main-captions MSRvid 2012test 0009 none none A man is playing the cello. A man seated is playing the cello. 4.250
5 main-captions MSRvid 2012test 0011 none none Some men are fighting. Two men are fighting. 4.250
6 main-captions MSRvid 2012test 0012 none none A man is smoking. A man is skating. 0.500
7 main-captions MSRvid 2012test 0013 none none The man is playing the piano. The man is playing the guitar. 1.600
8 main-captions MSRvid 2012test 0014 none none A man is playing on a guitar and singing. A woman is playing an acoustic guitar and singing. 2.200
0
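STS-B rows end with a "score" column in [0, 5], so unlike MRPC the task is regression rather than classification. The same csv.QUOTE_NONE caveat applies to these tab-separated files; a sketch:

import csv

with open("tests/fixtures/tests_samples/STS-B/train.tsv", encoding="utf-8") as f:
    reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
    for row in reader:
        print(float(row["score"]), row["sentence1"], "|", row["sentence2"])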
hf_public_repos/transformers/tests/fixtures/tests_samples
hf_public_repos/transformers/tests/fixtures/tests_samples/STS-B/dev.tsv
index genre filename year old_index source1 source2 sentence1 sentence2 score
0 main-captions MSRvid 2012test 0000 none none A man with a hard hat is dancing. A man wearing a hard hat is dancing. 5.000
1 main-captions MSRvid 2012test 0002 none none A young child is riding a horse. A child is riding a horse. 4.750
2 main-captions MSRvid 2012test 0003 none none A man is feeding a mouse to a snake. The man is feeding a mouse to the snake. 5.000
3 main-captions MSRvid 2012test 0007 none none A woman is playing the guitar. A man is playing guitar. 2.400
4 main-captions MSRvid 2012test 0008 none none A woman is playing the flute. A man is playing a flute. 2.750
5 main-captions MSRvid 2012test 0010 none none A woman is cutting an onion. A man is cutting onions. 2.615
6 main-captions MSRvid 2012test 0015 none none A man is erasing a chalk board. The man is erasing the chalk board. 5.000
7 main-captions MSRvid 2012test 0023 none none A woman is carrying a boy. A woman is carrying her baby. 2.333
8 main-captions MSRvid 2012test 0027 none none Three men are playing guitars. Three men are on stage playing guitars. 3.750
0
hf_public_repos/transformers/tests/fixtures/tests_samples
hf_public_repos/transformers/tests/fixtures/tests_samples/SQUAD/sample.json
{ "version": 2.0, "data": [ { "id": "56ddde6b9a695914005b9628", "question": "In what country is Normandy located?", "context": "The Normans (Norman: Nourmands; French: Normands; Latin: Normanni) were the people who in the 10th and 11th centuries gave their name to Normandy, a region in France. They were descended from Norse (\"Norman\" comes from \"Norseman\") raiders and pirates from Denmark, Iceland and Norway who, under their leader Rollo, agreed to swear fealty to King Charles III of West Francia. Through generations of assimilation and mixing with the native Frankish and Roman-Gaulish populations, their descendants would gradually merge with the Carolingian-based cultures of West Francia. The distinct cultural and ethnic identity of the Normans emerged initially in the first half of the 10th century, and it continued to evolve over the succeeding centuries.", "answers": { "answer_start": [ 159, 159, 159, 159 ], "text": [ "France", "France", "France", "France" ] } }, { "id": "56ddde6b9a695914005b9629", "question": "When were the Normans in Normandy?", "context": "The Normans (Norman: Nourmands; French: Normands; Latin: Normanni) were the people who in the 10th and 11th centuries gave their name to Normandy, a region in France. They were descended from Norse (\"Norman\" comes from \"Norseman\") raiders and pirates from Denmark, Iceland and Norway who, under their leader Rollo, agreed to swear fealty to King Charles III of West Francia. Through generations of assimilation and mixing with the native Frankish and Roman-Gaulish populations, their descendants would gradually merge with the Carolingian-based cultures of West Francia. The distinct cultural and ethnic identity of the Normans emerged initially in the first half of the 10th century, and it continued to evolve over the succeeding centuries.", "answers": { "answer_start": [ 94, 87, 94, 94 ], "text": [ "10th and 11th centuries", "in the 10th and 11th centuries", "10th and 11th centuries", "10th and 11th centuries" ] } }, { "id": "56ddde6b9a695914005b962a", "question": "From which countries did the Norse originate?", "context": "The Normans (Norman: Nourmands; French: Normands; Latin: Normanni) were the people who in the 10th and 11th centuries gave their name to Normandy, a region in France. They were descended from Norse (\"Norman\" comes from \"Norseman\") raiders and pirates from Denmark, Iceland and Norway who, under their leader Rollo, agreed to swear fealty to King Charles III of West Francia. Through generations of assimilation and mixing with the native Frankish and Roman-Gaulish populations, their descendants would gradually merge with the Carolingian-based cultures of West Francia. The distinct cultural and ethnic identity of the Normans emerged initially in the first half of the 10th century, and it continued to evolve over the succeeding centuries.", "answers": { "answer_start": [ 256, 256, 256, 256 ], "text": [ "Denmark, Iceland and Norway", "Denmark, Iceland and Norway", "Denmark, Iceland and Norway", "Denmark, Iceland and Norway" ] } }, { "id": "5ad39d53604f3c001a3fe8d3", "question": "Who did King Charles III swear fealty to?", "context": "The Normans (Norman: Nourmands; French: Normands; Latin: Normanni) were the people who in the 10th and 11th centuries gave their name to Normandy, a region in France. They were descended from Norse (\"Norman\" comes from \"Norseman\") raiders and pirates from Denmark, Iceland and Norway who, under their leader Rollo, agreed to swear fealty to King Charles III of West Francia. 
Through generations of assimilation and mixing with the native Frankish and Roman-Gaulish populations, their descendants would gradually merge with the Carolingian-based cultures of West Francia. The distinct cultural and ethnic identity of the Normans emerged initially in the first half of the 10th century, and it continued to evolve over the succeeding centuries.", "answers": { "answer_start": [], "text": [] } }, { "id": "5ad39d53604f3c001a3fe8d4", "question": "When did the Frankish identity emerge?", "context": "The Normans (Norman: Nourmands; French: Normands; Latin: Normanni) were the people who in the 10th and 11th centuries gave their name to Normandy, a region in France. They were descended from Norse (\"Norman\" comes from \"Norseman\") raiders and pirates from Denmark, Iceland and Norway who, under their leader Rollo, agreed to swear fealty to King Charles III of West Francia. Through generations of assimilation and mixing with the native Frankish and Roman-Gaulish populations, their descendants would gradually merge with the Carolingian-based cultures of West Francia. The distinct cultural and ethnic identity of the Normans emerged initially in the first half of the 10th century, and it continued to evolve over the succeeding centuries.", "answers": { "answer_start": [], "text": [] } }, { "id": "56dddf4066d3e219004dad5f", "question": "Who was the duke in the battle of Hastings?", "context": "The Norman dynasty had a major political, cultural and military impact on medieval Europe and even the Near East. The Normans were famed for their martial spirit and eventually for their Christian piety, becoming exponents of the Catholic orthodoxy into which they assimilated. They adopted the Gallo-Romance language of the Frankish land they settled, their dialect becoming known as Norman, Normaund or Norman French, an important literary language. The Duchy of Normandy, which they formed by treaty with the French crown, was a great fief of medieval France, and under Richard I of Normandy was forged into a cohesive and formidable principality in feudal tenure. The Normans are noted both for their culture, such as their unique Romanesque architecture and musical traditions, and for their significant military accomplishments and innovations. Norman adventurers founded the Kingdom of Sicily under Roger II after conquering southern Italy on the Saracens and Byzantines, and an expedition on behalf of their duke, William the Conqueror, led to the Norman conquest of England at the Battle of Hastings in 1066. Norman cultural and military influence spread from these new European centres to the Crusader states of the Near East, where their prince Bohemond I founded the Principality of Antioch in the Levant, to Scotland and Wales in Great Britain, to Ireland, and to the coasts of north Africa and the Canary Islands.", "answers": { "answer_start": [ 1022, 1022, 1022 ], "text": [ "William the Conqueror", "William the Conqueror", "William the Conqueror" ] } }, { "id": "5ad3a266604f3c001a3fea2b", "question": "What principality did William the conquerer found?", "context": "The Norman dynasty had a major political, cultural and military impact on medieval Europe and even the Near East. The Normans were famed for their martial spirit and eventually for their Christian piety, becoming exponents of the Catholic orthodoxy into which they assimilated. 
They adopted the Gallo-Romance language of the Frankish land they settled, their dialect becoming known as Norman, Normaund or Norman French, an important literary language. The Duchy of Normandy, which they formed by treaty with the French crown, was a great fief of medieval France, and under Richard I of Normandy was forged into a cohesive and formidable principality in feudal tenure. The Normans are noted both for their culture, such as their unique Romanesque architecture and musical traditions, and for their significant military accomplishments and innovations. Norman adventurers founded the Kingdom of Sicily under Roger II after conquering southern Italy on the Saracens and Byzantines, and an expedition on behalf of their duke, William the Conqueror, led to the Norman conquest of England at the Battle of Hastings in 1066. Norman cultural and military influence spread from these new European centres to the Crusader states of the Near East, where their prince Bohemond I founded the Principality of Antioch in the Levant, to Scotland and Wales in Great Britain, to Ireland, and to the coasts of north Africa and the Canary Islands.", "answers": { "answer_start": [], "text": [] } }, { "id": "56e16182e3433e1400422e28", "question": "What branch of theoretical computer science deals with broadly classifying computational problems by difficulty and class of relationship?", "context": "Computational complexity theory is a branch of the theory of computation in theoretical computer science that focuses on classifying computational problems according to their inherent difficulty, and relating those classes to each other. A computational problem is understood to be a task that is in principle amenable to being solved by a computer, which is equivalent to stating that the problem may be solved by mechanical application of mathematical steps, such as an algorithm.", "answers": { "answer_start": [ 0, 0, 0 ], "text": [ "Computational complexity theory", "Computational complexity theory", "Computational complexity theory" ] } }, { "id": "5ad5316b5b96ef001a10ab76", "question": "What is a manual application of mathematical steps?", "context": "Computational complexity theory is a branch of the theory of computation in theoretical computer science that focuses on classifying computational problems according to their inherent difficulty, and relating those classes to each other. A computational problem is understood to be a task that is in principle amenable to being solved by a computer, which is equivalent to stating that the problem may be solved by mechanical application of mathematical steps, such as an algorithm.", "answers": { "answer_start": [], "text": [] } }, { "id": "56e16839cd28a01900c67887", "question": "What measure of a computational problem broadly defines the inherent difficulty of the solution?", "context": "A problem is regarded as inherently difficult if its solution requires significant resources, whatever the algorithm used. The theory formalizes this intuition, by introducing mathematical models of computation to study these problems and quantifying the amount of resources needed to solve them, such as time and storage. Other complexity measures are also used, such as the amount of communication (used in communication complexity), the number of gates in a circuit (used in circuit complexity) and the number of processors (used in parallel computing). 
One of the roles of computational complexity theory is to determine the practical limits on what computers can and cannot do.", "answers": { "answer_start": [ 46, 49, 46 ], "text": [ "if its solution requires significant resources", "its solution requires significant resources", "if its solution requires significant resources" ] } }, { "id": "56e16839cd28a01900c67888", "question": "What method is used to intuitively assess or quantify the amount of resources required to solve a computational problem?", "context": "A problem is regarded as inherently difficult if its solution requires significant resources, whatever the algorithm used. The theory formalizes this intuition, by introducing mathematical models of computation to study these problems and quantifying the amount of resources needed to solve them, such as time and storage. Other complexity measures are also used, such as the amount of communication (used in communication complexity), the number of gates in a circuit (used in circuit complexity) and the number of processors (used in parallel computing). One of the roles of computational complexity theory is to determine the practical limits on what computers can and cannot do.", "answers": { "answer_start": [ 176, 176, 176 ], "text": [ "mathematical models of computation", "mathematical models of computation", "mathematical models of computation" ] } }, { "id": "56e16839cd28a01900c67889", "question": "What are two basic primary resources used to guage complexity?", "context": "A problem is regarded as inherently difficult if its solution requires significant resources, whatever the algorithm used. The theory formalizes this intuition, by introducing mathematical models of computation to study these problems and quantifying the amount of resources needed to solve them, such as time and storage. Other complexity measures are also used, such as the amount of communication (used in communication complexity), the number of gates in a circuit (used in circuit complexity) and the number of processors (used in parallel computing). One of the roles of computational complexity theory is to determine the practical limits on what computers can and cannot do.", "answers": { "answer_start": [ 305, 305, 305 ], "text": [ "time and storage", "time and storage", "time and storage" ] } }, { "id": "5ad532575b96ef001a10ab7f", "question": "What unit is measured to determine circuit simplicity?", "context": "A problem is regarded as inherently difficult if its solution requires significant resources, whatever the algorithm used. The theory formalizes this intuition, by introducing mathematical models of computation to study these problems and quantifying the amount of resources needed to solve them, such as time and storage. Other complexity measures are also used, such as the amount of communication (used in communication complexity), the number of gates in a circuit (used in circuit complexity) and the number of processors (used in parallel computing). One of the roles of computational complexity theory is to determine the practical limits on what computers can and cannot do.", "answers": { "answer_start": [], "text": [] } }, { "id": "5ad532575b96ef001a10ab80", "question": "What number is used in perpendicular computing?", "context": "A problem is regarded as inherently difficult if its solution requires significant resources, whatever the algorithm used. 
The theory formalizes this intuition, by introducing mathematical models of computation to study these problems and quantifying the amount of resources needed to solve them, such as time and storage. Other complexity measures are also used, such as the amount of communication (used in communication complexity), the number of gates in a circuit (used in circuit complexity) and the number of processors (used in parallel computing). One of the roles of computational complexity theory is to determine the practical limits on what computers can and cannot do.", "answers": { "answer_start": [], "text": [] } } ] }
0
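The SQUAD fixture above follows a flattened SQuAD v2.0 layout: a top-level "version" field and a "data" list whose records each carry an "id", "question", "context", and parallel "answers" lists ("answer_start" character offsets and "text" strings), with unanswerable questions marked by empty lists. A minimal consumption sketch; the relative path is an assumption based on the file_path shown above, and the offset check is illustrative rather than part of any test in the repository:

import json

# Load the SQuAD-style fixture (path assumed from the dump above).
with open("tests/fixtures/tests_samples/SQUAD/sample.json", encoding="utf-8") as f:
    squad = json.load(f)

for example in squad["data"]:
    starts = example["answers"]["answer_start"]
    texts = example["answers"]["text"]
    if not texts:
        # SQuAD v2.0 allows unanswerable questions: both lists are empty.
        print(f"{example['id']}: unanswerable")
        continue
    # Each answer_start is a character offset into the context string.
    for start, text in zip(starts, texts):
        assert example["context"][start : start + len(text)] == text
    print(f"{example['id']}: {texts[0]!r} at offset {starts[0]}")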
hf_public_repos/transformers/tests/fixtures/tests_samples
hf_public_repos/transformers/tests/fixtures/tests_samples/swag/sample.json
{"ending0": "passes by walking down the street playing their instruments.", "ending1": "has heard approaching them.", "ending2": "arrives and they're outside dancing and asleep.", "ending3": "turns the lead singer watches the performance.", "label": 0, "sent1": "Members of the procession walk down the street holding small horn brass instruments.", "sent2": "A drum line"} {"ending0": "are playing ping pong and celebrating one left each in quick.", "ending1": "wait slowly towards the cadets.", "ending2": "continues to play as well along the crowd along with the band being interviewed.", "ending3": "continue to play marching, interspersed.", "label": 3, "sent1": "A drum line passes by walking down the street playing their instruments.", "sent2": "Members of the procession"} {"ending0": "pay the other coaches to cheer as people this chatter dips in lawn sheets.", "ending1": "walk down the street holding small horn brass instruments.", "ending2": "is seen in the background.", "ending3": "are talking a couple of people playing a game of tug of war.", "label": 1, "sent1": "A group of members in green uniforms walks waving flags.", "sent2": "Members of the procession"} {"ending0": "are playing ping pong and celebrating one left each in quick.", "ending1": "wait slowly towards the cadets.", "ending2": "makes a square call and ends by jumping down into snowy streets where fans begin to take their positions.", "ending3": "play and go back and forth hitting the drums while the audience claps for them.", "label": 3, "sent1": "A drum line passes by walking down the street playing their instruments.", "sent2": "Members of the procession"} {"ending0": "finishes the song and lowers the instrument.", "ending1": "hits the saxophone and demonstrates how to properly use the racquet.", "ending2": "finishes massage the instrument again and continues.", "ending3": "continues dancing while the man gore the music outside while drums.", "label": 0, "sent1": "The person plays a song on the violin.", "sent2": "The man"} {"ending0": "finishes playing then marches their tenderly.", "ending1": "walks in frame and rubs on his hands, and then walks into a room.", "ending2": "continues playing guitar while moving from the camera.", "ending3": "plays a song on the violin.", "label": 3, "sent1": "The person holds up the violin to his chin and gets ready.", "sent2": "The person"} {"ending0": "examines the instrument in his hand.", "ending1": "stops playing the drums and waves over the other boys.", "ending2": "lights the cigarette and sticks his head in.", "ending3": "drags off the vacuum.", "label": 0, "sent1": "A person retrieves an instrument from a closet.", "sent2": "The man"} {"ending0": "studies a picture of the man playing the violin.", "ending1": "holds up the violin to his chin and gets ready.", "ending2": "stops to speak to the camera again.", "ending3": "puts his arm around the man and backs away.", "label": 1, "sent1": "The man examines the instrument in his hand.", "sent2": "The person"} {"ending0": "hands her another phone.", "ending1": "takes the drink, then holds it.", "ending2": "looks off then looks at someone.", "ending3": "stares blearily down at the floor.", "label": 3, "sent1": "Someone walks over to the radio.", "sent2": "Someone"} {"ending0": "looks off then looks at someone.", "ending1": "hands her another phone.", "ending2": "takes the drink, then holds it.", "ending3": "turns on a monitor.", "label": 3, "sent1": "Someone walks over to the radio.", "sent2": "Someone"}
0
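Unlike the SQUAD fixture, the swag sample above is JSON Lines: one example per line, each with a shared beginning ("sent1", "sent2"), four candidate endings ("ending0" through "ending3"), and a "label" that indexes the correct ending. A minimal parsing sketch under the same assumption about the relative path:

import json

# Read the JSON Lines fixture: one SWAG example per line.
with open("tests/fixtures/tests_samples/swag/sample.json", encoding="utf-8") as f:
    for line in f:
        example = json.loads(line)
        # Build the four candidate continuations of sent2.
        candidates = [example["sent2"] + " " + example[f"ending{i}"] for i in range(4)]
        gold = candidates[example["label"]]
        print(f"{example['sent1']} -> {gold}")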
hf_public_repos/transformers/tests/fixtures/tests_samples
hf_public_repos/transformers/tests/fixtures/tests_samples/wiki_text/wiki_00
<doc id="12" url="https://en.wikipedia.org/wiki?curid=12" title="Anarchism"> Anarchism Anarchism is a political philosophy and movement that rejects all involuntary, coercive forms of hierarchy. It radically calls for the abolition of the state which it holds to be undesirable, unnecessary, and harmful. The history of anarchism stretches back to prehistory, when humans lived in anarchistic societies long before the establishment of formal states, realms or empires. With the rise of organised hierarchical bodies, skepticism toward authority also rose, but it was not until the 19th century that a self-conscious political movement emerged. During the latter half of the 19th and the first decades of the 20th century, the anarchist movement flourished in most parts of the world and had a significant role in worker's struggles for emancipation. Various anarchist schools of thought formed during this period. Anarchists took part in several revolutions, most notably in the Spanish Civil War, where they were crushed along with the alliance to restore the Second Republic by the fascist forces of the Nationalist faction and its foreign allies in Nazi Germany, Fascist Italy, Portuguese Dictatorship and the Catholic Church in 1939, marking the end of the classical era of anarchism. In the last decades of the 20th century and into the 21st century, the anarchist movement has been resurgent once more. Anarchism employs various tactics in order to meet its ideal ends; these can be broadly separated into revolutionary and evolutionary tactics. There is significant overlap between the two, which are merely descriptive. Revolutionary tactics aim to bring down authority and state, and have taken a violent turn in the past. Evolutionary tactics aim to prefigure what an anarchist society would be like. Anarchist thought, criticism, and praxis has played a part in diverse areas of human society. The etymological origin of "anarchism" is from the Ancient Greek "anarkhia", meaning "without a ruler", composed of the prefix "an-" (i.e. "without") and the word "arkhos" (i.e. "leader" or "ruler"). The suffix "-ism" denotes the ideological current that favours anarchy. "Anarchism" appears in English from 1642 as "anarchisme" and "anarchy" from 1539. Various factions within the French Revolution labelled their opponents as "anarchists", although few such accused shared many views with later anarchists. Many revolutionaries of the 19th century such as William Godwin (1756–1836) and Wilhelm Weitling (1808–1871) would contribute to the anarchist doctrines of the next generation, but they did not use "anarchist" or "anarchism" in describing themselves or their beliefs. The first political philosopher to call himself an "anarchist" () was Pierre-Joseph Proudhon (1809–1865), marking the formal birth of anarchism in the mid-19th century. Since the 1890s and beginning in France, "libertarianism" has often been used as a synonym for anarchism and its use as a synonym is still common outside the United States. On the other hand, some use "libertarianism" to refer to individualistic free-market philosophy only, referring to free-market anarchism as "libertarian anarchism". While opposition to the state is central to anarchist thought, defining anarchism is not an easy task as there is a lot of discussion among scholars and anarchists on the matter and various currents perceive anarchism slightly differently. 
Hence, it might be true to say that anarchism is a cluster of political philosophies opposing authority and hierarchical organization (including the state, capitalism, nationalism and all associated institutions) in the conduct of all human relations in favour of a society based on voluntary association, on freedom and on decentralisation, but this definition has the same shortcomings as the definition based on etymology (which is simply a negation of a ruler), or based on anti-statism (anarchism is much more than that) or even the anti-authoritarian (which is an "a posteriori" conclusion). Nonetheless, major elements of the definition of anarchism include the following: During the prehistoric era of mankind, an established authority did not exist. It was after the creation of towns and cities that institutions of authority were established and anarchistic ideas espoused as a reaction. The most notable precursors to anarchism in the ancient world were in China and Greece. In China, philosophical anarchism (i.e. the discussion on the legitimacy of the state) was delineated by Taoist philosophers Zhuang Zhou and Laozi. Likewise, anarchic attitudes were articulated by tragedians and philosophers in Greece. Aeschylus and Sophocles used the myth of Antigone to illustrate the conflict between rules set by the state and personal autonomy. Socrates questioned Athenian authorities constantly and insisted on the right to individual freedom of conscience. Cynics dismissed human law ("nomos") and associated authorities while trying to live according to nature ("physis"). Stoics were supportive of a society based on unofficial and friendly relations among its citizens without the presence of a state. During the Middle Ages, there was no anarchistic activity except some ascetic religious movements in the Muslim world or in Christian Europe. This kind of tradition later gave birth to religious anarchism. In the Sasanian Empire, Mazdak called for an egalitarian society and the abolition of monarchy, only to be soon executed by Emperor Kavad I. In Basra, religious sects preached against the state. In Europe, various sects developed anti-state and libertarian tendencies. Libertarian ideas further emerged during the Renaissance with the spread of reasoning and humanism through Europe. Novelists fictionalised ideal societies that were based not on coercion but on voluntarism. The Enlightenment further pushed towards anarchism with the optimism for social progress. During the French Revolution, partisan groups such as the Enragés and the sans-culottes saw a turning point in the fermentation of anti-state and federalist sentiments. The first anarchist currents developed throughout the 18th century—William Godwin espoused philosophical anarchism in England, morally delegitimizing the state, Max Stirner's thinking paved the way to individualism, and Pierre-Joseph Proudhon's theory of mutualism found fertile soil in France. This era of classical anarchism lasted until the end of the Spanish Civil War of 1936 and is considered the golden age of anarchism. Drawing from mutualism, Mikhail Bakunin founded collectivist anarchism and entered the International Workingmen's Association, a working class union later known as the First International, which formed in 1864 to unite diverse revolutionary currents. The International became a significant political force, with Karl Marx being a leading figure and a member of its General Council. 
Bakunin's faction (the Jura Federation) and Proudhon's followers (the mutualists) opposed Marxist state socialism, advocating political abstentionism and small property holdings. After bitter disputes, the Bakuninists were expelled from the International by the Marxists at the 1872 Hague Congress. Bakunin famously predicted that if revolutionaries gained power by Marx's terms, they would end up the new tyrants of workers. After being expelled, anarchists formed the St. Imier International. Under the influence of Peter Kropotkin, a Russian philosopher and scientist, anarcho-communism overlapped with collectivism. Anarcho-communists, who drew inspiration from the 1871 Paris Commune, advocated for free federation and for the distribution of goods according to one's needs. At the turn of the century, anarchism had spread all over the world. In China, small groups of students imported the humanistic pro-science version of anarcho-communism. Tokyo was a hotspot for rebellious youth from countries of the Far East, travelling to the Japanese capital to study. In Latin America, Argentina was a stronghold for anarcho-syndicalism, where it became the most prominent left-wing ideology. During this time, a minority of anarchists adopted tactics of revolutionary political violence. This strategy became known as propaganda of the deed. The dismemberment of the French socialist movement into many groups, and the execution and exile of many Communards to penal colonies following the suppression of the Paris Commune, favoured individualist political expression and acts. Even though many anarchists distanced themselves from these terrorist acts, infamy came upon the movement. Illegalism was another strategy which some anarchists adopted during this period. Anarchists enthusiastically participated in the Russian Revolution—despite concerns—in opposition to the Whites. However, they met harsh suppression after the Bolshevik government was stabilized. Several anarchists from Petrograd and Moscow fled to Ukraine, notably leading to the Kronstadt rebellion and Nestor Makhno's struggle in the Free Territory. With the anarchists being crushed in Russia, two new antithetical currents emerged, namely platformism and synthesis anarchism. The former sought to create a coherent group that would push for revolution while the latter were against anything that would resemble a political party. Seeing the victories of the Bolsheviks in the October Revolution and the resulting Russian Civil War, many workers and activists turned to communist parties, which grew at the expense of anarchism and other socialist movements. In France and the United States, members of major syndicalist movements, the General Confederation of Labour and the Industrial Workers of the World, left their organisations and joined the Communist International. In the Spanish Civil War, anarchists and syndicalists (CNT and FAI) once again allied themselves with various currents of leftists. A long tradition of Spanish anarchism led to anarchists playing a pivotal role in the war. In response to the army rebellion, an anarchist-inspired movement of peasants and workers, supported by armed militias, took control of Barcelona and of large areas of rural Spain, where they collectivised the land. The Soviet Union provided some limited assistance at the beginning of the war, but the result was a bitter fight among communists and anarchists at a series of events named May Days as Joseph Stalin tried to seize control of the Republicans. 
At the end of World War II, the anarchist movement was severely weakened. However, the 1960s witnessed a revival of anarchism likely caused by a perceived failure of Marxism–Leninism and tensions built by the Cold War. During this time, anarchism took root in other movements critical towards both the state and capitalism, such as the anti-nuclear, environmental and pacifist movements, the New Left, and the counterculture of the 1960s. Anarchism became associated with punk subculture, as exemplified by bands such as Crass and the Sex Pistols, and the established feminist tendencies of anarcha-feminism returned with vigour during the second wave of feminism. Around the turn of the 21st century, anarchism grew in popularity and influence within anti-war, anti-capitalist, and anti-globalisation movements. Anarchists became known for their involvement in protests against the World Trade Organization, the Group of Eight and the World Economic Forum. During the protests, "ad hoc" leaderless anonymous cadres known as black blocs engaged in rioting, property destruction, and violent confrontations with the police. Other organisational tactics pioneered in this time include security culture, affinity groups, and the use of decentralised technologies such as the internet. A significant event of this period was the confrontations at the WTO conference in Seattle in 1999. Anarchist ideas have been influential in the development of the Zapatistas in Mexico and the Democratic Federation of Northern Syria, more commonly known as Rojava, a "de facto" autonomous region in northern Syria. Anarchist schools of thought have been generally grouped into two main historical traditions, social anarchism and individualist anarchism, owing to their different origins, values and evolution. The individualist current emphasises negative liberty in opposing restraints upon the free individual, while the social current emphasises positive liberty in aiming to achieve the free potential of society through equality and social ownership. In a chronological sense, anarchism can be segmented by the classical currents of the late 19th century, and the post-classical currents (such as anarcha-feminism, green anarchism and post-anarchism) developed thereafter. Beyond the specific factions of anarchist movements which constitute political anarchism lies philosophical anarchism, which holds that the state lacks moral legitimacy, without necessarily accepting the imperative of revolution to eliminate it. A component especially of individualist anarchism, philosophical anarchism may tolerate the existence of a minimal state, but argues that citizens have no moral obligation to obey government when it conflicts with individual autonomy. Anarchism pays significant attention to moral arguments since ethics have a central role in anarchist philosophy. One reaction against sectarianism within the anarchist milieu was anarchism without adjectives, a call for toleration and unity among anarchists first adopted by Fernando Tarrida del Mármol in 1889 in response to the bitter debates of anarchist theory at the time. Despite separation, the various anarchist schools of thought are not seen as distinct entities, but as tendencies that intermingle. Anarchism is usually placed on the far-left of the political spectrum. 
Much of its economics and legal philosophy reflect anti-authoritarian, anti-statist, and libertarian interpretations of the radical left-wing and socialist politics of collectivism, communism, individualism, mutualism, and syndicalism, among other libertarian socialist economic theories. As anarchism does not offer a fixed body of doctrine from a single particular worldview, many anarchist types and traditions exist, and varieties of anarchy diverge widely. Inceptive currents among classical anarchist currents were mutualism and individualism. They were followed by the major currents of social anarchism (collectivist, communist, and syndicalist). They differ on organizational and economic aspects of their ideal society. Mutualism is an 18th-century economic theory that was developed into anarchist theory by Pierre-Joseph Proudhon. Its aims include reciprocity, free association, voluntary contract, federation, and credit and currency reform that would be regulated by a bank of the people. Mutualism has been retrospectively characterised as ideologically situated between individualist and collectivist forms of anarchism. Proudhon first characterised his goal as a "third form of society, the synthesis of communism and property". Collectivist anarchism, also known as anarchist collectivism or anarcho-collectivism, is a revolutionary socialist form of anarchism commonly associated with Mikhail Bakunin. Collectivist anarchists advocate collective ownership of the means of production, theorised to be achieved through violent revolution, and that workers be paid according to time worked, rather than goods being distributed according to need as in communism. Collectivist anarchism arose alongside Marxism, but rejected the dictatorship of the proletariat despite the stated Marxist goal of a collectivist stateless society. Anarcho-communism, also known as anarchist-communism, communist anarchism, and libertarian communism, is a theory of anarchism that advocates a communist society with common ownership of the means of production, direct democracy, and a horizontal network of voluntary associations and workers' councils with production and consumption based on the guiding principle: "From each according to his ability, to each according to his need". Anarcho-communism developed from radical socialist currents after the French Revolution, but it was first formulated as such in the Italian section of the First International. It was later expanded upon in the theoretical work of Peter Kropotkin. Anarcho-syndicalism, also referred to as revolutionary syndicalism, is a branch of anarchism that views labour syndicates as a potential force for revolutionary social change, replacing capitalism and the state with a new society democratically self-managed by workers. The basic principles of anarcho-syndicalism are workers' solidarity, direct action, and workers' self-management. Individualist anarchism refers to several traditions of thought within the anarchist movement that emphasise the individual and their will over any kinds of external determinants. Early influences on individualist forms of anarchism include William Godwin, Max Stirner and Henry David Thoreau. Through many countries, individualist anarchism attracted a small yet diverse following of Bohemian artists and intellectuals as well as young anarchist outlaws in what became known as illegalism and individual reclamation. Anarchist principles undergird contemporary radical social movements of the left. 
Interest in the anarchist movement developed alongside momentum in the anti-globalization movement, whose leading activist networks were anarchist in orientation. As the movement shaped 21st century radicalism, wider embrace of anarchist principles signaled a revival of interest. Contemporary news coverage which emphasizes black bloc demonstrations has reinforced anarchism's historical association with chaos and violence, although its publicity has also led more scholars to engage with the anarchist movement. Anarchism has continued to generate many philosophies and movements—at times eclectic, drawing upon various sources, and syncretic, combining disparate concepts to create new philosophical approaches. The anti-capitalist tradition of classical anarchism has remained prominent within contemporary currents. Various anarchist groups, tendencies, and schools of thought exist today, making it difficult to describe the contemporary anarchist movement. While theorists and activists have established "relatively stable constellations of anarchist principles", there is no consensus on which principles are core. As a result, commentators describe multiple "anarchisms" (rather than a singular "anarchism") in which common principles are shared between schools of anarchism while each group prioritizes those principles differently. For example, gender equality can be a common principle but ranks as a higher priority to anarcha-feminists than to anarchist communists. Anarchists are generally committed against coercive authority in all forms, namely "all centralized and hierarchical forms of government (e.g., monarchy, representative democracy, state socialism, etc.), economic class systems (e.g., capitalism, Bolshevism, feudalism, slavery, etc.), autocratic religions (e.g., fundamentalist Islam, Roman Catholicism, etc.), patriarchy, heterosexism, white supremacy, and imperialism". However, anarchist schools disagree on the methods by which these forms should be opposed. Anarchists' tactics take various forms but in general serve two major goals—first, to oppose the Establishment; and second, to promote anarchist ethics and reflect an anarchist vision of society, illustrating the unity of means and ends. A broad categorization can be made between aims to destroy oppressive states and institutions by revolutionary means, and aims to change society through evolutionary means. Evolutionary tactics reject violence and take a gradual approach to anarchist aims, though there is significant overlap between the two. Anarchist tactics have shifted during the course of the last century. Anarchists during the early 20th century focused more on strikes and militancy, while contemporary anarchists use a broader array of approaches. During the classical era, anarchists had a militant tendency. Not only did they confront state armed forces (as in Spain and Ukraine) but some of them also employed terrorism as propaganda of the deed. Assassination attempts were carried out against heads of state, some of which were successful. Anarchists also took part in revolutions. Anarchist perspectives towards violence have always been perplexing and controversial. On one hand, anarcho-pacifists point out the unity of means and ends. On the other hand, other anarchist groups advocate direct action, a tactic which can include acts of sabotage or even acts of terrorism. 
This attitude was quite prominent a century ago; seeing the state as a tyrant, some anarchists believed that they had every right to oppose its oppression by any means possible. Emma Goldman and Errico Malatesta, who were proponents of limited use of violence, argued that violence is merely a reaction to state violence as a necessary evil. Anarchists took an active role in strikes, although they tended to be antipathetic to formal syndicalism, seeing it as reformist. They saw it as a part of the movement which sought to overthrow the state and capitalism. Anarchists also reinforced their propaganda within the arts, and some of them practiced nudism. They also built communities which were based on friendship. They were also involved in the press. In the current era, Italian anarchist Alfredo Bonanno, a proponent of insurrectionary anarchism, has reinstated the debate on violence by rejecting the nonviolence tactic adopted since the late 19th century by Kropotkin and other prominent anarchists afterwards. Both Bonanno and the French group The Invisible Committee advocate for small, informal affiliation groups, where each member is responsible for their own actions but works together to bring down oppression utilizing sabotage and other violent means against the state, capitalism and other enemies. Members of The Invisible Committee were arrested in 2008 on various charges, terrorism included. Overall, today's anarchists are much less violent and militant than their ideological ancestors. They mostly engage in confronting the police during demonstrations and riots, especially in countries like Canada, Mexico or Greece. Militant black bloc protest groups are known for clashing with the police. However, anarchists not only clash with state operators; they also engage in the struggle against fascists and racists, taking anti-fascist action and mobilizing to prevent hate rallies from happening. Anarchists commonly employ direct action. This can take the form of disrupting and protesting against unjust hierarchy, or the form of self-managing their lives through the creation of counter-institutions such as communes and non-hierarchical collectives. Often, decision-making is handled in an anti-authoritarian way, with everyone having equal say in each decision, an approach known as horizontalism. Contemporary-era anarchists have been engaging with various grassroots movements that are not explicitly anarchist but are more or less based on horizontalism, respecting personal autonomy, and participating in mass activism such as strikes and demonstrations. The newly coined term "small-a anarchism", in contrast with the "big-A anarchism" of the classical era, signals their tendency not to base their thoughts and actions on classical-era anarchism or to refer to Kropotkin or Proudhon to justify their opinions. They would rather base their thought and praxis on their own experience, which they will later theorize. The decision-making process of small affinity anarchist groups plays a significant tactical role. Anarchists have employed various methods in order to build a rough consensus among members of their group, without the need of a leader or a leading group. One way is for an individual from the group to play the role of facilitator to help achieve a consensus without taking part in the discussion themselves or promoting a specific point. Minorities usually accept rough consensus, except when they feel the proposal contradicts anarchist goals, values, or ethics. 
Anarchists usually form small groups (5–20 individuals) to enhance autonomy and friendships among their members. These kinds of groups more often than not interconnect with each other, forming larger networks. Anarchists still support and participate in strikes, especially wildcat strikes; these are leaderless strikes not organised centrally by a syndicate. Anarchists have gone online to spread their message. As in the past, newspapers and journals are used; however, because of distributional and other difficulties, anarchists have found it easier to create websites, hosting electronic libraries and other portals. Anarchists were also involved in developing various software that is available for free. The way these hacktivists work to develop and distribute their software resembles the anarchist ideals, especially when it comes to preserving users' privacy from state surveillance. Anarchists organize themselves to squat and reclaim public spaces. During important events such as protests and when spaces are being occupied, they are often called Temporary Autonomous Zones (TAZ), spaces where surrealism, poetry and art are blended to display the anarchist ideal. As seen by anarchists, squatting is a way to regain urban space from the capitalist market, serving pragmatic needs, and is also seen as an exemplary direct action. Acquiring space enables anarchists to experiment with their ideas and build social bonds. These tactics, which not all anarchists endorse to the same degree, along with various forms of protest at highly symbolic events, add up to a carnivalesque atmosphere that is part of contemporary anarchist vitality. As anarchism is a philosophy that embodies many diverse attitudes, tendencies, and schools of thought, and disagreement over questions of values, ideology, and tactics is common, its diversity has led to widely different uses of identical terms among different anarchist traditions, which has created a number of definitional concerns in anarchist theory. For instance, the compatibility of capitalism, nationalism and religion with anarchism is widely disputed. Similarly, anarchism enjoys complex relationships with ideologies such as Marxism, communism, collectivism and trade unionism. Anarchists may be motivated by humanism, divine authority, enlightened self-interest, veganism, or any number of alternative ethical doctrines. Phenomena such as civilisation, technology (e.g. within anarcho-primitivism) and the democratic process may be sharply criticised within some anarchist tendencies and simultaneously lauded in others. Gender and sexuality carry with them dynamics of hierarchy; anarchism is obliged to address, analyse and oppose the suppression of one's autonomy because of the dynamics that gender roles traditionally impose. A historical current that arose and flourished between 1890 and 1920 within anarchism was free love; in contemporary anarchism, this current survives as a tendency to support polyamory and queer anarchism. Free love advocates were against marriage, which they saw as a way of men imposing authority over women, largely because marriage law greatly favoured the power of men. The notion of free love, though, was much broader; it included critique of the established order that limited women's sexual freedom and pleasure. Such free love movements contributed to the establishment of communal houses, where large groups of travelers, anarchists, and other activists slept in beds together. 
Free love had roots both in Europe and the United States. Some anarchists, however, struggled with the jealousy that arose from free love. Anarchist feminists were advocates of free love, were against marriage, were pro-choice (to use a contemporary term), and had a similar agenda. Anarchist and non-anarchist feminists differed on suffrage, but were nonetheless supportive of one another. During the second half of the 20th century, anarchism intermingled with the second wave of feminism, radicalizing some currents of the feminist movement (and being influenced as well). By the latest decades of the 20th century, anarchists and feminists were advocating for the rights and autonomy of women, gays, queers and other marginalized groups, with some feminist thinkers suggesting a fusion of the two currents. With the third wave of feminism, sexual identity and compulsory heterosexuality became a subject of study for anarchists, which yielded a post-structuralist critique of sexual normality. However, some anarchists distanced themselves from this line of thinking, suggesting that it leaned towards individualism and was, therefore, dropping the cause of social liberation. The interest of anarchists in education stretches back to the first emergence of classical anarchism. Anarchists consider 'proper' education, which sets the foundations of the future autonomy of the individual and the society, to be an act of mutual aid. Anarchist writers such as William Godwin and Max Stirner attacked both state education and private education as another means by which the ruling class replicates its privileges. In 1901, Catalan anarchist and free thinker Francisco Ferrer established the Escuela Moderna in Barcelona as an opposition to the established education system, which was dictated largely by the Catholic Church. Ferrer's approach was secular, rejecting both state and church involvement in the educational process, and gave pupils large amounts of autonomy in planning their work and attendance. Ferrer aimed to educate the working class and explicitly sought to foster class consciousness among students. The school closed after constant harassment by the state and Ferrer was later arrested. His ideas, however, formed the inspiration for a series of modern schools around the world. Christian anarchist Leo Tolstoy also established a similar school, with its founding principle, according to Tolstoy, being that "for education to be effective it had to be free". In a similar vein, A. S. Neill founded what became Summerhill School in 1921, also declaring it free from coercion. Anarchist education is based largely on the idea that a child's right to develop freely, without manipulation, ought to be respected, and that rationality will lead children to morally good conclusions. However, there has been little consensus among anarchist figures as to what constitutes manipulation; Ferrer, for example, believed that moral indoctrination was necessary and explicitly taught pupils that equality, liberty, and social justice were not possible under capitalism (along with other critiques of nationalism and government). Late 20th century and contemporary anarchist writers (such as Colin Ward, Herbert Read and Paul Goodman) intensified and expanded the anarchist critique of state education, largely focusing on the need for a system that focuses on children's creativity rather than on their ability to attain a career or participate in consumer society. 
Contemporary anarchists, such as Colin Ward, have further argued that state education serves to perpetuate socio-economic inequality. While few anarchist educational institutions have survived to the modern day, major tenets of anarchist schools, such as respect for child autonomy and relying on reasoning rather than indoctrination as a teaching method, have spread among mainstream educational institutions. Objection to the state and its institutions is a "sine qua non" of anarchism. Anarchists consider the state a tool of domination and believe it to be illegitimate regardless of its political tendencies. Instead of people being able to control the aspects of their life, major decisions are taken by a small elite. Authority ultimately rests solely on power, regardless of whether that power is open or transparent, as it still has the ability to coerce people. Another anarchist argument against states is that the people constituting a government, even the most altruistic among officials, will unavoidably seek to gain more power, leading to corruption. Anarchists consider the idea that the state is the collective will of the people to be an unachievable fiction, because the ruling class is distinct from the rest of society. The connection between anarchism and art was quite profound during the classical era of anarchism, especially among artistic currents that were developing during that era, such as futurists, surrealists, and others, while in literature anarchism was mostly associated with the New Apocalyptics and the Neo-romanticism movement. In music, anarchism has been associated with music scenes such as punk. Anarchists such as Leo Tolstoy and Herbert Read argued that the border between the artist and the non-artist, what separates art from a daily act, is a construct produced by the alienation caused by capitalism, and it prevents humans from living a joyful life. Other anarchists advocated for or used art as a means to achieve anarchist ends. In his book "Breaking the Spell: A History of Anarchist Filmmakers, Videotape Guerrillas, and Digital Ninjas", Chris Robé claims that "anarchist-inflected practices have increasingly structured movement-based video activism." Three overlapping properties made art useful to anarchists: it could depict a critique of existing society and hierarchies; it could serve as a prefigurative tool to reflect the anarchist ideal society; and it could turn into a means of direct action, in protests for example. As it appeals to both emotion and reason, art could appeal to the "whole human" and have a powerful effect. Philosophy lecturer Andrew G. Fiala has listed five main arguments against anarchism. Firstly, he notes that anarchism is related to violence and destruction, not only in the pragmatic world (i.e. at protests) but in the world of ethics as well. The second argument is that it is impossible for a society to function without a state or something like a state, acting to protect citizens from criminality. Fiala takes "Leviathan" from Thomas Hobbes and the night-watchman state from philosopher Robert Nozick as examples. Thirdly, anarchism is evaluated as unfeasible or utopian since the state cannot be defeated practically; this line of argument most often calls for political action within the system to reform it. The fourth argument is that anarchism is self-contradictory: while it advocates that no one should rule, if it were accepted by the many, anarchism would turn into the ruling political theory. 
In this line of criticism also comes the self-contradiction that anarchism calls for collective action while endorsing the autonomy of the individual, hence no collective action can be taken. Lastly, Fiala mentions a critique of philosophical anarchism as being ineffective (all talk and thought) while capitalism and the bourgeois class remain strong. Philosophical anarchism has met with criticism from members of academia, following the release of pro-anarchist books such as A. John Simmons' "Moral Principles and Political Obligations" (1979). Law professor William A. Edmundson authored an essay arguing against three major philosophical anarchist principles, which he finds fallacious; Edmundson claims that while the individual does not owe a normal state a duty of obedience, this does not imply that anarchism is the inevitable conclusion, and the state is still morally legitimate. </doc> <doc id="25" url="https://en.wikipedia.org/wiki?curid=25" title="Autism"> Autism Autism is a developmental disorder characterized by difficulties with social interaction and communication, and by restricted and repetitive behavior. Parents often notice signs during the first three years of their child's life. These signs often develop gradually, though some children with autism experience worsening in their communication and social skills after reaching developmental milestones at a normal pace. Autism is associated with a combination of genetic and environmental factors. Risk factors during pregnancy include certain infections such as rubella; toxins including valproic acid, alcohol, cocaine, pesticides, lead, and air pollution; fetal growth restriction; and autoimmune diseases. Controversies surround other proposed environmental causes; for example, the vaccine hypothesis, which has been disproven. Autism affects information processing in the brain and how nerve cells and their synapses connect and organize; how this occurs is not well understood. The Diagnostic and Statistical Manual of Mental Disorders (DSM-5) combines autism and less severe forms of the condition, including Asperger syndrome and pervasive developmental disorder not otherwise specified (PDD-NOS), into the diagnosis of autism spectrum disorder (ASD). Early behavioral interventions or speech therapy can help children with autism gain self-care, social, and communication skills. Although there is no known cure, there have been cases of children who recovered. Some autistic adults are unable to live independently. An autistic culture has developed, with some individuals seeking a cure and others believing autism should be accepted as a difference to be accommodated instead of cured. Globally, autism is estimated to affect 24.8 million people. In the 2000s, the number of people affected was estimated at 1–2 per 1,000 people worldwide. In developed countries, about 1.5% of children are diagnosed with ASD, up from 0.7% in 2000 in the United States. It occurs four to five times more often in males than females. The number of people diagnosed has increased dramatically since the 1960s, which may be partly due to changes in diagnostic practice. The question of whether actual rates have increased is unresolved. Autism is a highly variable neurodevelopmental disorder whose symptoms first appear during infancy or childhood, and generally follows a steady course without remission. People with autism may be severely impaired in some respects but average, or even superior, in others. 
Overt symptoms gradually begin after the age of six months, become established by age two or three years and tend to continue through adulthood, although often in more muted form. It is distinguished by a characteristic triad of symptoms: impairments in social interaction, impairments in communication, and repetitive behavior. Other aspects, such as atypical eating, are also common but are not essential for diagnosis. Individual symptoms of autism occur in the general population and appear not to associate highly, without a sharp line separating pathologically severe from common traits. Social deficits distinguish autism and the related autism spectrum disorders (ASD; see Classification) from other developmental disorders. People with autism have social impairments and often lack the intuition about others that many people take for granted. Noted autistic Temple Grandin described her inability to understand the social communication of neurotypicals, or people with typical neural development, as leaving her feeling "like an anthropologist on Mars". Unusual social development becomes apparent early in childhood. Autistic infants show less attention to social stimuli, smile and look at others less often, and respond less to their own name. Autistic toddlers differ more strikingly from social norms; for example, they have less eye contact and turn-taking, and do not have the ability to use simple movements to express themselves, such as pointing at things. Three- to five-year-old children with autism are less likely to exhibit social understanding, approach others spontaneously, imitate and respond to emotions, communicate nonverbally, and take turns with others. However, they do form attachments to their primary caregivers. Most children with autism display moderately less attachment security than neurotypical children, although this difference disappears in children with higher mental development or less pronounced autistic traits. Older children and adults with ASD perform worse on tests of face and emotion recognition although this may be partly due to a lower ability to define a person's own emotions. Children with high-functioning autism have more intense and frequent loneliness compared to non-autistic peers, despite the common belief that children with autism prefer to be alone. Making and maintaining friendships often proves to be difficult for those with autism. For them, the quality of friendships, not the number of friends, predicts how lonely they feel. Functional friendships, such as those resulting in invitations to parties, may affect the quality of life more deeply. There are many anecdotal reports, but few systematic studies, of aggression and violence in individuals with ASD. The limited data suggest that, in children with intellectual disability, autism is associated with aggression, destruction of property, and meltdowns. About a third to a half of individuals with autism do not develop enough natural speech to meet their daily communication needs. Differences in communication may be present from the first year of life, and may include delayed onset of babbling, unusual gestures, diminished responsiveness, and vocal patterns that are not synchronized with the caregiver. In the second and third years, children with autism have less frequent and less diverse babbling, consonants, words, and word combinations; their gestures are less often integrated with words. 
Children with autism are less likely to make requests or share experiences, and are more likely to simply repeat others' words (echolalia) or reverse pronouns. Joint attention seems to be necessary for functional speech, and deficits in joint attention seem to distinguish infants with ASD. For example, they may look at a pointing hand instead of the pointed-at object, and they consistently fail to point at objects in order to comment on or share an experience. Children with autism may have difficulty with imaginative play and with developing symbols into language. In a pair of studies, high-functioning children with autism aged 8–15 performed equally well as, and as adults better than, individually matched controls at basic language tasks involving vocabulary and spelling. Both autistic groups performed worse than controls at complex language tasks such as figurative language, comprehension and inference. As people are often sized up initially from their basic language skills, these studies suggest that people speaking to autistic individuals are more likely to overestimate what their audience comprehends. Autistic individuals can display many forms of repetitive or restricted behavior, which the Repetitive Behavior Scale-Revised (RBS-R) categorizes as follows. No single repetitive or self-injurious behavior seems to be specific to autism, but autism appears to have an elevated pattern of occurrence and severity of these behaviors. Autistic individuals may have symptoms that are independent of the diagnosis, but that can affect the individual or the family. An estimated 0.5% to 10% of individuals with ASD show unusual abilities, ranging from splinter skills such as the memorization of trivia to the extraordinarily rare talents of prodigious autistic savants. Many individuals with ASD show superior skills in perception and attention, relative to the general population. Sensory abnormalities are found in over 90% of those with autism, and are considered core features by some, although there is no good evidence that sensory symptoms differentiate autism from other developmental disorders. Differences are greater for under-responsivity (for example, walking into things) than for over-responsivity (for example, distress from loud noises) or for sensation seeking (for example, rhythmic movements). An estimated 60–80% of autistic people have motor signs that include poor muscle tone, poor motor planning, and toe walking; deficits in motor coordination are pervasive across ASD and are greater in autism proper. Unusual eating behavior occurs in about three-quarters of children with ASD, to the extent that it was formerly a diagnostic indicator. Selectivity is the most common problem, although eating rituals and food refusal also occur. There is tentative evidence that autism occurs more frequently in people with gender dysphoria. Gastrointestinal problems are one of the most commonly associated medical disorders in people with autism. These are linked to greater social impairment, irritability, behavior and sleep problems, language impairments and mood changes. Parents of children with ASD have higher levels of stress. Siblings of children with ASD report greater admiration of and less conflict with the affected sibling than siblings of unaffected children and were similar to siblings of children with Down syndrome in these aspects of the sibling relationship. 
However, they report lower levels of closeness and intimacy than siblings of children with Down syndrome, and siblings of individuals with ASD have a greater risk of negative well-being and poorer sibling relationships as adults. It has long been presumed that there is a common cause at the genetic, cognitive, and neural levels for autism's characteristic triad of symptoms. However, there is increasing suspicion that autism is instead a complex disorder whose core aspects have distinct causes that often co-occur. Autism has a strong genetic basis, although the genetics of autism are complex and it is unclear whether ASD is explained more by rare mutations with major effects or by rare multigene interactions of common genetic variants. Complexity arises due to interactions among multiple genes, the environment, and epigenetic factors, which do not change the DNA sequence but are heritable and influence gene expression. Many genes have been associated with autism through sequencing the genomes of affected individuals and their parents. Studies of twins suggest that heritability is 0.7 for autism and as high as 0.9 for ASD, and siblings of those with autism are about 25 times more likely to be autistic than the general population (a sketch of how such twin-based estimates are classically derived follows below). However, most of the mutations that increase autism risk have not been identified. Typically, autism cannot be traced to a Mendelian (single-gene) mutation or to a single chromosome abnormality, and none of the genetic syndromes associated with ASDs have been shown to selectively cause ASD. Numerous candidate genes have been located, with only small effects attributable to any particular gene; most loci individually explain less than 1% of cases of autism. The large number of autistic individuals with unaffected family members may result from spontaneous structural variation, such as deletions, duplications, or inversions in genetic material during meiosis. Hence, a substantial fraction of autism cases may be traceable to genetic causes that are highly heritable but not inherited: that is, the mutation that causes the autism is not present in the parental genome. Autism may be underdiagnosed in women and girls due to an assumption that it is primarily a male condition, but genetic phenomena such as imprinting and X linkage can raise the frequency and severity of conditions in males, and theories have been put forward for a genetic reason why males are diagnosed more often, such as the imprinted brain theory and the extreme male brain theory.
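The twin-study figures above invite a question of method. One classical, simplified way such heritability estimates are derived is Falconer's formula, which compares how similar monozygotic twins are with how similar dizygotic twins are. The sketch below is illustrative only: the twin correlations in it are hypothetical placeholders, not values from the studies summarized above.

def falconer_heritability(r_mz: float, r_dz: float) -> float:
    """Broad-sense heritability via Falconer's formula: H^2 = 2 * (r_MZ - r_DZ)."""
    return 2 * (r_mz - r_dz)

# Hypothetical twin correlations, chosen only to show how an estimate near 0.9 could arise:
print(falconer_heritability(r_mz=0.90, r_dz=0.45))  # 0.9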
Maternal nutrition and inflammation during preconception and pregnancy influence fetal neurodevelopment. Intrauterine growth restriction is associated with ASD, in both term and preterm infants. Maternal inflammatory and autoimmune diseases may damage fetal tissues, aggravating a genetic problem or damaging the nervous system. Exposure to air pollution during pregnancy, especially to heavy metals and particulates, may increase the risk of autism. Environmental factors that have been claimed without evidence to contribute to or exacerbate autism include certain foods, infectious diseases, solvents, PCBs, phthalates and phenols used in plastic products, pesticides, brominated flame retardants, alcohol, smoking, illicit drugs, vaccines, and prenatal stress. Some, such as the MMR vaccine, have been completely disproven. Parents may first become aware of autistic symptoms in their child around the time of a routine vaccination. This has led to unsupported theories blaming vaccine "overload", a vaccine preservative, or the MMR vaccine for causing autism. The latter theory was supported by a litigation-funded study that has since been shown to have been "an elaborate fraud". Although these theories lack convincing scientific evidence and are biologically implausible, parental concern about a potential vaccine link with autism has led to lower rates of childhood immunizations, outbreaks of previously controlled childhood diseases in some countries, and the preventable deaths of several children. Autism's symptoms result from maturation-related changes in various systems of the brain. How autism occurs is not well understood. Its mechanism can be divided into two areas: the pathophysiology of brain structures and processes associated with autism, and the neuropsychological linkages between brain structures and behaviors. The behaviors appear to have multiple pathophysiologies. There is evidence that gut–brain axis abnormalities may be involved. A 2015 review proposed that immune dysregulation, gastrointestinal inflammation, malfunction of the autonomic nervous system, gut flora alterations, and food metabolites may cause brain neuroinflammation and dysfunction. A 2016 review concluded that enteric nervous system abnormalities might play a role in neurological disorders such as autism. Neural connections and the immune system are a pathway that may allow diseases originating in the intestine to spread to the brain. Several lines of evidence point to synaptic dysfunction as a cause of autism. Some rare mutations may lead to autism by disrupting synaptic pathways, such as those involved with cell adhesion. Gene replacement studies in mice suggest that autistic symptoms are closely related to later developmental steps that depend on activity in synapses and on activity-dependent changes. All known teratogens (agents that cause birth defects) related to the risk of autism appear to act during the first eight weeks from conception, and though this does not exclude the possibility that autism can be initiated or affected later, there is strong evidence that autism arises very early in development. Diagnosis is based on behavior, not cause or mechanism. Under the DSM-5, autism is characterized by persistent deficits in social communication and interaction across multiple contexts, as well as restricted, repetitive patterns of behavior, interests, or activities. These deficits are present in early childhood, typically before age three, and lead to clinically significant functional impairment. Sample symptoms include lack of social or emotional reciprocity, stereotyped and repetitive use of language or idiosyncratic language, and persistent preoccupation with unusual objects. The disturbance must not be better accounted for by Rett syndrome, intellectual disability, or global developmental delay. ICD-10 uses essentially the same definition. Several diagnostic instruments are available. Two are commonly used in autism research: the Autism Diagnostic Interview-Revised (ADI-R), a semistructured parent interview, and the Autism Diagnostic Observation Schedule (ADOS), which uses observation of and interaction with the child. The Childhood Autism Rating Scale (CARS) is used widely in clinical environments to assess the severity of autism based on observation of children. The Diagnostic Interview for Social and Communication Disorders (DISCO) may also be used.
A pediatrician commonly performs a preliminary investigation by taking a developmental history and physically examining the child. If warranted, diagnosis and evaluations are conducted with help from ASD specialists, who observe and assess cognitive, communication, family, and other factors using standardized tools, taking into account any associated medical conditions. A pediatric neuropsychologist is often asked to assess behavior and cognitive skills, both to aid diagnosis and to help recommend educational interventions. A differential diagnosis for ASD at this stage might also consider intellectual disability, hearing impairment, and a specific language impairment such as Landau–Kleffner syndrome. The presence of autism can make it harder to diagnose coexisting psychiatric disorders such as depression. Clinical genetics evaluations are often done once ASD is diagnosed, particularly when other symptoms already suggest a genetic cause. Although genetic technology allows clinical geneticists to link an estimated 40% of cases to genetic causes, consensus guidelines in the US and UK are limited to high-resolution chromosome and fragile X testing. A genotype-first model of diagnosis has been proposed, which would routinely assess the genome's copy number variations. As new genetic tests are developed, several ethical, legal, and social issues will emerge. Commercial availability of tests may precede adequate understanding of how to use test results, given the complexity of autism's genetics. Metabolic and neuroimaging tests are sometimes helpful, but are not routine. ASD can sometimes be diagnosed by age 14 months, although diagnosis becomes increasingly stable over the first three years of life: for example, a one-year-old who meets diagnostic criteria for ASD is less likely than a three-year-old to continue to do so a few years later. In the UK, the National Autism Plan for Children recommends at most 30 weeks from first concern to completed diagnosis and assessment, though few cases are handled that quickly in practice. Although the symptoms of autism and ASD begin early in childhood, they are sometimes missed; years later, adults may seek diagnoses to help them or their friends and family understand themselves, to help their employers make adjustments, or, in some locations, to claim disability living allowances or other benefits. Girls are often diagnosed later than boys. Underdiagnosis and overdiagnosis are problems in marginal cases, and much of the recent increase in the number of reported ASD cases is likely due to changes in diagnostic practices. The increasing popularity of drug treatment options and the expansion of benefits have given providers incentives to diagnose ASD, resulting in some overdiagnosis of children with uncertain symptoms. Conversely, the cost of screening and diagnosis and the challenge of obtaining payment can inhibit or delay diagnosis. It is particularly hard to diagnose autism among the visually impaired, partly because some of its diagnostic criteria depend on vision, and partly because autistic symptoms overlap with those of common blindness syndromes or blindisms. Autism is one of the five pervasive developmental disorders (PDD), which are characterized by widespread abnormalities of social interactions and communication, and by severely restricted interests and highly repetitive behavior. These symptoms do not imply sickness, fragility, or emotional disturbance.
Of the five PDD forms, Asperger syndrome is closest to autism in signs and likely causes; Rett syndrome and childhood disintegrative disorder share several signs with autism, but may have unrelated causes; PDD not otherwise specified (PDD-NOS; also called "atypical autism") is diagnosed when the criteria are not met for a more specific disorder. Unlike autism, Asperger syndrome involves no substantial delay in language development. The terminology of autism can be bewildering, with autism, Asperger syndrome, and PDD-NOS often called the "autism spectrum disorders" (ASD) or sometimes the "autistic disorders", whereas autism itself is often called "autistic disorder", "childhood autism", or "infantile autism". In this article, "autism" refers to the classic autistic disorder; in clinical practice, though, "autism", "ASD", and "PDD" are often used interchangeably. ASD, in turn, is a subset of the broader autism phenotype, which describes individuals who may not have ASD but do have autistic-like traits, such as avoiding eye contact. Autism can also be divided into syndromal and non-syndromal autism; syndromal autism is associated with severe or profound intellectual disability or with a congenital syndrome that has physical symptoms, such as tuberous sclerosis. Although individuals with Asperger syndrome tend to perform better cognitively than those with autism, the extent of the overlap between Asperger syndrome, high-functioning autism (HFA), and non-syndromal autism is unclear. Some studies have reported diagnoses of autism in children following a loss of language or social skills, as opposed to a failure to make progress, typically between 15 and 30 months of age. The validity of this distinction remains controversial; it is possible that regressive autism is a specific subtype, or that there is a continuum of behaviors between autism with and without regression. Research into causes has been hampered by the inability to identify biologically meaningful subgroups within the autistic population and by the traditional boundaries between the disciplines of psychiatry, psychology, neurology, and pediatrics. Newer technologies such as fMRI and diffusion tensor imaging can help identify biologically relevant phenotypes (observable traits) that can be viewed on brain scans, to help further neurogenetic studies of autism; one example is lowered activity in the fusiform face area of the brain, which is associated with impaired perception of people versus objects. It has been proposed to classify autism using genetics as well as behavior. Autism has long been thought to cover a wide spectrum, ranging from individuals with severe impairments, who may be silent, developmentally disabled, and prone to frequent repetitive behavior such as hand flapping and rocking, to high-functioning individuals who may have active but distinctly odd social approaches, narrowly focused interests, and verbose, pedantic communication. Because the behavior spectrum is continuous, boundaries between diagnostic categories are necessarily somewhat arbitrary. Sometimes the syndrome is divided into low-, medium-, or high-functioning autism (LFA, MFA, and HFA), based on IQ thresholds. Some people have called for an end to the terms "high-functioning" and "low-functioning" due to their lack of nuance and the potential for a person's needs or abilities to be overlooked. About half of parents of children with ASD notice their child's unusual behaviors by age 18 months, and about four-fifths notice by age 24 months.
According to one set of clinical practice guidelines, failure to meet any of a short list of milestones (no babbling by 12 months; no gesturing, such as pointing or waving goodbye, by 12 months; no single words by 16 months; no two-word spontaneous phrases by 24 months; or any loss of any language or social skills at any age) "is an absolute indication to proceed with further evaluations. Delay in referral for such testing may delay early diagnosis and treatment and affect the long-term outcome". The United States Preventive Services Task Force in 2016 found it was unclear whether screening was beneficial or harmful among children in whom there are no concerns. The Japanese practice is to screen all children for ASD at 18 and 24 months, using autism-specific formal screening tests. In contrast, in the UK, children whose families or doctors recognize possible signs of autism are screened. It is not known which approach is more effective. Screening tools include the Modified Checklist for Autism in Toddlers (M-CHAT), the Early Screening of Autistic Traits Questionnaire, and the First Year Inventory; initial data on M-CHAT and its predecessor, the Checklist for Autism in Toddlers (CHAT), in children aged 18–30 months suggest that it is best used in a clinical setting and that it has low sensitivity (many false-negatives) but good specificity (few false-positives); a worked illustration of these two measures follows below. It may be more accurate to precede these tests with a broadband screener that does not distinguish ASD from other developmental disorders. Screening tools designed for one culture's norms for behaviors like eye contact may be inappropriate for a different culture. Although genetic screening for autism is generally still impractical, it can be considered in some cases, such as children with neurological symptoms and dysmorphic features. While infection with rubella during pregnancy causes fewer than 1% of cases of autism, vaccination against rubella can prevent many of those cases.
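To make the screening terminology concrete: sensitivity is the proportion of affected children a test flags, and specificity is the proportion of unaffected children it correctly clears. The short sketch below works through both with hypothetical counts (placeholders, not actual M-CHAT data), showing how a screener can miss many true cases while raising few false alarms.

# Hypothetical screening outcomes for 1,100 children; illustrative placeholders, not M-CHAT data.
true_positives, false_negatives = 40, 60    # of 100 affected children, 60 are missed
true_negatives, false_positives = 950, 50   # of 1,000 unaffected children, 50 are wrongly flagged

sensitivity = true_positives / (true_positives + false_negatives)  # 0.40 -> low: many false-negatives
specificity = true_negatives / (true_negatives + false_positives)  # 0.95 -> good: few false-positives
print(f"sensitivity={sensitivity:.2f}, specificity={specificity:.2f}")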
The main goals when treating children with autism are to lessen associated deficits and family distress, and to increase quality of life and functional independence. In general, higher IQs are correlated with greater responsiveness to treatment and improved treatment outcomes. No single treatment is best, and treatment is typically tailored to the child's needs. Families and the educational system are the main resources for treatment. Services should be carried out by behavior analysts, special education teachers, speech pathologists, and licensed psychologists. Studies of interventions have methodological problems that prevent definitive conclusions about efficacy; however, the development of evidence-based interventions has advanced in recent years. Although many psychosocial interventions have some positive evidence, suggesting that some form of treatment is preferable to no treatment, the methodological quality of systematic reviews of these studies has generally been poor, their clinical results are mostly tentative, and there is little evidence for the relative effectiveness of treatment options. Intensive, sustained special education programs and behavior therapy early in life can help children acquire self-care, communication, and job skills, and often improve functioning and decrease symptom severity and maladaptive behaviors; claims that intervention by around age three years is crucial are not substantiated. While medications have not been found to help with core symptoms, they may be used for associated symptoms, such as irritability, inattention, or repetitive behavior patterns. Educational interventions often used include applied behavior analysis (ABA), developmental models, structured teaching, speech and language therapy, social skills therapy, and occupational therapy. Among these approaches, interventions either treat autistic features comprehensively or focus treatment on a specific area of deficit. The quality of research for early intensive behavioral intervention (EIBI), a treatment procedure incorporating more than thirty hours per week of the structured type of ABA that is carried out with very young children, is currently low, and more rigorous research designs with larger sample sizes are needed. Two theoretical frameworks outlined for early childhood intervention are structured and naturalistic ABA interventions, and developmental social pragmatic (DSP) models. One interventional strategy utilizes a parent training model, which teaches parents how to implement various ABA and DSP techniques, allowing parents to deliver interventions themselves. Various DSP programs have been developed to explicitly deliver intervention systems through at-home parent implementation. Although parent training models are a recent development, these interventions have demonstrated effectiveness in numerous studies and have been evaluated as a probably efficacious mode of treatment. Early, intensive ABA therapy has demonstrated effectiveness in enhancing communication and adaptive functioning in preschool children; it is also well established for improving the intellectual performance of that age group. Similarly, a teacher-implemented intervention that utilizes a more naturalistic form of ABA combined with a developmental social pragmatic approach has been found to be beneficial in improving social-communication skills in young children, although there is less evidence for its effect on global symptoms. Neuropsychological reports are often poorly communicated to educators, resulting in a gap between what a report recommends and what education is provided. It is not known whether treatment programs for children lead to significant improvements after the children grow up, and the limited research on the effectiveness of adult residential programs shows mixed results. The appropriateness of including children with varying severity of autism spectrum disorders in the general education population is a subject of current debate among educators and researchers. Medications may be used to treat ASD symptoms that interfere with integrating a child into home or school when behavioral treatment fails. They may also be used for associated health problems, such as ADHD or anxiety. More than half of US children diagnosed with ASD are prescribed psychoactive drugs or anticonvulsants, with the most common drug classes being antidepressants, stimulants, and antipsychotics. The atypical antipsychotic drugs risperidone and aripiprazole are FDA-approved for treating associated aggressive and self-injurious behaviors. However, their side effects must be weighed against their potential benefits, and people with autism may respond atypically; side effects may include weight gain, tiredness, drooling, and aggression. SSRI antidepressants, such as fluoxetine and fluvoxamine, have been shown to be effective in reducing repetitive and ritualistic behaviors, while the stimulant medication methylphenidate is beneficial for some children with comorbid inattentiveness or hyperactivity. There is scant reliable research about the effectiveness or safety of drug treatments for adolescents and adults with ASD. No known medication relieves autism's core symptoms of social and communication impairments.
Experiments in mice have reversed or reduced some symptoms related to autism by replacing or modulating gene function, suggesting the possibility of targeting therapies to specific rare mutations known to cause autism. Although many alternative therapies and interventions are available, few are supported by scientific studies. Treatment approaches have little empirical support in quality-of-life contexts, and many programs focus on success measures that lack predictive validity and real-world relevance. Some alternative treatments may place the child at risk. The preference that children with autism have for unconventional foods can lead to reduced bone cortical thickness, a reduction that is greater in those on casein-free diets as a consequence of low intake of calcium and vitamin D; however, suboptimal bone development in ASD has also been associated with lack of exercise and gastrointestinal disorders. In 2005, botched chelation therapy killed a five-year-old child with autism. Chelation is not recommended for people with ASD, since the associated risks outweigh any potential benefits. Another alternative medicine practice with no supporting evidence is CEASE therapy, a mixture of homeopathy, supplements, and "vaccine detoxing". Although popularly used as an alternative treatment for people with autism, as of 2018 there is no good evidence to recommend a gluten- and casein-free diet as a standard treatment. A 2018 review concluded that it may be a therapeutic option for specific groups of children with autism, such as those with known food intolerances or allergies, or with food intolerance markers. The authors analyzed the prospective trials conducted to date that studied the efficacy of the gluten- and casein-free diet in children with ASD (4 in total). All of them compared a gluten- and casein-free diet with a normal diet in a control group (2 double-blind randomized controlled trials, 1 double-blind crossover trial, and 1 single-blind trial). In the two studies that lasted 12 and 24 months, a significant improvement in ASD symptoms (efficacy rate 50%) was identified; in the other two, which lasted 3 months, no significant effect was observed. The authors concluded that a longer duration of the diet may be necessary to achieve an improvement in ASD symptoms. Other problems documented in the trials include dietary transgressions, small sample sizes, heterogeneity of the participants, and the possibility of a placebo effect. In the subset of people who have gluten sensitivity, there is limited evidence suggesting that a gluten-free diet may improve some autistic behaviors. There is tentative evidence that music therapy may improve social interactions, verbal communication, and non-verbal communication skills. There has been early research looking at hyperbaric treatments in children with autism. Studies on pet therapy have shown positive effects. There is no known cure. The degree of symptoms can decrease, occasionally to the extent that people lose their diagnosis of ASD; this occurs sometimes after intensive treatment and sometimes not. It is not known how often recovery happens; reported rates in unselected samples have ranged from 3% to 25%. Most children with autism acquire language by age five or younger, though a few have developed communication skills in later years. Many children with autism lack social support, future employment opportunities, or self-determination.
Although core difficulties tend to persist, symptoms often become less severe with age. Few high-quality studies address long-term prognosis. Some adults show modest improvement in communication skills, but a few decline; no study has focused on autism after midlife. Acquiring language before age six, having an IQ above 50, and having a marketable skill all predict better outcomes; independent living is unlikely with severe autism. Many individuals with autism face significant obstacles in transitioning to adulthood. Compared to the general population, individuals with autism are more likely to be unemployed and to have never had a job; about half of people in their 20s with autism are not employed. Most recent reviews tend to estimate a prevalence of 1–2 per 1,000 for autism and close to 6 per 1,000 for ASD as of 2007. A 2016 survey in the United States reported a rate of 25 per 1,000 children for ASD. Globally, autism affects an estimated 24.8 million people, while Asperger syndrome affects a further 37.2 million. In 2012, the NHS estimated that the overall prevalence of autism among adults aged 18 years and over in the UK was 1.1%. The rate of PDD-NOS has been estimated at 3.7 per 1,000, Asperger syndrome at roughly 0.6 per 1,000, and childhood disintegrative disorder at 0.02 per 1,000. The CDC estimated the rate of ASD at about 1 in 59 children (1.7%) for 2014, an increase from 1 in 68 (1.5%) for 2010. The number of reported cases of autism increased dramatically in the 1990s and early 2000s. This increase is largely attributable to changes in diagnostic practices, referral patterns, availability of services, age at diagnosis, and public awareness, though unidentified environmental risk factors cannot be ruled out. The available evidence does not rule out the possibility that autism's true prevalence has increased; a real increase would suggest directing more attention and funding toward changing environmental factors instead of continuing to focus on genetics. Boys are at higher risk for ASD than girls. The sex ratio averages 4.3:1 and is greatly modified by cognitive impairment: it may be close to 2:1 with intellectual disability and more than 5.5:1 without. Several theories about the higher prevalence in males have been investigated, but the cause of the difference is unconfirmed; one theory is that females are underdiagnosed. Although the evidence does not implicate any single pregnancy-related risk factor as a cause of autism, the risk of autism is associated with advanced age in either parent, and with diabetes, bleeding, and use of psychiatric drugs in the mother during pregnancy. The risk is greater with older fathers than with older mothers; two potential explanations are the known increase in mutation burden in older sperm, and the hypothesis that men marry later if they carry genetic liability and show some signs of autism. Most professionals believe that race, ethnicity, and socioeconomic background do not affect the occurrence of autism. Several other conditions are common in children with autism, including genetic disorders such as fragile X syndrome, intellectual disability, epilepsy, anxiety disorders, and sleep problems. A few examples of autistic symptoms and treatments were described long before autism was named. The "Table Talk" of Martin Luther, compiled by his notetaker Mathesius, contains the story of a 12-year-old boy who may have been severely autistic. Luther reportedly thought the boy was a soulless mass of flesh possessed by the devil, and suggested that he be suffocated, although a later critic has cast doubt on the veracity of this report.
The earliest well-documented case of autism is that of Hugh Blair of Borgue, as detailed in a 1747 court case in which his brother successfully petitioned to annul Blair's marriage in order to gain Blair's inheritance. The Wild Boy of Aveyron, a feral child caught in 1798, showed several signs of autism; the medical student Jean Itard treated him with a behavioral program designed to help him form social attachments and to induce speech via imitation. The New Latin word "autismus" (English translation "autism") was coined by the Swiss psychiatrist Eugen Bleuler in 1910 as he was defining symptoms of schizophrenia. He derived it from the Greek word "autós" (αὐτός, meaning "self"), and used it to mean morbid self-admiration, referring to "autistic withdrawal of the patient to his fantasies, against which any influence from outside becomes an intolerable disturbance". The Soviet child psychiatrist Grunya Sukhareva described a similar syndrome in work published in Russian in 1925 and in German in 1926. The word "autism" first took its modern sense in 1938, when Hans Asperger of the Vienna University Hospital adopted Bleuler's terminology "autistic psychopaths" in a lecture in German about child psychology. Asperger was investigating an ASD now known as Asperger syndrome, though for various reasons it was not widely recognized as a separate diagnosis until 1981. Leo Kanner of the Johns Hopkins Hospital first used "autism" in its modern sense in English when he introduced the label "early infantile autism" in a 1943 report of 11 children with striking behavioral similarities. Almost all the characteristics described in Kanner's first paper on the subject, notably "autistic aloneness" and "insistence on sameness", are still regarded as typical of the autistic spectrum of disorders. It is not known whether Kanner derived the term independently of Asperger. Donald Triplett was the first person diagnosed with autism. He was diagnosed by Kanner after being first examined in 1938, and was labeled as "case 1". Triplett was noted for his savant abilities, particularly being able to name musical notes played on a piano and to mentally multiply numbers. His father, Oliver, described him as socially withdrawn but interested in number patterns, music notes, letters of the alphabet, and pictures of U.S. presidents. By the age of 2, he could recite the 23rd Psalm and had memorized 25 questions and answers from the Presbyterian catechism. He was also interested in creating musical chords. Kanner's reuse of "autism" led to decades of confused terminology like "infantile schizophrenia", and child psychiatry's focus on maternal deprivation led to misconceptions of autism as an infant's response to "refrigerator mothers". Starting in the late 1960s, autism was established as a separate syndrome. As late as the mid-1970s there was little evidence of a genetic role in autism; by 2007 it was believed to be one of the most heritable psychiatric conditions. Although the rise of parent organizations and the destigmatization of childhood ASD have affected how ASD is viewed, parents continue to feel social stigma in situations where their child's autistic behavior is perceived negatively, and many primary care physicians and medical specialists express some beliefs consistent with outdated autism research. It took until 1980 for the DSM-III to differentiate autism from childhood schizophrenia. In 1987, the DSM-III-R provided a checklist for diagnosing autism.
In May 2013, the DSM-5 was released, updating the classification for pervasive developmental disorders. The grouping of disorders, including PDD-NOS, autism, Asperger syndrome, Rett syndrome, and CDD, was removed and replaced with the general term autism spectrum disorder. The two categories that now exist are impaired social communication and/or interaction, and restricted and/or repetitive behaviors. The Internet has helped autistic individuals bypass the nonverbal cues and emotional sharing that they find difficult to deal with, and has given them a way to form online communities and work remotely. Societal and cultural aspects of autism have developed: some in the community seek a cure, while others believe that autism is simply another way of being. An autistic culture has emerged, accompanied by the autistic rights and neurodiversity movements. Events include World Autism Awareness Day, Autism Sunday, Autistic Pride Day, Autreat, and others. Organizations dedicated to promoting awareness of autism include the Autistic Self Advocacy Network, Aspies For Freedom, the Autism National Committee, and the Autism Society of America. At the same time, some organizations, including Autism Speaks, have been condemned by disability rights organizations for failing to support autistic people. Social-science scholars study those with autism in hopes of learning more about "autism as a culture, transcultural comparisons... and research on social movements." While most autistic individuals do not have savant skills, many have been successful in their fields. The autism rights movement is a social movement within the context of disability rights that emphasizes the concept of neurodiversity, viewing the autism spectrum as a result of natural variations in the human brain rather than a disorder to be cured. The autism rights movement advocates for greater acceptance of autistic behaviors, for therapies that focus on coping skills rather than on imitating the behaviors of those without autism, and for the recognition of the autistic community as a minority group. Autism rights or neurodiversity advocates believe that the autism spectrum is genetic and should be accepted as a natural expression of the human genome. This perspective is distinct from two other views: the medical perspective, that autism is caused by a genetic defect and should be addressed by targeting the autism gene(s), and fringe theories that autism is caused by environmental factors such as vaccines. A common criticism of autistic activists is that the majority of them are "high-functioning" or have Asperger syndrome and do not represent the views of "low-functioning" autistic people. About half of autistics are unemployed, and one third of those with graduate degrees may be unemployed. Among autistics who find work, most are employed in sheltered settings, working for wages below the national minimum. While employers state hiring concerns about productivity and supervision, experienced employers of autistics give positive reports of above-average memory and detail orientation, as well as a high regard for rules and procedure, in autistic employees. A majority of the economic burden of autism is caused by decreased earnings in the job market. Some studies also find decreased earnings among parents who care for autistic children. </doc>
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/trainer/test_trainer_callback.py
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import shutil
import tempfile
import unittest
from unittest.mock import patch

from transformers import (
    DefaultFlowCallback,
    IntervalStrategy,
    PrinterCallback,
    ProgressCallback,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    is_torch_available,
)
from transformers.testing_utils import require_torch


if is_torch_available():
    from transformers.trainer import DEFAULT_CALLBACKS

    from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel


class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that go through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        # Reconstruct the exact sequence of callback events a training run should fire, given the
        # trainer's logging/evaluation/save strategies.
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        # Number of batches per epoch (train and eval datasets have the same length in these tests).
        train_dl_len = len(trainer.get_train_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls whether we use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/trainer/test_trainer_utils.py
# coding=utf-8 # Copyright 2018 the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import unittest import numpy as np from transformers.data.data_collator import default_data_collator from transformers.testing_utils import require_accelerate, require_torch from transformers.trainer_utils import RemoveColumnsCollator, find_executable_batch_size from transformers.utils import is_torch_available if is_torch_available(): import torch from torch import nn from torch.utils.data import IterableDataset from transformers.modeling_outputs import SequenceClassifierOutput from transformers.tokenization_utils_base import BatchEncoding from transformers.trainer_pt_utils import ( DistributedLengthGroupedSampler, DistributedSamplerWithLoop, DistributedTensorGatherer, IterableDatasetShard, LabelSmoother, LengthGroupedSampler, SequentialDistributedSampler, ShardSampler, get_parameter_names, numpy_pad_and_concatenate, torch_pad_and_concatenate, ) class TstLayer(nn.Module): def __init__(self, hidden_size): super().__init__() self.linear1 = nn.Linear(hidden_size, hidden_size) self.ln1 = nn.LayerNorm(hidden_size) self.linear2 = nn.Linear(hidden_size, hidden_size) self.ln2 = nn.LayerNorm(hidden_size) self.bias = nn.Parameter(torch.zeros(hidden_size)) def forward(self, x): h = self.ln1(nn.functional.relu(self.linear1(x))) h = nn.functional.relu(self.linear2(x)) return self.ln2(x + h + self.bias) class RandomIterableDataset(IterableDataset): # For testing, an iterable dataset of random length def __init__(self, p_stop=0.01, max_length=1000): self.p_stop = p_stop self.max_length = max_length self.generator = torch.Generator() def __iter__(self): count = 0 stop = False while not stop and count < self.max_length: yield count count += 1 number = torch.rand(1, generator=self.generator).item() stop = number < self.p_stop @require_torch class TrainerUtilsTest(unittest.TestCase): def test_distributed_tensor_gatherer(self): # Simulate a result with a dataset of size 21, 4 processes and chunks of lengths 2, 3, 1 world_size = 4 num_samples = 21 input_indices = [ [0, 1, 6, 7, 12, 13, 18, 19], [2, 3, 4, 8, 9, 10, 14, 15, 16, 20, 0, 1], [5, 11, 17, 2], ] predictions = np.random.normal(size=(num_samples, 13)) gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices in input_indices: gatherer.add_arrays(predictions[indices]) result = gatherer.finalize() self.assertTrue(np.array_equal(result, predictions)) # With nested tensors gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices in input_indices: gatherer.add_arrays([predictions[indices], [predictions[indices], predictions[indices]]]) result = gatherer.finalize() self.assertTrue(isinstance(result, list)) self.assertEqual(len(result), 2) self.assertTrue(isinstance(result[1], list)) self.assertEqual(len(result[1]), 2) self.assertTrue(np.array_equal(result[0], predictions)) self.assertTrue(np.array_equal(result[1][0], predictions)) self.assertTrue(np.array_equal(result[1][1], 
predictions)) def test_distributed_tensor_gatherer_different_shapes(self): # Simulate a result with a dataset of size 21, 4 processes and chunks of lengths 2, 3, 1 world_size = 4 num_samples = 21 input_indices = [ [0, 1, 6, 7, 12, 13, 18, 19], [2, 3, 4, 8, 9, 10, 14, 15, 16, 20, 0, 1], [5, 11, 17, 2], ] sequence_lengths = [8, 10, 13] predictions = np.random.normal(size=(num_samples, 13)) gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices, seq_length in zip(input_indices, sequence_lengths): gatherer.add_arrays(predictions[indices, :seq_length]) result = gatherer.finalize() # Remove the extra samples added at the end for a round multiple of num processes. actual_indices = [input_indices[0], input_indices[1][:-2], input_indices[2][:-1]] for indices, seq_length in zip(actual_indices, sequence_lengths): self.assertTrue(np.array_equal(result[indices, :seq_length], predictions[indices, :seq_length])) # With nested tensors predictions = np.random.normal(size=(num_samples, 13)) gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices, seq_length in zip(input_indices, sequence_lengths): gatherer.add_arrays([predictions[indices, :seq_length], predictions[indices]]) result = gatherer.finalize() for indices, seq_length in zip(actual_indices, sequence_lengths): self.assertTrue(np.array_equal(result[0][indices, :seq_length], predictions[indices, :seq_length])) self.assertTrue(np.array_equal(result[1], predictions)) # Check if works if varying seq_length is second gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices, seq_length in zip(input_indices, sequence_lengths): gatherer.add_arrays([predictions[indices], predictions[indices, :seq_length]]) result = gatherer.finalize() self.assertTrue(np.array_equal(result[0], predictions)) for indices, seq_length in zip(actual_indices, sequence_lengths): self.assertTrue(np.array_equal(result[1][indices, :seq_length], predictions[indices, :seq_length])) def test_label_smoothing(self): epsilon = 0.1 num_labels = 12 random_logits = torch.randn(4, 5, num_labels) random_labels = torch.randint(0, num_labels, (4, 5)) loss = nn.functional.cross_entropy(random_logits.view(-1, num_labels), random_labels.view(-1)) model_output = SequenceClassifierOutput(logits=random_logits) label_smoothed_loss = LabelSmoother(0.1)(model_output, random_labels) log_probs = -nn.functional.log_softmax(random_logits, dim=-1) expected_loss = (1 - epsilon) * loss + epsilon * log_probs.mean() self.assertTrue(torch.allclose(label_smoothed_loss, expected_loss)) # With a few -100 labels random_labels[0, 1] = -100 random_labels[2, 1] = -100 random_labels[2, 3] = -100 loss = nn.functional.cross_entropy(random_logits.view(-1, num_labels), random_labels.view(-1)) model_output = SequenceClassifierOutput(logits=random_logits) label_smoothed_loss = LabelSmoother(0.1)(model_output, random_labels) log_probs = -nn.functional.log_softmax(random_logits, dim=-1) # Mask the log probs with the -100 labels log_probs[0, 1] = 0.0 log_probs[2, 1] = 0.0 log_probs[2, 3] = 0.0 expected_loss = (1 - epsilon) * loss + epsilon * log_probs.sum() / (num_labels * 17) self.assertTrue(torch.allclose(label_smoothed_loss, expected_loss)) def test_group_by_length(self): # Get some inputs of random lengths lengths = torch.randint(0, 25, (100,)).tolist() # Put one bigger than the others to check it ends up in first position lengths[32] = 50 indices = list(LengthGroupedSampler(4, lengths=lengths)) # The biggest 
element should be first self.assertEqual(lengths[indices[0]], 50) # The indices should be a permutation of range(100) self.assertEqual(sorted(indices), list(range(100))) def test_group_by_length_with_dict(self): # Get some inputs of random lengths data = [] for _ in range(6): input_ids = torch.randint(0, 25, (100,)).tolist() data.append({"input_ids": input_ids}) # Put one bigger than the others to check it ends up in first position data[3]["input_ids"] = torch.randint(0, 25, (105,)).tolist() indices = list(LengthGroupedSampler(4, dataset=data)) # The biggest element should be first self.assertEqual(len(data[indices[0]]["input_ids"]), 105) # The indices should be a permutation of range(6) self.assertEqual(sorted(indices), list(range(6))) def test_group_by_length_with_batch_encoding(self): # Get some inputs of random lengths data = [] for _ in range(6): input_ids = torch.randint(0, 25, (100,)).tolist() data.append(BatchEncoding({"input_ids": input_ids})) # Put one bigger than the others to check it ends up in first position data[3]["input_ids"] = torch.randint(0, 25, (105,)).tolist() indices = list(LengthGroupedSampler(4, dataset=data)) # The biggest element should be first self.assertEqual(len(data[indices[0]]["input_ids"]), 105) # The indices should be a permutation of range(6) self.assertEqual(sorted(indices), list(range(6))) def test_distributed_length_grouped(self): # Get some inputs of random lengths lengths = torch.randint(0, 25, (100,)).tolist() # Put one bigger than the others to check it ends up in first position lengths[32] = 50 indices_process_0 = list(DistributedLengthGroupedSampler(4, num_replicas=2, rank=0, lengths=lengths)) indices_process_1 = list(DistributedLengthGroupedSampler(4, num_replicas=2, rank=1, lengths=lengths)) # The biggest element should be first self.assertEqual(lengths[indices_process_0[0]], 50) # The indices should be a permutation of range(100) self.assertEqual(sorted(indices_process_0 + indices_process_1), list(range(100))) def test_get_parameter_names(self): model = nn.Sequential(TstLayer(128), nn.ModuleList([TstLayer(128), TstLayer(128)])) # fmt: off self.assertEqual( get_parameter_names(model, [nn.LayerNorm]), ['0.linear1.weight', '0.linear1.bias', '0.linear2.weight', '0.linear2.bias', '0.bias', '1.0.linear1.weight', '1.0.linear1.bias', '1.0.linear2.weight', '1.0.linear2.bias', '1.0.bias', '1.1.linear1.weight', '1.1.linear1.bias', '1.1.linear2.weight', '1.1.linear2.bias', '1.1.bias'] ) # fmt: on def test_distributed_sampler_with_loop(self): batch_size = 16 for length in [23, 64, 123]: dataset = list(range(length)) shard1 = DistributedSamplerWithLoop(dataset, batch_size, num_replicas=2, rank=0) shard2 = DistributedSamplerWithLoop(dataset, batch_size, num_replicas=2, rank=1) # Set seeds shard1.set_epoch(0) shard2.set_epoch(0) # Sample samples1 = list(shard1) samples2 = list(shard2) self.assertTrue(len(samples1) % batch_size == 0) self.assertTrue(len(samples2) % batch_size == 0) total = [] for sample1, sample2 in zip(samples1, samples2): total += [sample1, sample2] self.assertEqual(set(total[:length]), set(dataset)) self.assertEqual(set(total[length:]), set(total[: (len(total) - length)])) def test_sequential_distributed_sampler(self): batch_size = 16 for length in [23, 64, 123]: dataset = list(range(length)) shard1 = SequentialDistributedSampler(dataset, num_replicas=2, rank=0) shard2 = SequentialDistributedSampler(dataset, num_replicas=2, rank=1) # Sample samples1 = list(shard1) samples2 = list(shard2) total = samples1 + samples2 
self.assertListEqual(total[:length], dataset) self.assertListEqual(total[length:], dataset[: (len(total) - length)]) # With a batch_size passed shard1 = SequentialDistributedSampler(dataset, num_replicas=2, rank=0, batch_size=batch_size) shard2 = SequentialDistributedSampler(dataset, num_replicas=2, rank=1, batch_size=batch_size) # Sample samples1 = list(shard1) samples2 = list(shard2) self.assertTrue(len(samples1) % batch_size == 0) self.assertTrue(len(samples2) % batch_size == 0) total = samples1 + samples2 self.assertListEqual(total[:length], dataset) self.assertListEqual(total[length:], dataset[: (len(total) - length)]) def check_iterable_dataset_shard(self, dataset, batch_size, drop_last, num_processes=2, epoch=0): # Set the seed for the base dataset to get the proper reference. dataset.generator.manual_seed(epoch) reference = list(dataset) shards = [ IterableDatasetShard( dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i ) for i in range(num_processes) ] for shard in shards: shard.set_epoch(epoch) shard_lists = [list(shard) for shard in shards] for shard in shard_lists: # All shards have a number of samples that is a round multiple of batch size self.assertTrue(len(shard) % batch_size == 0) # All shards have the same number of samples self.assertEqual(len(shard), len(shard_lists[0])) for shard in shards: # All shards know the total number of samples self.assertEqual(shard.num_examples, len(reference)) observed = [] for idx in range(0, len(shard_lists[0]), batch_size): for shard in shard_lists: observed += shard[idx : idx + batch_size] # If drop_last is False we loop through samples at the beginning to have a size that is a round multiple of # batch_size if not drop_last: while len(reference) < len(observed): reference += reference self.assertListEqual(observed, reference[: len(observed)]) # Check equivalence between IterableDataset and ShardSampler dataset.generator.manual_seed(epoch) reference = list(dataset) sampler_shards = [ ShardSampler( reference, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i ) for i in range(num_processes) ] for shard, sampler_shard in zip(shard_lists, sampler_shards): self.assertListEqual(shard, list(sampler_shard)) def test_iterable_dataset_shard(self): dataset = RandomIterableDataset() self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=2, epoch=0) self.check_iterable_dataset_shard(dataset, 4, drop_last=False, num_processes=2, epoch=0) self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=3, epoch=42) self.check_iterable_dataset_shard(dataset, 4, drop_last=False, num_processes=3, epoch=42) def test_iterable_dataset_shard_with_length(self): sampler_shards = [ IterableDatasetShard(list(range(100)), batch_size=4, drop_last=True, num_processes=2, process_index=i) for i in range(2) ] # Build expected shards: each process will have batches of size 4 until there is not enough elements to # form two full batches (so we stop at 96 = (100 // (4 * 2)) * 4) expected_shards = [[], []] current_shard = 0 for i in range(0, 96, 4): expected_shards[current_shard].extend(list(range(i, i + 4))) current_shard = 1 - current_shard self.assertListEqual([list(shard) for shard in sampler_shards], expected_shards) self.assertListEqual([len(shard) for shard in sampler_shards], [len(shard) for shard in expected_shards]) sampler_shards = [ IterableDatasetShard(list(range(100)), batch_size=4, drop_last=False, num_processes=2, process_index=i) for i in 
range(2)
        ]
        # When drop_last=False, we get the two last full batches by looping back to the beginning.
        expected_shards[0].extend(list(range(96, 100)))
        expected_shards[1].extend(list(range(0, 4)))

        self.assertListEqual([list(shard) for shard in sampler_shards], expected_shards)
        self.assertListEqual([len(shard) for shard in sampler_shards], [len(shard) for shard in expected_shards])

    def check_shard_sampler(self, dataset, batch_size, drop_last, num_processes=2):
        shards = [
            ShardSampler(
                dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i
            )
            for i in range(num_processes)
        ]
        shard_lists = [list(shard) for shard in shards]

        for shard in shard_lists:
            # All shards have a number of samples that is a round multiple of batch size
            self.assertTrue(len(shard) % batch_size == 0)
            # All shards have the same number of samples
            self.assertEqual(len(shard), len(shard_lists[0]))

        observed = []
        for idx in range(0, len(shard_lists[0]), batch_size):
            for shard in shard_lists:
                observed += shard[idx : idx + batch_size]

        # If drop_last is False we loop through samples at the beginning to have a size that is a round multiple of
        # batch_size
        reference = copy.copy(dataset)
        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])

    def test_shard_sampler(self):
        for n_elements in [64, 123]:
            dataset = list(range(n_elements))

            self.check_shard_sampler(dataset, 4, drop_last=True, num_processes=2)
            self.check_shard_sampler(dataset, 4, drop_last=False, num_processes=2)
            self.check_shard_sampler(dataset, 4, drop_last=True, num_processes=3)
            self.check_shard_sampler(dataset, 4, drop_last=False, num_processes=3)

    @require_accelerate
    def test_executable_batch_size(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=True)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size > 16:
                raise RuntimeError("CUDA out of memory.")

        mock_training_loop_function()
        self.assertEqual(batch_sizes, [64, 32, 16])

    @require_accelerate
    def test_executable_batch_size_no_search(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=False)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)

        mock_training_loop_function()
        self.assertEqual(batch_sizes, [64])

    @require_accelerate
    def test_executable_batch_size_with_error(self):
        @find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=False)
        def mock_training_loop_function(batch_size):
            raise RuntimeError("CUDA out of memory.")

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        # The assertion must run after the `with` block (code placed after the raising call inside
        # it would be unreachable), and the message lives on the caught exception object.
        self.assertEqual("CUDA out of memory.", cm.exception.args[0])

    def test_pad_and_concatenate_with_1d(self):
        """Tests whether pad_and_concatenate works with scalars."""
        array1 = 1.0
        array2 = 2.0
        result = numpy_pad_and_concatenate(array1, array2)
        self.assertTrue(np.array_equal(np.array([1.0, 2.0]), result))

        tensor1 = torch.tensor(1.0)
        tensor2 = torch.tensor(2.0)
        result = torch_pad_and_concatenate(tensor1, tensor2)
        self.assertTrue(torch.equal(result, torch.Tensor([1.0, 2.0])))

    def test_remove_columns_collator(self):
        class MockLogger:
            def __init__(self) -> None:
                self.called = 0

            def info(self, msg):
                self.called += 1
                self.last_msg = msg

        data_batch = [
            {"col1": 1, "col2": 2, "col3": 3},
            {"col1": 1, "col2": 2, "col3": 3},
        ]
        logger = MockLogger()
        remove_columns_collator = RemoveColumnsCollator(
            default_data_collator, ["col1", "col2"], logger, "model", "training"
        )

        self.assertNotIn("col3", remove_columns_collator(data_batch))
        # check that the logging message is printed out only once
        remove_columns_collator(data_batch)
        remove_columns_collator(data_batch)
        self.assertEqual(logger.called, 1)
        self.assertIn("col3", logger.last_msg)
0
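The three `find_executable_batch_size` tests above pin down the decorator's contract: start at `starting_batch_size`, halve after each OOM-style failure when `auto_find_batch_size=True`, and run exactly once otherwise. Below is a minimal standalone sketch of that retry pattern; the halving policy mirrors the `[64, 32, 16]` assertion, but `find_executable_batch_size_sketch` and its "out of memory" string matching are illustrative assumptions, not the library's implementation.

import functools


def find_executable_batch_size_sketch(function=None, starting_batch_size=64):
    # Illustrative stand-in: retry `function`, halving `batch_size` after OOM-style RuntimeErrors.
    if function is None:
        return functools.partial(find_executable_batch_size_sketch, starting_batch_size=starting_batch_size)

    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        batch_size = starting_batch_size
        while batch_size > 0:
            try:
                return function(batch_size, *args, **kwargs)
            except RuntimeError as e:
                if "out of memory" not in str(e):
                    raise  # unrelated errors propagate unchanged
                batch_size //= 2
        raise RuntimeError("No executable batch size found, reached batch size of 0.")

    return wrapper


@find_executable_batch_size_sketch(starting_batch_size=64)
def mock_training_loop(batch_size):
    if batch_size > 16:
        raise RuntimeError("CUDA out of memory.")
    return batch_size


assert mock_training_loop() == 16  # 64 and 32 raise, 16 succeeds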
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/trainer/test_trainer_seq2seq.py
# coding=utf-8 # Copyright 2020 the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from transformers import ( AutoModelForSeq2SeqLM, BertTokenizer, DataCollatorForSeq2Seq, EncoderDecoderModel, GenerationConfig, Seq2SeqTrainer, Seq2SeqTrainingArguments, T5Tokenizer, ) from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class Seq2seqTrainerTester(TestCasePlus): @slow @require_torch def test_finetune_bert2bert(self): bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny") tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size bert2bert.config.eos_token_id = tokenizer.sep_token_id bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id bert2bert.config.max_length = 128 train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]") val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]") train_dataset = train_dataset.select(range(32)) val_dataset = val_dataset.select(range(16)) batch_size = 4 def _map_to_encoder_decoder_inputs(batch): # Tokenizer will automatically set [BOS] <text> [EOS] inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512) outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128) batch["input_ids"] = inputs.input_ids batch["attention_mask"] = inputs.attention_mask batch["decoder_input_ids"] = outputs.input_ids batch["labels"] = outputs.input_ids.copy() batch["labels"] = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"] ] batch["decoder_attention_mask"] = outputs.attention_mask assert all(len(x) == 512 for x in inputs.input_ids) assert all(len(x) == 128 for x in outputs.input_ids) return batch def _compute_metrics(pred): labels_ids = pred.label_ids pred_ids = pred.predictions # all unnecessary tokens are removed pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True) label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True) accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str) return {"accuracy": accuracy} # map train dataset train_dataset = train_dataset.map( _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], ) train_dataset.set_format( type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], ) # same for validation dataset val_dataset = val_dataset.map( _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], ) val_dataset.set_format( type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], ) output_dir = self.get_auto_remove_tmp_dir() training_args 
= Seq2SeqTrainingArguments( output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, ) # instantiate trainer trainer = Seq2SeqTrainer( model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, ) # start training trainer.train() @slow @require_torch def test_return_sequences(self): # Tests that the number of generated sequences is correct when num_return_sequences > 1 # and essentially ensuring that `accelerator.gather()` is used instead of `gather_for_metrics` INPUT_COLUMN = "question" TARGET_COLUMN = "answer" MAX_INPUT_LENGTH = 256 MAX_TARGET_LENGTH = 256 dataset = datasets.load_dataset("gsm8k", "main", split="train[:38]") model = AutoModelForSeq2SeqLM.from_pretrained("t5-small") tokenizer = T5Tokenizer.from_pretrained("t5-small") data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, return_tensors="pt", padding="longest") gen_config = GenerationConfig.from_pretrained( "t5-small", max_length=None, min_length=None, max_new_tokens=256, min_new_tokens=1, num_beams=5 ) training_args = Seq2SeqTrainingArguments(".", predict_with_generate=True) trainer = Seq2SeqTrainer( model=model, args=training_args, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=lambda x: {"samples": x[0].shape[0]}, ) def prepare_data(examples): # Remove pairs where at least one record is none inputs = examples[INPUT_COLUMN] targets = examples[TARGET_COLUMN] model_inputs = tokenizer(inputs, max_length=MAX_INPUT_LENGTH, truncation=True) labels = tokenizer(text_target=targets, max_length=MAX_TARGET_LENGTH, truncation=True) model_inputs["labels"] = labels["input_ids"] return model_inputs prepared_dataset = dataset.map(prepare_data, batched=True, remove_columns=[INPUT_COLUMN, TARGET_COLUMN]) dataset_len = len(prepared_dataset) # 38 for num_return_sequences in range(3, 0, -1): gen_config.num_return_sequences = num_return_sequences metrics = trainer.evaluate(eval_dataset=prepared_dataset, generation_config=gen_config) assert ( metrics["eval_samples"] == dataset_len * num_return_sequences ), f"Got {metrics['eval_samples']}, expected: {dataset_len * num_return_sequences}"
0
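The `eval_samples` arithmetic that `test_return_sequences` checks comes straight from `generate()`: with `num_return_sequences <= num_beams`, beam search emits that many output rows per input, so the gathered prediction tensor grows to `len(dataset) * num_return_sequences`. A quick sanity check outside the trainer (same `t5-small` checkpoint as the test; the prompts are illustrative):

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
tokenizer = AutoTokenizer.from_pretrained("t5-small")

prompts = ["translate English to German: Hello.", "translate English to German: Thank you."]
inputs = tokenizer(prompts, return_tensors="pt", padding=True)

# Beam search stacks num_return_sequences rows per input along the batch dimension.
out = model.generate(**inputs, num_beams=5, num_return_sequences=3, max_new_tokens=8)
assert out.shape[0] == len(prompts) * 3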
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/trainer/test_trainer_distributed.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict import numpy as np from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_multi_xpu, require_torch_neuroncore, require_torch_npu, ) from transformers.training_args import ParallelMode from transformers.utils import logging logger = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset, IterableDataset from transformers import Trainer class DummyDataset(Dataset): def __init__(self, length: int = 101): self.length = length def __len__(self): return self.length def __getitem__(self, i) -> int: return i class DummyDataCollator: def __call__(self, features): return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)} class DummyModel(nn.Module): def __init__(self): super().__init__() # Add some (unused) params otherwise DDP will complain. self.fc = nn.Linear(120, 80) def forward(self, input_ids, labels=None): if labels is not None: return torch.tensor(0.0, device=input_ids.device), input_ids else: return input_ids class RegressionModel(nn.Module): def __init__(self, a=0, b=0, double_output=False): super().__init__() self.a = nn.Parameter(torch.tensor(a).float()) self.b = nn.Parameter(torch.tensor(b).float()) self.double_output = double_output self.config = None def forward(self, input_x, labels=None, **kwargs): y = input_x * self.a + self.b if labels is None: return (y, y) if self.double_output else (y,) loss = nn.functional.mse_loss(y, labels) return (loss, y, y) if self.double_output else (loss, y) class SampleIterableDataset(IterableDataset): def __init__(self, a=2, b=3, length=64, seed=42, label_names=None): self.dataset = RegressionDataset(a=a, b=b, length=length, seed=seed, label_names=label_names) def __iter__(self): for i in range(len(self.dataset)): yield self.dataset[i] class FiniteIterableDataset(SampleIterableDataset): def __init__(self, a=2, b=3, length=64, seed=42, label_names=None): super().__init__(a, b, length, seed, label_names) self.current_sample = 0 def __iter__(self): while self.current_sample < len(self.dataset): yield self.dataset[self.current_sample] self.current_sample += 1 class RegressionDataset: def __init__(self, a=2, b=3, length=64, seed=42, label_names=None): np.random.seed(seed) self.label_names = ["labels"] if label_names is None else label_names self.length = length self.x = np.random.normal(size=(length,)).astype(np.float32) self.ys = [a * self.x + b + np.random.normal(scale=0.1, size=(length,)) for _ in self.label_names] self.ys = [y.astype(np.float32) for y in self.ys] def __len__(self): return self.length def __getitem__(self, i): result = {name: y[i] for name, y in zip(self.label_names, self.ys)} result["input_x"] = self.x[i] return result class 
TestTrainerDistributedNeuronCore(TestCasePlus): @require_torch_neuroncore def test_trainer(self): distributed_args = f"""--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py """.split() output_dir = self.get_auto_remove_tmp_dir() args = f"--output_dir {output_dir}".split() cmd = ["torchrun"] + distributed_args + args execute_subprocess_async(cmd, env=self.get_env()) # successful return here == success - any errors would have caused an error in the sub-call class TestTrainerDistributedNPU(TestCasePlus): @require_torch_npu def test_trainer(self): distributed_args = f"""--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py """.split() output_dir = self.get_auto_remove_tmp_dir() args = f"--output_dir {output_dir}".split() cmd = ["torchrun"] + distributed_args + args execute_subprocess_async(cmd, env=self.get_env()) # successful return here == success - any errors would have caused an error in the sub-call class TestTrainerDistributed(TestCasePlus): @require_torch_multi_gpu def test_trainer(self): distributed_args = f"""--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py """.split() output_dir = self.get_auto_remove_tmp_dir() args = f"--output_dir {output_dir}".split() cmd = ["torchrun"] + distributed_args + args execute_subprocess_async(cmd, env=self.get_env()) # successful return here == success - any errors would have caused an error in the sub-call @require_torch_multi_xpu class TestTrainerDistributedXPU(TestCasePlus): def test_trainer(self): distributed_args = f"""--nproc_per_node={torch.xpu.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py """.split() output_dir = self.get_auto_remove_tmp_dir() args = f"--output_dir {output_dir}".split() cmd = ["torchrun"] + distributed_args + args execute_subprocess_async(cmd, env=self.get_env()) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py parser = HfArgumentParser((TrainingArguments,)) training_args = parser.parse_args_into_dataclasses()[0] logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}" ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. 
(this is crucial for prediction for instance) for dataset_length in [101, 40, 7]: dataset = DummyDataset(dataset_length) def compute_metrics(p: EvalPrediction) -> Dict: sequential = list(range(len(dataset))) success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( "Predictions and/or labels do not match expected results:\n - predictions: " f"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}" ) return {"success": success} trainer = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) metrics = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) p = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) trainer.args.eval_accumulation_steps = 2 metrics = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) p = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) trainer.args.eval_accumulation_steps = None # Check that `dispatch_batches=False` will work on a finite iterable dataset train_dataset = FiniteIterableDataset(label_names=["labels", "extra"], length=1) model = RegressionModel() training_args.per_device_train_batch_size = 1 training_args.max_steps = 1 training_args.dispatch_batches = False trainer = Trainer(model, training_args, train_dataset=train_dataset) trainer.train()
0
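The order check in the distributed script above holds because each rank evaluates a contiguous, padded slice of the dataset, and the gathered results are truncated back to the true length. A toy version of that slicing with plain lists (no `torch.distributed` involved; the `shard` helper is an illustrative stand-in for `SequentialDistributedSampler`'s chunking, not the library's code):

def shard(dataset, num_replicas, rank):
    # Pad so every rank gets the same number of samples, then take a contiguous chunk.
    per_rank = -(-len(dataset) // num_replicas)  # ceil division
    padded = dataset + dataset[: per_rank * num_replicas - len(dataset)]
    return padded[rank * per_rank : (rank + 1) * per_rank]


dataset = list(range(101))  # same odd length as DummyDataset
gathered = []
for rank in range(2):
    gathered += shard(dataset, num_replicas=2, rank=rank)
assert gathered[: len(dataset)] == dataset  # samples come back in the original order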
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/trainer/test_trainer.py
# coding=utf-8 # Copyright 2018 the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import dataclasses import gc import json import math import os import random import re import subprocess import sys import tempfile import unittest from itertools import product from pathlib import Path from typing import Dict, List from unittest.mock import Mock, patch import numpy as np from huggingface_hub import HfFolder, delete_repo, list_repo_commits, list_repo_files from parameterized import parameterized from requests.exceptions import HTTPError from transformers import ( AutoTokenizer, IntervalStrategy, PretrainedConfig, TrainerCallback, TrainingArguments, get_polynomial_decay_schedule_with_warmup, is_torch_available, logging, ) from transformers.hyperparameter_search import ALL_HYPERPARAMETER_SEARCH_BACKENDS from transformers.testing_utils import ( ENDPOINT_STAGING, TOKEN, USER, CaptureLogger, TestCasePlus, backend_device_count, execute_subprocess_async, get_gpu_count, get_tests_dir, is_staging_test, require_accelerate, require_intel_extension_for_pytorch, require_optuna, require_ray, require_safetensors, require_sentencepiece, require_sigopt, require_tensorboard, require_tokenizers, require_torch, require_torch_accelerator, require_torch_bf16, require_torch_gpu, require_torch_multi_accelerator, require_torch_non_multi_accelerator, require_torch_non_multi_gpu, require_torch_tensorrt_fx, require_torch_tf32, require_torch_up_to_2_accelerators, require_torchdynamo, require_wandb, slow, torch_device, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR, HPSearchBackend, get_last_checkpoint from transformers.training_args import OptimizerNames from transformers.utils import ( SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, is_apex_available, is_bitsandbytes_available, is_safetensors_available, is_torchdistx_available, ) from transformers.utils.hp_naming import TrialShortNamer if is_torch_available(): import torch from torch import nn from torch.utils.data import IterableDataset import transformers.optimization from transformers import ( AutoModelForSequenceClassification, EarlyStoppingCallback, GlueDataset, GlueDataTrainingArguments, GPT2Config, GPT2LMHeadModel, LineByLineTextDataset, PreTrainedModel, Trainer, TrainerState, ) from transformers.modeling_utils import unwrap_model if is_safetensors_available(): import safetensors.torch PATH_SAMPLE_TEXT = f"{get_tests_dir()}/fixtures/sample_text.txt" class RegressionDataset: def __init__(self, a=2, b=3, length=64, seed=42, label_names=None): np.random.seed(seed) self.label_names = ["labels"] if label_names is None else label_names self.length = length self.x = np.random.normal(size=(length,)).astype(np.float32) self.ys = [a * self.x + b + np.random.normal(scale=0.1, size=(length,)) for _ in self.label_names] self.ys = [y.astype(np.float32) for y in self.ys] def __len__(self): return self.length def __getitem__(self, i): result = {name: 
y[i] for name, y in zip(self.label_names, self.ys)} result["input_x"] = self.x[i] return result @dataclasses.dataclass class RegressionTrainingArguments(TrainingArguments): a: float = 0.0 b: float = 0.0 keep_report_to: bool = False def __post_init__(self): super().__post_init__() # save resources not dealing with reporting unless specified (also avoids the warning when it's not set) # can be explicitly disabled via `keep_report_to` if not self.keep_report_to: self.report_to = [] class RepeatDataset: def __init__(self, x, length=64): self.x = x self.length = length def __len__(self): return self.length def __getitem__(self, i): return {"input_ids": self.x, "labels": self.x} class DynamicShapesDataset: def __init__(self, length=64, seed=42, batch_size=8): self.length = length np.random.seed(seed) sizes = np.random.randint(1, 20, (length // batch_size,)) # For easy batching, we make every batch_size consecutive samples the same size. self.xs = [np.random.normal(size=(s,)) for s in sizes.repeat(batch_size)] self.ys = [np.random.normal(size=(s,)) for s in sizes.repeat(batch_size)] def __len__(self): return self.length def __getitem__(self, i): return {"input_x": self.xs[i], "labels": self.ys[i]} class AlmostAccuracy: def __init__(self, thresh=0.25): self.thresh = thresh def __call__(self, eval_pred): predictions, labels = eval_pred true = np.abs(predictions - labels) <= self.thresh return {"accuracy": true.astype(np.float32).mean().item()} class RegressionModelConfig(PretrainedConfig): def __init__(self, a=0, b=0, double_output=False, random_torch=True, **kwargs): super().__init__(**kwargs) self.a = a self.b = b self.double_output = double_output self.random_torch = random_torch self.hidden_size = 1 if is_torch_available(): class SampleIterableDataset(IterableDataset): def __init__(self, a=2, b=3, length=64, seed=42, label_names=None): self.dataset = RegressionDataset(a=a, b=b, length=length, seed=seed, label_names=label_names) def __iter__(self): for i in range(len(self.dataset)): yield self.dataset[i] class FiniteIterableDataset(SampleIterableDataset): def __init__(self, a=2, b=3, length=64, seed=42, label_names=None): super().__init__(a, b, length, seed, label_names) self.current_sample = 0 def __iter__(self): while self.current_sample < len(self.dataset): yield self.dataset[self.current_sample] self.current_sample += 1 class MultiLoader: def __init__(self, loaders): self.loaders = loaders def __len__(self): return sum(len(loader) for loader in self.loaders) def __iter__(self): for loader in self.loaders: yield from loader class CustomDataloaderTrainer(Trainer): def get_train_dataloader(self): dataloaders = [super().get_train_dataloader(), super().get_train_dataloader()] return MultiLoader(dataloaders) def get_eval_dataloader(self, eval_dataset): dataloaders = [super().get_eval_dataloader(eval_dataset), super().get_eval_dataloader(eval_dataset)] return MultiLoader(dataloaders) class RegressionModel(nn.Module): def __init__(self, a=0, b=0, double_output=False): super().__init__() self.a = nn.Parameter(torch.tensor(a).float()) self.b = nn.Parameter(torch.tensor(b).float()) self.double_output = double_output self.config = None def forward(self, input_x, labels=None, **kwargs): y = input_x * self.a + self.b if labels is None: return (y, y) if self.double_output else (y,) loss = nn.functional.mse_loss(y, labels) return (loss, y, y) if self.double_output else (loss, y) class RegressionDictModel(nn.Module): def __init__(self, a=0, b=0): super().__init__() self.a = 
nn.Parameter(torch.tensor(a).float()) self.b = nn.Parameter(torch.tensor(b).float()) self.config = None def forward(self, input_x, labels=None, **kwargs): y = input_x * self.a + self.b result = {"output": y} if labels is not None: result["loss"] = nn.functional.mse_loss(y, labels) return result class RegressionPreTrainedModel(PreTrainedModel): config_class = RegressionModelConfig base_model_prefix = "regression" def __init__(self, config): super().__init__(config) self.a = nn.Parameter(torch.tensor(config.a).float()) self.b = nn.Parameter(torch.tensor(config.b).float()) self.double_output = config.double_output def forward(self, input_x, labels=None, **kwargs): y = input_x * self.a + self.b if labels is None: return (y, y) if self.double_output else (y,) loss = nn.functional.mse_loss(y, labels) return (loss, y, y) if self.double_output else (loss, y) class RegressionPreTrainedModelWithGradientCheckpointing(PreTrainedModel): config_class = RegressionModelConfig base_model_prefix = "regression" supports_gradient_checkpointing = True def __init__(self, config): super().__init__(config) self.layers = nn.ModuleList([nn.Linear(config.hidden_size, config.hidden_size) for _ in range(4)]) self.head = nn.Linear(config.hidden_size, 1) self.gradient_checkpointing = False self.double_output = config.double_output def forward(self, input_x, labels=None, **kwargs): y = input_x.unsqueeze(0) for layer in self.layers: if self.training and self.gradient_checkpointing: outputs = self._gradient_checkpointing_func(layer.__call__, y) else: outputs = layer(y) y = outputs * 3 logits = self.head(y) if labels is None: return (logits, logits) if self.double_output else (logits,) loss = nn.functional.mse_loss(logits, labels) return (loss, y, y) if self.double_output else (loss, y) class RegressionRandomPreTrainedModel(PreTrainedModel): config_class = RegressionModelConfig base_model_prefix = "regression" def __init__(self, config): super().__init__(config) self.a = nn.Parameter(torch.tensor(config.a).float()) self.b = nn.Parameter(torch.tensor(config.b).float()) self.random_torch = config.random_torch def forward(self, input_x, labels=None, **kwargs): y = input_x * self.a + self.b if self.random_torch: torch_rand = torch.randn(1).squeeze() np_rand = np.random.rand() rand_rand = random.random() if self.random_torch: y += 0.05 * torch_rand y += 0.05 * torch.tensor(np_rand + rand_rand) if labels is None: return (y,) loss = nn.functional.mse_loss(y, labels) return (loss, y) class TstLayer(nn.Module): def __init__(self, hidden_size): super().__init__() self.linear1 = nn.Linear(hidden_size, hidden_size) self.ln1 = nn.LayerNorm(hidden_size) self.linear2 = nn.Linear(hidden_size, hidden_size) self.ln2 = nn.LayerNorm(hidden_size) self.bias = nn.Parameter(torch.zeros(hidden_size)) def forward(self, x): h = self.ln1(nn.functional.relu(self.linear1(x))) h = nn.functional.relu(self.linear2(x)) return self.ln2(x + h + self.bias) def get_regression_trainer( a=0, b=0, double_output=False, train_len=64, eval_len=64, pretrained=True, keep_report_to=False, **kwargs ): label_names = kwargs.get("label_names", None) gradient_checkpointing = kwargs.get("gradient_checkpointing", False) train_dataset = RegressionDataset(length=train_len, label_names=label_names) eval_dataset = RegressionDataset(length=eval_len, label_names=label_names) model_init = kwargs.pop("model_init", None) if model_init is not None: model = None else: if pretrained: config = RegressionModelConfig(a=a, b=b, double_output=double_output) # We infer the correct model class 
if one uses gradient_checkpointing or not target_cls = ( RegressionPreTrainedModel if not gradient_checkpointing else RegressionPreTrainedModelWithGradientCheckpointing ) model = target_cls(config) else: model = RegressionModel(a=a, b=b, double_output=double_output) compute_metrics = kwargs.pop("compute_metrics", None) data_collator = kwargs.pop("data_collator", None) optimizers = kwargs.pop("optimizers", (None, None)) output_dir = kwargs.pop("output_dir", "./regression") preprocess_logits_for_metrics = kwargs.pop("preprocess_logits_for_metrics", None) args = RegressionTrainingArguments(output_dir, a=a, b=b, keep_report_to=keep_report_to, **kwargs) return Trainer( model, args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, optimizers=optimizers, model_init=model_init, preprocess_logits_for_metrics=preprocess_logits_for_metrics, ) class TrainerIntegrationCommon: def check_saved_checkpoints(self, output_dir, freq, total, is_pretrained=True, safe_weights=True): weights_file = WEIGHTS_NAME if not safe_weights else SAFE_WEIGHTS_NAME file_list = [weights_file, "training_args.bin", "optimizer.pt", "scheduler.pt", "trainer_state.json"] if is_pretrained: file_list.append("config.json") for step in range(freq, total, freq): checkpoint = os.path.join(output_dir, f"checkpoint-{step}") self.assertTrue(os.path.isdir(checkpoint)) for filename in file_list: self.assertTrue(os.path.isfile(os.path.join(checkpoint, filename))) def check_best_model_has_been_loaded( self, output_dir, freq, total, trainer, metric, greater_is_better=False, is_pretrained=True, safe_weights=True ): checkpoint = os.path.join(output_dir, f"checkpoint-{(total // freq) * freq}") log_history = TrainerState.load_from_json(os.path.join(checkpoint, "trainer_state.json")).log_history values = [d[metric] for d in log_history] best_value = max(values) if greater_is_better else min(values) best_checkpoint = (values.index(best_value) + 1) * freq checkpoint = os.path.join(output_dir, f"checkpoint-{best_checkpoint}") if is_pretrained: best_model = RegressionPreTrainedModel.from_pretrained(checkpoint) best_model.to(trainer.args.device) else: best_model = RegressionModel() if not safe_weights: state_dict = torch.load(os.path.join(checkpoint, WEIGHTS_NAME)) else: state_dict = safetensors.torch.load_file(os.path.join(checkpoint, SAFE_WEIGHTS_NAME)) best_model.load_state_dict(state_dict) best_model.to(trainer.args.device) self.assertTrue(torch.allclose(best_model.a, trainer.model.a)) self.assertTrue(torch.allclose(best_model.b, trainer.model.b)) metrics = trainer.evaluate() self.assertEqual(metrics[metric], best_value) def check_trainer_state_are_the_same(self, trainer_state, trainer_state1): # We'll pop things so operate on copies. state = trainer_state.copy() state1 = trainer_state1.copy() # Log history main contain different logs for the time metrics (after resuming a training). log_history = state.pop("log_history", None) log_history1 = state1.pop("log_history", None) self.assertEqual(state, state1) skip_log_keys = ["train_runtime", "train_samples_per_second", "train_steps_per_second", "train_loss"] for log, log1 in zip(log_history, log_history1): for key in skip_log_keys: _ = log.pop(key, None) _ = log1.pop(key, None) self.assertEqual(log, log1) def convert_to_sharded_checkpoint(self, folder, save_safe=True, load_safe=True): # Converts a checkpoint of a regression model to a sharded checkpoint. 
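        # The code below produces one shard per parameter: the monolithic weights file is split
        # into single-tensor files, and an index JSON maps each parameter name to its shard file
        # via "weight_map" -- the layout that sharded `from_pretrained` checkpoints use.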
if load_safe: loader = safetensors.torch.load_file weights_file = os.path.join(folder, SAFE_WEIGHTS_NAME) else: loader = torch.load weights_file = os.path.join(folder, WEIGHTS_NAME) if save_safe: extension = "safetensors" saver = safetensors.torch.save_file index_file = os.path.join(folder, SAFE_WEIGHTS_INDEX_NAME) shard_name = SAFE_WEIGHTS_NAME else: extension = "bin" saver = torch.save index_file = os.path.join(folder, WEIGHTS_INDEX_NAME) shard_name = WEIGHTS_NAME state_dict = loader(weights_file) os.remove(weights_file) keys = list(state_dict.keys()) shard_files = [ shard_name.replace(f".{extension}", f"-{idx+1:05d}-of-{len(keys):05d}.{extension}") for idx in range(len(keys)) ] index = {"metadata": {}, "weight_map": {key: shard_files[i] for i, key in enumerate(keys)}} with open(index_file, "w", encoding="utf-8") as f: content = json.dumps(index, indent=2, sort_keys=True) + "\n" f.write(content) for param_name, shard_file in zip(keys, shard_files): saver({param_name: state_dict[param_name]}, os.path.join(folder, shard_file)) @require_torch @require_sentencepiece @require_tokenizers class TrainerIntegrationPrerunTest(TestCasePlus, TrainerIntegrationCommon): """ Only tests that want to tap into the auto-pre-run 2 trainings: - self.default_trained_model - self.alternate_trained_model directly, or via check_trained_model """ def setUp(self): super().setUp() args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size trainer = get_regression_trainer(learning_rate=0.1) trainer.train() self.default_trained_model = (trainer.model.a, trainer.model.b) trainer = get_regression_trainer(learning_rate=0.1, seed=314) trainer.train() self.alternate_trained_model = (trainer.model.a, trainer.model.b) def check_trained_model(self, model, alternate_seed=False): # Checks a training seeded with learning_rate = 0.1 (a, b) = self.alternate_trained_model if alternate_seed else self.default_trained_model self.assertTrue(torch.allclose(model.a, a)) self.assertTrue(torch.allclose(model.b, b)) def test_reproducible_training(self): # Checks that training worked, model trained and seed made a reproducible training. trainer = get_regression_trainer(learning_rate=0.1) trainer.train() self.check_trained_model(trainer.model) # Checks that a different seed gets different (reproducible) results. trainer = get_regression_trainer(learning_rate=0.1, seed=314) trainer.train() self.check_trained_model(trainer.model, alternate_seed=True) def test_trainer_with_datasets(self): import datasets np.random.seed(42) x = np.random.normal(size=(64,)).astype(np.float32) y = 2.0 * x + 3.0 + np.random.normal(scale=0.1, size=(64,)) train_dataset = datasets.Dataset.from_dict({"input_x": x, "label": y}) # Base training. Should have the same results as test_reproducible_training model = RegressionModel() args = TrainingArguments("./regression", learning_rate=0.1) trainer = Trainer(model, args, train_dataset=train_dataset) trainer.train() self.check_trained_model(trainer.model) # Can return tensors. 
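        # set_format(type="torch") makes the datasets.Dataset yield torch tensors directly from
        # __getitem__; training should give the same result as with the default python objects.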
train_dataset.set_format(type="torch", dtype=torch.float32) model = RegressionModel() trainer = Trainer(model, args, train_dataset=train_dataset) trainer.train() self.check_trained_model(trainer.model) # Adding one column not used by the model should have no impact z = np.random.normal(size=(64,)).astype(np.float32) train_dataset = datasets.Dataset.from_dict({"input_x": x, "label": y, "extra": z}) model = RegressionModel() trainer = Trainer(model, args, train_dataset=train_dataset) trainer.train() self.check_trained_model(trainer.model) def test_model_init(self): train_dataset = RegressionDataset() args = TrainingArguments("./regression", learning_rate=0.1) trainer = Trainer(args=args, train_dataset=train_dataset, model_init=lambda: RegressionModel()) trainer.train() self.check_trained_model(trainer.model) # Re-training should restart from scratch, thus lead the same results. trainer.train() self.check_trained_model(trainer.model) # Re-training should restart from scratch, thus lead the same results and new seed should be used. trainer.args.seed = 314 trainer.train() self.check_trained_model(trainer.model, alternate_seed=True) def test_gradient_accumulation(self): # Training with half the batch size but accumulation steps as 2 should give the same results. trainer = get_regression_trainer( gradient_accumulation_steps=2, per_device_train_batch_size=4, learning_rate=0.1 ) trainer.train() self.check_trained_model(trainer.model) def test_gradient_checkpointing(self): trainer = get_regression_trainer( per_device_train_batch_size=1, learning_rate=0.1, gradient_checkpointing=True, gradient_checkpointing_kwargs={"use_reentrant": False}, ) previous_params = {k: v.detach().clone() for k, v in trainer.model.named_parameters()} trainer.train() # Check if model weights have been updated for k, v in trainer.model.named_parameters(): self.assertFalse( torch.allclose(previous_params[k], v, rtol=1e-4, atol=1e-4), f"Model weights for {k} have not been updated", ) def test_training_loss(self): n_gpus = max(1, backend_device_count(torch_device)) # With even logs trainer = get_regression_trainer(logging_steps=64 / (8 * n_gpus)) trainer.train() log_history = trainer.state.log_history losses = [log["loss"] for log in log_history if "loss" in log] train_loss = log_history[-1]["train_loss"] self.assertAlmostEqual(sum(losses) / len(losses), train_loss, places=4) # With uneven logs trainer = get_regression_trainer(logging_steps=5) trainer.train() log_history = trainer.state.log_history # Training loss should be the same as before new_train_loss = log_history[-1]["train_loss"] self.assertAlmostEqual(train_loss, new_train_loss, places=4) def test_custom_optimizer(self): train_dataset = RegressionDataset() args = TrainingArguments("./regression") model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=1.0) lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda x: 1.0) trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler)) trainer.train() (a, b) = self.default_trained_model self.assertFalse(torch.allclose(trainer.model.a, a)) self.assertFalse(torch.allclose(trainer.model.b, b)) self.assertEqual(trainer.optimizer.state_dict()["param_groups"][0]["lr"], 1.0) def test_lr_scheduler_kwargs(self): # test scheduler kwargs passed via TrainingArguments train_dataset = RegressionDataset() model = RegressionModel() num_steps, num_warmup_steps = 10, 2 extra_kwargs = {"power": 5.0, "lr_end": 1e-5} # Non-default arguments args = TrainingArguments( 
"./regression", lr_scheduler_type="polynomial", lr_scheduler_kwargs=extra_kwargs, learning_rate=0.2, warmup_steps=num_warmup_steps, ) trainer = Trainer(model, args, train_dataset=train_dataset) trainer.create_optimizer_and_scheduler(num_training_steps=num_steps) # Checking that the scheduler was created self.assertIsNotNone(trainer.lr_scheduler) # Checking that the correct args were passed sched1 = trainer.lr_scheduler sched2 = get_polynomial_decay_schedule_with_warmup( trainer.optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_steps, **extra_kwargs ) self.assertEqual(sched1.lr_lambdas[0].args, sched2.lr_lambdas[0].args) self.assertEqual(sched1.lr_lambdas[0].keywords, sched2.lr_lambdas[0].keywords) def test_reduce_lr_on_plateau_args(self): # test passed arguments for a custom ReduceLROnPlateau scheduler train_dataset = RegressionDataset(length=64) eval_dataset = RegressionDataset(length=64) args = TrainingArguments( "./regression", evaluation_strategy="epoch", metric_for_best_model="eval_loss", ) model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=1.0) lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.2, patience=5, cooldown=2) trainer = Trainer( model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, optimizers=(optimizer, lr_scheduler) ) trainer.train() self.assertIsInstance(trainer.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau) self.assertEqual(trainer.lr_scheduler.factor, 0.2) self.assertEqual(trainer.lr_scheduler.patience, 5) self.assertEqual(trainer.lr_scheduler.cooldown, 2) def test_reduce_lr_on_plateau(self): # test the ReduceLROnPlateau scheduler class TrainerWithLRLogs(Trainer): def log(self, logs): # the LR is computed after metrics and does not exist for the first epoch if hasattr(self.lr_scheduler, "_last_lr"): logs["learning_rate"] = self.lr_scheduler._last_lr[0] super().log(logs) train_dataset = RegressionDataset(length=64) eval_dataset = RegressionDataset(length=64) args = TrainingArguments( "./regression", lr_scheduler_type="reduce_lr_on_plateau", evaluation_strategy="epoch", metric_for_best_model="eval_loss", num_train_epochs=10, learning_rate=0.2, ) model = RegressionModel() trainer = TrainerWithLRLogs(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train() self.assertIsInstance(trainer.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau) patience = trainer.lr_scheduler.patience logs = trainer.state.log_history[1:] best_loss = logs[0]["eval_loss"] bad_epochs = 0 for i, log in enumerate(logs[:-1]): # Compare learning rate to next epoch's loss = log["eval_loss"] just_decreased = False if loss > best_loss: bad_epochs += 1 if bad_epochs > patience: self.assertLess(logs[i + 1]["learning_rate"], log["learning_rate"]) just_decreased = True bad_epochs = 0 else: best_loss = loss bad_epochs = 0 if not just_decreased: self.assertEqual(logs[i + 1]["learning_rate"], log["learning_rate"]) def test_adafactor_lr_none(self): # test the special case where lr=None, since Trainer can't not have lr_scheduler from transformers.optimization import Adafactor, AdafactorSchedule train_dataset = RegressionDataset() args = TrainingArguments("./regression") model = RegressionModel() optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None) lr_scheduler = AdafactorSchedule(optimizer) trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler)) trainer.train() (a, b) = 
self.default_trained_model self.assertFalse(torch.allclose(trainer.model.a, a)) self.assertFalse(torch.allclose(trainer.model.b, b)) self.assertGreater(trainer.optimizer.state_dict()["param_groups"][0]["lr"], 0) @require_torch_accelerator @require_torch_bf16 def test_mixed_bf16(self): # very basic test trainer = get_regression_trainer(learning_rate=0.1, bf16=True) trainer.train() self.check_trained_model(trainer.model) # --bf16 --half_precision_backend apex can't be used together with self.assertRaises(ValueError): trainer = get_regression_trainer(learning_rate=0.1, bf16=True, half_precision_backend="apex") # will add more specific tests once there are some bugs to fix @require_torch_gpu @require_torch_tf32 def test_tf32(self): # very basic test trainer = get_regression_trainer(learning_rate=0.1, tf32=True) trainer.train() self.check_trained_model(trainer.model) @require_torch @require_sentencepiece @require_tokenizers class TrainerIntegrationTest(TestCasePlus, TrainerIntegrationCommon): def setUp(self): super().setUp() args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def test_trainer_works_with_dict(self): # Edge case because Apex with mode O2 will change our models to return dicts. This test checks it doesn't break # anything. train_dataset = RegressionDataset() eval_dataset = RegressionDataset() model = RegressionDictModel() args = TrainingArguments("./regression") trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train() _ = trainer.evaluate() _ = trainer.predict(eval_dataset) def test_evaluation_with_keys_to_drop(self): config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4) tiny_gpt2 = GPT2LMHeadModel(config) x = torch.randint(0, 100, (128,)) eval_dataset = RepeatDataset(x) args = TrainingArguments("./test") trainer = Trainer(tiny_gpt2, args, eval_dataset=eval_dataset) # By default the past_key_values are removed result = trainer.predict(eval_dataset) self.assertTrue(isinstance(result.predictions, np.ndarray)) # We can still get them by setting ignore_keys to [] result = trainer.predict(eval_dataset, ignore_keys=[]) self.assertTrue(isinstance(result.predictions, tuple)) self.assertEqual(len(result.predictions), 2) def test_training_arguments_are_left_untouched(self): trainer = get_regression_trainer() trainer.train() args = TrainingArguments("./regression", report_to=[]) dict1, dict2 = args.to_dict(), trainer.args.to_dict() for key in dict1.keys(): # Logging dir can be slightly different as they default to something with the time. 
if key != "logging_dir": self.assertEqual(dict1[key], dict2[key]) def test_number_of_steps_in_training(self): # Regular training has n_epochs * len(train_dl) steps trainer = get_regression_trainer(learning_rate=0.1) train_output = trainer.train() self.assertEqual(train_output.global_step, self.n_epochs * 64 / self.batch_size) # Check passing num_train_epochs works (and a float version too): trainer = get_regression_trainer(learning_rate=0.1, num_train_epochs=1.5) train_output = trainer.train() self.assertEqual(train_output.global_step, int(1.5 * 64 / self.batch_size)) # If we pass a max_steps, num_train_epochs is ignored trainer = get_regression_trainer(learning_rate=0.1, max_steps=10) train_output = trainer.train() self.assertEqual(train_output.global_step, 10) @require_torch_bf16 @require_intel_extension_for_pytorch def test_number_of_steps_in_training_with_ipex(self): for mix_bf16 in [True, False]: # Regular training has n_epochs * len(train_dl) steps trainer = get_regression_trainer(learning_rate=0.1, use_ipex=True, bf16=mix_bf16, use_cpu=True) train_output = trainer.train() self.assertEqual(train_output.global_step, self.n_epochs * 64 / trainer.args.train_batch_size) # Check passing num_train_epochs works (and a float version too): trainer = get_regression_trainer( learning_rate=0.1, num_train_epochs=1.5, use_ipex=True, bf16=mix_bf16, use_cpu=True ) train_output = trainer.train() self.assertEqual(train_output.global_step, int(1.5 * 64 / trainer.args.train_batch_size)) # If we pass a max_steps, num_train_epochs is ignored trainer = get_regression_trainer( learning_rate=0.1, max_steps=10, use_ipex=True, bf16=mix_bf16, use_cpu=True ) train_output = trainer.train() self.assertEqual(train_output.global_step, 10) def test_neftune(self): config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4) tiny_gpt2 = GPT2LMHeadModel(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments( "./test", learning_rate=1e-9, logging_steps=5, logging_nan_inf_filter=False, neftune_noise_alpha=0.4 ) trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset) trainer.model = trainer._activate_neftune(trainer.model) dummy_input = torch.LongTensor([[1, 0, 1]]).to(torch_device) emb1 = trainer.model.get_input_embeddings()(dummy_input) emb2 = trainer.model.get_input_embeddings()(dummy_input) self.assertFalse(torch.allclose(emb1, emb2), "Neftune noise is not applied!") # redefine the model tiny_gpt2 = GPT2LMHeadModel(config) # Trainer without inf/nan filter args = TrainingArguments( "./test", learning_rate=1e-9, logging_steps=5, logging_nan_inf_filter=False, neftune_noise_alpha=0.4 ) trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset) # Check that it trains without errors trainer.train() # Make sure forward pass works fine _ = trainer.model(dummy_input) self.assertTrue(len(trainer.model.get_input_embeddings()._forward_hooks) == 0) trainer.model.eval() # Check that we get identical embeddings just in case emb1 = trainer.model.get_input_embeddings()(dummy_input) emb2 = trainer.model.get_input_embeddings()(dummy_input) self.assertTrue(torch.allclose(emb1, emb2), "Neftune noise is still applied!") def test_logging_inf_nan_filter(self): config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4) tiny_gpt2 = GPT2LMHeadModel(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments("./test", learning_rate=1e9, 
logging_steps=5, logging_nan_inf_filter=False) trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset) trainer.train() log_history_no_filter = trainer.state.log_history # Trainer with inf/nan filter args = TrainingArguments("./test", learning_rate=1e9, logging_steps=5, logging_nan_inf_filter=True) trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset) trainer.train() log_history_filter = trainer.state.log_history def is_any_loss_nan_or_inf(log_history): losses = [l["loss"] for l in log_history[:-1]] return any(math.isnan(x) for x in losses) or any(math.isinf(x) for x in losses) self.assertTrue(is_any_loss_nan_or_inf(log_history_no_filter)) self.assertFalse(is_any_loss_nan_or_inf(log_history_filter)) def test_train_and_eval_dataloaders(self): n_gpu = max(1, backend_device_count(torch_device)) trainer = get_regression_trainer(learning_rate=0.1, per_device_train_batch_size=16) self.assertEqual(trainer.get_train_dataloader().total_batch_size, 16 * n_gpu) trainer = get_regression_trainer(learning_rate=0.1, per_device_eval_batch_size=16) self.assertEqual(trainer.get_eval_dataloader().total_batch_size, 16 * n_gpu) # Check drop_last works trainer = get_regression_trainer( train_len=66, eval_len=74, learning_rate=0.1, per_device_train_batch_size=16, per_device_eval_batch_size=32 ) self.assertEqual(len(trainer.get_train_dataloader()), 66 // (16 * n_gpu) + 1) self.assertEqual(len(trainer.get_eval_dataloader()), 74 // (32 * n_gpu) + 1) trainer = get_regression_trainer( train_len=66, eval_len=74, learning_rate=0.1, per_device_train_batch_size=16, per_device_eval_batch_size=32, dataloader_drop_last=True, ) self.assertEqual(len(trainer.get_train_dataloader()), 66 // (16 * n_gpu)) self.assertEqual(len(trainer.get_eval_dataloader()), 74 // (32 * n_gpu)) # Check passing a new dataset for evaluation works new_eval_dataset = RegressionDataset(length=128) self.assertEqual(len(trainer.get_eval_dataloader(new_eval_dataset)), 128 // (32 * n_gpu)) # tests that we do not require dataloader to have a .dataset attribute def test_dataloader_without_dataset(self): train_dataset = RegressionDataset(length=128) trainer = CustomDataloaderTrainer( model=RegressionModel(), train_dataset=train_dataset, eval_dataset=train_dataset ) trainer.train() trainer.evaluate() @require_torch_multi_accelerator def test_data_is_not_parallelized_when_model_is_parallel(self): model = RegressionModel() # Make the Trainer believe it's a parallelized model model.is_parallelizable = True model.model_parallel = True args = TrainingArguments("./regression", per_device_train_batch_size=16, per_device_eval_batch_size=16) trainer = Trainer(model, args, train_dataset=RegressionDataset(), eval_dataset=RegressionDataset()) # Check the Trainer was fooled self.assertTrue(trainer.is_model_parallel) self.assertEqual(trainer.args.n_gpu, 1) # The batch size of the training and evaluation dataloaders should be 16, not 16 * n_gpu self.assertEqual(trainer.get_train_dataloader().total_batch_size, 16) self.assertEqual(len(trainer.get_train_dataloader()), 64 // 16) self.assertEqual(trainer.get_eval_dataloader().total_batch_size, 16) self.assertEqual(len(trainer.get_eval_dataloader()), 64 // 16) def test_evaluate(self): trainer = get_regression_trainer(a=1.5, b=2.5, compute_metrics=AlmostAccuracy()) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, 
y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) # With a number of elements not a round multiple of the batch size trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, compute_metrics=AlmostAccuracy()) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) # With logits preprocess trainer = get_regression_trainer( a=1.5, b=2.5, compute_metrics=AlmostAccuracy(), preprocess_logits_for_metrics=lambda logits, labels: logits + 1, ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred + 1, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) def test_evaluate_with_jit(self): trainer = get_regression_trainer(a=1.5, b=2.5, compute_metrics=AlmostAccuracy(), jit_mode_eval=True) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) # With a number of elements not a round multiple of the batch size trainer = get_regression_trainer( a=1.5, b=2.5, eval_len=66, compute_metrics=AlmostAccuracy(), jit_mode_eval=True ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) # With logits preprocess trainer = get_regression_trainer( a=1.5, b=2.5, compute_metrics=AlmostAccuracy(), preprocess_logits_for_metrics=lambda logits, labels: logits + 1, jit_mode_eval=True, ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred + 1, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) @require_torch_bf16 @require_intel_extension_for_pytorch def test_evaluate_with_ipex(self): for mix_bf16 in [True, False]: trainer = get_regression_trainer( a=1.5, b=2.5, use_ipex=True, compute_metrics=AlmostAccuracy(), bf16=mix_bf16, use_cpu=True ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) # With a number of elements not a round multiple of the batch size trainer = get_regression_trainer( a=1.5, b=2.5, use_ipex=True, eval_len=66, compute_metrics=AlmostAccuracy(), bf16=mix_bf16, use_cpu=True, ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], 
expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) # With logits preprocess trainer = get_regression_trainer( a=1.5, b=2.5, use_ipex=True, compute_metrics=AlmostAccuracy(), preprocess_logits_for_metrics=lambda logits, labels: logits + 1, bf16=mix_bf16, use_cpu=True, ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred + 1, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) def test_predict(self): trainer = get_regression_trainer(a=1.5, b=2.5) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) # With a number of elements not a round multiple of the batch size trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) # With more than one output of the model trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) # With more than one output/label of the model trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True, label_names=["labels", "labels_2"]) outputs = trainer.predict(trainer.eval_dataset) preds = outputs.predictions labels = outputs.label_ids x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0])) self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1])) def test_predict_with_jit(self): trainer = get_regression_trainer(a=1.5, b=2.5, jit_mode_eval=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) # With a number of elements not a round multiple of the batch size trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, jit_mode_eval=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) # With more than one output of the model trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True, jit_mode_eval=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) # With more than one output/label of the model trainer = get_regression_trainer( a=1.5, b=2.5, double_output=True, label_names=["labels", "labels_2"], jit_mode_eval=True ) outputs = trainer.predict(trainer.eval_dataset) preds = outputs.predictions labels = outputs.label_ids x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0])) self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1])) @require_torch_bf16 
    @require_torch_bf16
    @require_intel_extension_for_pytorch
    def test_predict_with_ipex(self):
        for mix_bf16 in [True, False]:
            trainer = get_regression_trainer(a=1.5, b=2.5, use_ipex=True, bf16=mix_bf16, use_cpu=True)
            preds = trainer.predict(trainer.eval_dataset).predictions
            x = trainer.eval_dataset.x
            self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))

            # With a number of elements not a round multiple of the batch size
            trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, use_ipex=True, bf16=mix_bf16, use_cpu=True)
            preds = trainer.predict(trainer.eval_dataset).predictions
            x = trainer.eval_dataset.x
            self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))

            # With more than one output of the model
            trainer = get_regression_trainer(
                a=1.5, b=2.5, double_output=True, use_ipex=True, bf16=mix_bf16, use_cpu=True
            )
            preds = trainer.predict(trainer.eval_dataset).predictions
            x = trainer.eval_dataset.x
            self.assertEqual(len(preds), 2)
            self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))
            self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))

            # With more than one output/label of the model
            trainer = get_regression_trainer(
                a=1.5,
                b=2.5,
                double_output=True,
                label_names=["labels", "labels_2"],
                use_ipex=True,
                bf16=mix_bf16,
                use_cpu=True,
            )
            outputs = trainer.predict(trainer.eval_dataset)
            preds = outputs.predictions
            labels = outputs.label_ids
            x = trainer.eval_dataset.x
            self.assertEqual(len(preds), 2)
            self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))
            self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))
            self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0]))
            self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1]))

    def test_dynamic_shapes(self):
        eval_dataset = DynamicShapesDataset(batch_size=self.batch_size)
        model = RegressionModel(a=2, b=1)
        args = TrainingArguments("./regression")
        trainer = Trainer(model, args, eval_dataset=eval_dataset)

        # Check evaluation can run to completion
        _ = trainer.evaluate()

        # Check predictions
        preds = trainer.predict(eval_dataset)
        for expected, seen in zip(eval_dataset.ys, preds.label_ids):
            self.assertTrue(np.array_equal(expected, seen[: expected.shape[0]]))
            self.assertTrue(np.all(seen[expected.shape[0] :] == -100))

        for expected, seen in zip(eval_dataset.xs, preds.predictions):
            self.assertTrue(np.array_equal(2 * expected + 1, seen[: expected.shape[0]]))
            self.assertTrue(np.all(seen[expected.shape[0] :] == -100))

        # Same tests with eval accumulation
        args = TrainingArguments("./regression", eval_accumulation_steps=2)
        trainer = Trainer(model, args, eval_dataset=eval_dataset)

        # Check evaluation can run to completion
        _ = trainer.evaluate()

        # Check predictions
        preds = trainer.predict(eval_dataset)
        for expected, seen in zip(eval_dataset.ys, preds.label_ids):
            self.assertTrue(np.array_equal(expected, seen[: expected.shape[0]]))
            self.assertTrue(np.all(seen[expected.shape[0] :] == -100))

        for expected, seen in zip(eval_dataset.xs, preds.predictions):
            self.assertTrue(np.array_equal(2 * expected + 1, seen[: expected.shape[0]]))
            self.assertTrue(np.all(seen[expected.shape[0] :] == -100))

    def test_log_level(self):
        # testing only --log_level (--log_level_replica requires multiple gpus and DDP and is tested elsewhere)
        logger = logging.get_logger()
        log_info_string = "Running training"

        # test with the default log_level - should be the same as before and thus we test depending on is_info
        is_info = logging.get_verbosity() <= 20
        with CaptureLogger(logger) as cl:
            trainer = get_regression_trainer()
            trainer.train()
        if is_info:
            self.assertIn(log_info_string, cl.out)
        else:
            self.assertNotIn(log_info_string, cl.out)
        # test with low log_level - lower than info
        with CaptureLogger(logger) as cl:
            trainer = get_regression_trainer(log_level="debug")
            trainer.train()
        self.assertIn(log_info_string, cl.out)

        # test with high log_level - should be quiet
        with CaptureLogger(logger) as cl:
            trainer = get_regression_trainer(log_level="error")
            trainer.train()
        self.assertNotIn(log_info_string, cl.out)

    def test_save_checkpoints(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5)
            trainer.train()
            self.check_saved_checkpoints(tmpdir, 5, int(self.n_epochs * 64 / self.batch_size))

        # With a regular model that is not a PreTrainedModel
        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5, pretrained=False)
            trainer.train()
            self.check_saved_checkpoints(tmpdir, 5, int(self.n_epochs * 64 / self.batch_size), False)

    def test_save_checkpoints_is_atomic(self):
        class UnsaveableTokenizer(PreTrainedTokenizerBase):
            def save_pretrained(self, *args, **kwargs):
                raise OSError("simulated file write error")

        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5)

            # Attach unsaveable tokenizer to partially fail checkpointing
            trainer.tokenizer = UnsaveableTokenizer()
            with self.assertRaises(OSError) as _context:
                trainer.train()
            assert get_last_checkpoint(tmpdir) is None

    @require_safetensors
    def test_safe_checkpoints(self):
        for save_safetensors in [True, False]:
            with tempfile.TemporaryDirectory() as tmpdir:
                trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5, save_safetensors=save_safetensors)
                trainer.train()
                self.check_saved_checkpoints(
                    tmpdir, 5, int(self.n_epochs * 64 / self.batch_size), safe_weights=save_safetensors
                )

            # With a regular model that is not a PreTrainedModel
            with tempfile.TemporaryDirectory() as tmpdir:
                trainer = get_regression_trainer(
                    output_dir=tmpdir, save_steps=5, pretrained=False, save_safetensors=save_safetensors
                )
                trainer.train()
                self.check_saved_checkpoints(
                    tmpdir, 5, int(self.n_epochs * 64 / self.batch_size), False, safe_weights=save_safetensors
                )

    @require_torch_multi_accelerator
    def test_run_seq2seq_double_train_wrap_once(self):
        # test that we don't wrap the model more than once
        # since wrapping primarily happens on multi-gpu setup we want multiple gpus to test for
        # example DataParallel(DataParallel(model))
        trainer = get_regression_trainer()
        trainer.train()
        model_wrapped_before = trainer.model_wrapped
        trainer.train()
        model_wrapped_after = trainer.model_wrapped
        self.assertIs(model_wrapped_before, model_wrapped_after, "should not be wrapped twice")
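
    # The resume tests below rely on the `checkpoint-<global_step>` directory naming produced by
    # `save_steps`: with save_steps=5, resuming from `checkpoint-5` (or `checkpoint-15`, which spans an
    # epoch boundary) should reproduce exactly the weights obtained by the uninterrupted run.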
    @require_torch_up_to_2_accelerators
    def test_can_resume_training(self):
        # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of
        # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model
        # won't be the same since the training dataloader is shuffled).

        with tempfile.TemporaryDirectory() as tmpdir:
            kwargs = {
                "output_dir": tmpdir,
                "train_len": 128,
                "save_steps": 5,
                "learning_rate": 0.1,
                "logging_steps": 5,
            }
            trainer = get_regression_trainer(**kwargs)
            trainer.train()
            (a, b) = trainer.model.a.item(), trainer.model.b.item()
            state = dataclasses.asdict(trainer.state)

            checkpoint = os.path.join(tmpdir, "checkpoint-5")

            # Reinitialize trainer
            trainer = get_regression_trainer(**kwargs)

            trainer.train(resume_from_checkpoint=checkpoint)
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)

            # Now check with a later checkpoint that it also works when we span over one epoch
            checkpoint = os.path.join(tmpdir, "checkpoint-15")

            # Reinitialize trainer and load model
            trainer = get_regression_trainer(**kwargs)

            trainer.train(resume_from_checkpoint=checkpoint)
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)

        # With a regular model that is not a PreTrainedModel
        with tempfile.TemporaryDirectory() as tmpdir:
            kwargs = {
                "output_dir": tmpdir,
                "train_len": 128,
                "save_steps": 5,
                "learning_rate": 0.1,
                "pretrained": False,
            }

            trainer = get_regression_trainer(**kwargs)
            trainer.train()
            (a, b) = trainer.model.a.item(), trainer.model.b.item()
            state = dataclasses.asdict(trainer.state)

            checkpoint = os.path.join(tmpdir, "checkpoint-5")

            # Reinitialize trainer and load model
            trainer = get_regression_trainer(**kwargs)

            trainer.train(resume_from_checkpoint=checkpoint)
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)

            # Now check with a later checkpoint that it also works when we span over one epoch
            checkpoint = os.path.join(tmpdir, "checkpoint-15")

            # Reinitialize trainer and load model
            trainer = get_regression_trainer(**kwargs)

            trainer.train(resume_from_checkpoint=checkpoint)
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)

        # Now check failures

        # 1. fail to find a bogus checkpoint
        trainer = get_regression_trainer()
        with self.assertRaises(Exception) as context:
            trainer.train(resume_from_checkpoint=f"{checkpoint}-bogus")
        self.assertTrue("Can't find a valid checkpoint at" in str(context.exception))

        # 2. fail to find any checkpoint - due to a fresh output_dir
        output_dir2 = self.get_auto_remove_tmp_dir()
        trainer = get_regression_trainer(output_dir=output_dir2)
        with self.assertRaises(Exception) as context:
            trainer.train(resume_from_checkpoint=True)
        self.assertTrue("No valid checkpoint found in output directory" in str(context.exception))
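
    # Resuming is validated by value equality of the model parameters (a, b) and of the full serialized
    # TrainerState, so any divergence in data order, RNG state, or optimizer state surfaces as a failure.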
    def test_resume_training_with_randomness(self):
        # For more than 1 GPUs, since the randomness is introduced in the model and with DataParallel (which is used
        # in this test for more than 2 GPUs), the calls to the torch RNG will happen in a random order (sometimes
        # GPU 0 will call first and sometimes GPU 1).
        random_torch = not torch.cuda.is_available() or torch.cuda.device_count() <= 1

        if torch.cuda.is_available():
            torch.backends.cudnn.deterministic = True

        train_dataset = RegressionDataset(length=128)
        eval_dataset = RegressionDataset()

        with self.subTest("Test every step"):
            config = RegressionModelConfig(a=0, b=2, random_torch=random_torch)
            model = RegressionRandomPreTrainedModel(config)

            tmp_dir = self.get_auto_remove_tmp_dir()
            args = RegressionTrainingArguments(tmp_dir, save_steps=5, learning_rate=0.1)
            trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)

            trainer.train()
            (a, b) = trainer.model.a.item(), trainer.model.b.item()

            model = RegressionRandomPreTrainedModel(config)
            trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
            trainer.train(resume_from_checkpoint=os.path.join(tmp_dir, "checkpoint-15"))
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()

            self.assertAlmostEqual(a, a1, delta=1e-5)
            self.assertAlmostEqual(b, b1, delta=1e-5)

        with self.subTest("Test every epoch"):
            config = RegressionModelConfig(a=0, b=2, random_torch=random_torch)
            model = RegressionRandomPreTrainedModel(config)

            tmp_dir = self.get_auto_remove_tmp_dir()
            args = RegressionTrainingArguments(tmp_dir, save_strategy="epoch", learning_rate=0.1)
            trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)

            trainer.train()
            (a, b) = trainer.model.a.item(), trainer.model.b.item()

            model = RegressionRandomPreTrainedModel(config)
            trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)

            checkpoints = [d for d in os.listdir(tmp_dir) if d.startswith("checkpoint-")]
            # There should be one checkpoint per epoch.
            self.assertEqual(len(checkpoints), 3)
            checkpoint_dir = sorted(checkpoints, key=lambda x: int(x.replace("checkpoint-", "")))[0]

            trainer.train(resume_from_checkpoint=os.path.join(tmp_dir, checkpoint_dir))
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()

            self.assertAlmostEqual(a, a1, delta=1e-5)
            self.assertAlmostEqual(b, b1, delta=1e-5)

    @slow
    @require_accelerate
    @require_torch_non_multi_accelerator
    def test_auto_batch_size_finder(self):
        if torch.cuda.is_available():
            torch.backends.cudnn.deterministic = True

        SRC_DIR = os.path.abspath(
            os.path.join(os.path.dirname(__file__), "..", "..", "examples", "pytorch", "text-classification")
        )
        sys.path.append(SRC_DIR)
        import run_glue

        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
                run_glue.py
                --model_name_or_path distilbert-base-uncased
                --task_name mrpc
                --do_train
                --do_eval
                --max_seq_len 128
                --per_device_train_batch_size 4096
                --learning_rate 2e-5
                --num_train_epochs 1
                --output_dir {tmpdir}
                --auto_find_batch_size 0
                """.split()
            with self.assertRaises(RuntimeError):
                with patch.object(sys, "argv", testargs):
                    run_glue.main()

            testargs[-1] = "1"

            with patch.object(sys, "argv", testargs):
                run_glue.main()

    def test_auto_batch_size_with_resume_from_checkpoint(self):
        train_dataset = RegressionDataset(length=128)

        config = RegressionModelConfig(a=0, b=2)
        model = RegressionRandomPreTrainedModel(config)

        tmp_dir = self.get_auto_remove_tmp_dir()

        class MockCudaOOMCallback(TrainerCallback):
            def on_step_end(self, args, state, control, **kwargs):
                # simulate OOM on the first step
                if state.train_batch_size == 16:
                    raise RuntimeError("CUDA out of memory.")

        args = RegressionTrainingArguments(
            tmp_dir,
            do_train=True,
            max_steps=2,
            save_steps=1,
            per_device_train_batch_size=16,
            auto_find_batch_size=True,
        )
        trainer = Trainer(model, args, train_dataset=train_dataset, callbacks=[MockCudaOOMCallback()])
        trainer.train()
        # After `auto_find_batch_size` has run we should now be at 8
        self.assertEqual(trainer._train_batch_size, 8)

        # We can then make a new Trainer
        trainer = Trainer(model, args, train_dataset=train_dataset)
        # Check we are at 16 to start
        self.assertEqual(trainer._train_batch_size, 16)
        trainer.train(resume_from_checkpoint=True)
        # We should be back to 8 again, picking up based upon the last run Trainer
        self.assertEqual(trainer._train_batch_size, 8)

    # regression for this issue: https://github.com/huggingface/transformers/issues/12970
    def test_training_with_resume_from_checkpoint_false(self):
        train_dataset = RegressionDataset(length=128)
        eval_dataset = RegressionDataset()

        config = RegressionModelConfig(a=0, b=2)
        model = RegressionRandomPreTrainedModel(config)

        tmp_dir = self.get_auto_remove_tmp_dir()
        args = RegressionTrainingArguments(tmp_dir, save_steps=5, learning_rate=0.1)
        trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)

        trainer.train(resume_from_checkpoint=False)

    @require_torch_up_to_2_accelerators
    def test_resume_training_with_shard_checkpoint(self):
        # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of
        # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model
        # won't be the same since the training dataloader is shuffled).

        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1)
            trainer.train()
            (a, b) = trainer.model.a.item(), trainer.model.b.item()
            state = dataclasses.asdict(trainer.state)

            checkpoint = os.path.join(tmpdir, "checkpoint-5")
            self.convert_to_sharded_checkpoint(checkpoint)

            # Reinitialize trainer
            trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1)

            trainer.train(resume_from_checkpoint=checkpoint)
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)
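
    # The test below tries all four (initial_safe, loaded_safe) combinations, i.e. resuming a
    # safetensors checkpoint with a .bin-saving trainer and vice versa, so the two serialization
    # formats must be interchangeable on resume.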
    @require_safetensors
    @require_torch_up_to_2_accelerators
    def test_resume_training_with_safe_checkpoint(self):
        # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of
        # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model
        # won't be the same since the training dataloader is shuffled).

        for initial_safe in [False, True]:
            for loaded_safe in [False, True]:
                with tempfile.TemporaryDirectory() as tmpdir:
                    trainer = get_regression_trainer(
                        output_dir=tmpdir,
                        train_len=128,
                        save_steps=5,
                        learning_rate=0.1,
                        save_safetensors=initial_safe,
                    )
                    trainer.train()
                    (a, b) = trainer.model.a.item(), trainer.model.b.item()
                    state = dataclasses.asdict(trainer.state)

                    checkpoint = os.path.join(tmpdir, "checkpoint-5")
                    self.convert_to_sharded_checkpoint(checkpoint, load_safe=initial_safe, save_safe=loaded_safe)

                    # Reinitialize trainer
                    trainer = get_regression_trainer(
                        output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1, save_safetensors=loaded_safe
                    )

                    trainer.train(resume_from_checkpoint=checkpoint)
                    (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
                    state1 = dataclasses.asdict(trainer.state)
                    self.assertEqual(a, a1)
                    self.assertEqual(b, b1)
                    self.check_trainer_state_are_the_same(state, state1)

    @require_torch_up_to_2_accelerators
    def test_resume_training_with_gradient_accumulation(self):
        # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of
        # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model
        # won't be the same since the training dataloader is shuffled).

        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(
                output_dir=tmpdir,
                train_len=128,
                gradient_accumulation_steps=2,
                per_device_train_batch_size=4,
                save_steps=5,
                learning_rate=0.1,
            )
            trainer.train()
            (a, b) = trainer.model.a.item(), trainer.model.b.item()
            state = dataclasses.asdict(trainer.state)

            checkpoint = os.path.join(tmpdir, "checkpoint-5")

            # Reinitialize trainer
            trainer = get_regression_trainer(
                output_dir=tmpdir,
                train_len=128,
                gradient_accumulation_steps=2,
                per_device_train_batch_size=4,
                save_steps=5,
                learning_rate=0.1,
            )

            trainer.train(resume_from_checkpoint=checkpoint)
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)

    @require_torch_up_to_2_accelerators
    def test_resume_training_with_frozen_params(self):
        # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of
        # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model
        # won't be the same since the training dataloader is shuffled).
        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(
                output_dir=tmpdir,
                train_len=128,
                per_device_train_batch_size=4,
                save_steps=5,
                learning_rate=0.1,
            )
            trainer.model.a.requires_grad_(False)
            trainer.train()
            (a, b) = trainer.model.a.item(), trainer.model.b.item()
            state = dataclasses.asdict(trainer.state)

            checkpoint = os.path.join(tmpdir, "checkpoint-5")

            # Reinitialize trainer
            trainer = get_regression_trainer(
                output_dir=tmpdir,
                train_len=128,
                per_device_train_batch_size=4,
                save_steps=5,
                learning_rate=0.1,
            )
            trainer.model.a.requires_grad_(False)

            trainer.train(resume_from_checkpoint=checkpoint)

            self.assertFalse(trainer.model.a.requires_grad)
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)

    def test_load_best_model_at_end(self):
        total = int(self.n_epochs * 64 / self.batch_size)
        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(
                a=1.5,
                b=2.5,
                output_dir=tmpdir,
                learning_rate=0.1,
                eval_steps=5,
                evaluation_strategy="steps",
                save_steps=5,
                load_best_model_at_end=True,
            )
            self.assertFalse(trainer.args.greater_is_better)
            trainer.train()
            self.check_saved_checkpoints(tmpdir, 5, total)
            self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_loss")

        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(
                a=1.5,
                b=2.5,
                output_dir=tmpdir,
                learning_rate=0.1,
                eval_steps=5,
                evaluation_strategy="steps",
                save_steps=5,
                load_best_model_at_end=True,
                metric_for_best_model="accuracy",
                compute_metrics=AlmostAccuracy(),
            )
            self.assertTrue(trainer.args.greater_is_better)
            trainer.train()
            self.check_saved_checkpoints(tmpdir, 5, total)
            self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_accuracy", greater_is_better=True)

        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(
                a=1.5,
                b=2.5,
                output_dir=tmpdir,
                learning_rate=0.1,
                evaluation_strategy="epoch",
                save_strategy="epoch",
                load_best_model_at_end=True,
                metric_for_best_model="accuracy",
                compute_metrics=AlmostAccuracy(),
            )
            self.assertTrue(trainer.args.greater_is_better)
            trainer.train()
            self.check_saved_checkpoints(tmpdir, 64 // self.batch_size, total)
            self.check_best_model_has_been_loaded(
                tmpdir, 64 // self.batch_size, total, trainer, "eval_accuracy", greater_is_better=True
            )

        # Test this works with a non PreTrainedModel
        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(
                output_dir=tmpdir,
                learning_rate=0.1,
                eval_steps=5,
                evaluation_strategy="steps",
                save_steps=5,
                load_best_model_at_end=True,
                pretrained=False,
            )
            self.assertFalse(trainer.args.greater_is_better)
            trainer.train()
            self.check_saved_checkpoints(tmpdir, 5, total, is_pretrained=False)
            self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_loss", is_pretrained=False)
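
    # load_best_model_at_end is also exercised for every (save_safetensors, pretrained) combination
    # below, since the best checkpoint may live in model.safetensors or pytorch_model.bin and the model
    # may or may not be a PreTrainedModel.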
    @require_safetensors
    def test_load_best_model_from_safetensors(self):
        total = int(self.n_epochs * 64 / self.batch_size)
        for save_safetensors, pretrained in product([False, True], [False, True]):
            with tempfile.TemporaryDirectory() as tmpdir:
                trainer = get_regression_trainer(
                    a=1.5,
                    b=2.5,
                    output_dir=tmpdir,
                    learning_rate=0.1,
                    eval_steps=5,
                    evaluation_strategy="steps",
                    save_steps=5,
                    load_best_model_at_end=True,
                    save_safetensors=save_safetensors,
                    pretrained=pretrained,
                )
                self.assertFalse(trainer.args.greater_is_better)
                trainer.train()
                self.check_saved_checkpoints(tmpdir, 5, total, is_pretrained=pretrained, safe_weights=save_safetensors)
                self.check_best_model_has_been_loaded(
                    tmpdir, 5, total, trainer, "eval_loss", is_pretrained=pretrained, safe_weights=save_safetensors
                )

    @slow
    def test_trainer_eval_mrpc(self):
        MODEL_ID = "bert-base-cased-finetuned-mrpc"
        tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
        model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID)
        data_args = GlueDataTrainingArguments(
            task_name="mrpc", data_dir=f"{get_tests_dir()}/fixtures/tests_samples/MRPC", overwrite_cache=True
        )
        eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")

        training_args = TrainingArguments(output_dir="./examples", use_cpu=True)
        trainer = Trainer(model=model, args=training_args, eval_dataset=eval_dataset)
        result = trainer.evaluate()
        self.assertLess(result["eval_loss"], 0.2)

    @slow
    def test_trainer_eval_lm(self):
        MODEL_ID = "distilroberta-base"
        tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
        dataset = LineByLineTextDataset(
            tokenizer=tokenizer,
            file_path=PATH_SAMPLE_TEXT,
            block_size=tokenizer.max_len_single_sentence,
        )
        self.assertEqual(len(dataset), 31)

    def test_training_iterable_dataset(self):
        config = RegressionModelConfig()
        model = RegressionPreTrainedModel(config)
        # Adding one column not used by the model should have no impact
        train_dataset = SampleIterableDataset(label_names=["labels", "extra"])

        args = RegressionTrainingArguments(output_dir="./examples", max_steps=4)
        trainer = Trainer(model=model, args=args, train_dataset=train_dataset)
        trainer.train()
        self.assertEqual(trainer.state.global_step, 4)

        loader = trainer.get_train_dataloader()
        self.assertIsInstance(loader, torch.utils.data.DataLoader)
        self.assertIsInstance(loader.sampler, torch.utils.data.dataloader._InfiniteConstantSampler)

    def test_evaluation_iterable_dataset(self):
        config = RegressionModelConfig(a=1.5, b=2.5)
        model = RegressionPreTrainedModel(config)
        # Adding one column not used by the model should have no impact
        eval_dataset = SampleIterableDataset(label_names=["labels", "extra"])

        args = RegressionTrainingArguments(output_dir="./examples")
        trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset, compute_metrics=AlmostAccuracy())
        results = trainer.evaluate()

        x, y = trainer.eval_dataset.dataset.x, trainer.eval_dataset.dataset.ys[0]
        pred = 1.5 * x + 2.5
        expected_loss = ((pred - y) ** 2).mean()
        self.assertAlmostEqual(results["eval_loss"], expected_loss)
        expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
        self.assertAlmostEqual(results["eval_accuracy"], expected_acc)

        # With a number of elements not a round multiple of the batch size
        eval_dataset = SampleIterableDataset(length=66)
        results = trainer.evaluate(eval_dataset)

        x, y = eval_dataset.dataset.x, eval_dataset.dataset.ys[0]
        pred = 1.5 * x + 2.5
        expected_loss = ((pred - y) ** 2).mean()
        self.assertAlmostEqual(results["eval_loss"], expected_loss)
        expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
        self.assertAlmostEqual(results["eval_accuracy"], expected_acc)

    def test_predict_iterable_dataset(self):
        config = RegressionModelConfig(a=1.5, b=2.5)
        model = RegressionPreTrainedModel(config)
        eval_dataset = SampleIterableDataset()

        args = RegressionTrainingArguments(output_dir="./examples")
        trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset, compute_metrics=AlmostAccuracy())

        preds = trainer.predict(trainer.eval_dataset).predictions
        x = eval_dataset.dataset.x
        self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))

        # With a number of elements not a round multiple of the batch size
        # Adding one column not used by the model should have no impact
        test_dataset = SampleIterableDataset(length=66, label_names=["labels", "extra"])
        preds = trainer.predict(test_dataset).predictions
        x = test_dataset.dataset.x
        self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))

    def test_num_train_epochs_in_training(self):
        # len(train_dl) < gradient_accumulation_steps shouldn't give ``ZeroDivisionError`` when ``max_steps`` is given.
        # It should give 1 update step for each epoch.
        trainer = get_regression_trainer(
            max_steps=3, train_len=64, per_device_train_batch_size=16, gradient_accumulation_steps=5
        )
        train_output = trainer.train()
        self.assertEqual(train_output.global_step, 3)

        # Even if ``max_steps`` is not specified, we still expect 1 update step for each epoch if
        # len(train_dl) < gradient_accumulation_steps.
        trainer = get_regression_trainer(train_len=64, per_device_train_batch_size=16, gradient_accumulation_steps=5)
        train_output = trainer.train()
        self.assertEqual(train_output.global_step, int(self.n_epochs))

    def test_early_stopping_callback(self):
        # early stopping stops training before num_training_epochs
        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = get_regression_trainer(
                output_dir=tmp_dir,
                num_train_epochs=20,
                gradient_accumulation_steps=1,
                per_device_train_batch_size=16,
                load_best_model_at_end=True,
                evaluation_strategy=IntervalStrategy.EPOCH,
                save_strategy=IntervalStrategy.EPOCH,
                compute_metrics=AlmostAccuracy(),
                metric_for_best_model="accuracy",
            )
            trainer.add_callback(EarlyStoppingCallback(1, 0.0001))
            train_output = trainer.train()
            self.assertLess(train_output.global_step, 20 * 64 / 16)

        # Invalid inputs to trainer with early stopping callback result in assertion error
        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = get_regression_trainer(
                output_dir=tmp_dir,
                num_train_epochs=20,
                gradient_accumulation_steps=1,
                per_device_train_batch_size=16,
                evaluation_strategy=IntervalStrategy.EPOCH,
                compute_metrics=AlmostAccuracy(),
                metric_for_best_model="accuracy",
            )
            trainer.add_callback(EarlyStoppingCallback(1))
            self.assertEqual(trainer.state.global_step, 0)
            try:
                trainer.train()
            except AssertionError:
                self.assertEqual(trainer.state.global_step, 0)

    def test_flos_extraction(self):
        trainer = get_regression_trainer(learning_rate=0.1)

        def assert_flos_extraction(trainer, wrapped_model_to_check):
            self.assertEqual(trainer.model, unwrap_model(wrapped_model_to_check))
            self.assertGreaterEqual(getattr(unwrap_model(wrapped_model_to_check).config, "total_flos", 0), 0)

        # with plain model
        assert_flos_extraction(trainer, trainer.model)

        # with enforced DataParallel
        assert_flos_extraction(trainer, nn.DataParallel(trainer.model))

        trainer.train()
        self.assertTrue(isinstance(trainer.state.total_flos, float))

    def check_checkpoint_deletion(self, trainer, output_dir, expected):
        # Make fake checkpoints
        for n in [5, 10, 15, 20, 25]:
            os.makedirs(os.path.join(output_dir, f"{PREFIX_CHECKPOINT_DIR}-{n}"), exist_ok=True)
        trainer._rotate_checkpoints(output_dir=output_dir)
        glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{PREFIX_CHECKPOINT_DIR}-*")]
        values = [int(re.match(f".*{PREFIX_CHECKPOINT_DIR}-([0-9]+)", d).groups()[0]) for d in glob_checkpoints]
        self.assertSetEqual(set(values), set(expected))
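
    # Expected rotation semantics: keep the `save_total_limit` most recent checkpoints (highest step
    # numbers), except that the checkpoint recorded in `state.best_model_checkpoint` is always kept.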
    def test_checkpoint_rotation(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # Without best model at end
            trainer = get_regression_trainer(output_dir=tmp_dir, save_total_limit=2)
            self.check_checkpoint_deletion(trainer, tmp_dir, [20, 25])

            # With best model at end
            trainer = get_regression_trainer(
                output_dir=tmp_dir, evaluation_strategy="steps", load_best_model_at_end=True, save_total_limit=2
            )
            trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-5")
            self.check_checkpoint_deletion(trainer, tmp_dir, [5, 25])

            # Edge case: we don't always honor save_total_limit=1 if load_best_model_at_end=True to be able to resume
            # from checkpoint
            trainer = get_regression_trainer(
                output_dir=tmp_dir, evaluation_strategy="steps", load_best_model_at_end=True, save_total_limit=1
            )
            trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-25")
            self.check_checkpoint_deletion(trainer, tmp_dir, [25])

            trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-5")
            self.check_checkpoint_deletion(trainer, tmp_dir, [5, 25])

    def check_mem_metrics(self, trainer, check_func):
        metrics = trainer.train().metrics
        check_func("init_mem_cpu_alloc_delta", metrics)
        check_func("train_mem_cpu_alloc_delta", metrics)
        if backend_device_count(torch_device) > 0:
            check_func("init_mem_gpu_alloc_delta", metrics)
            check_func("train_mem_gpu_alloc_delta", metrics)

        metrics = trainer.evaluate()
        check_func("eval_mem_cpu_alloc_delta", metrics)
        if backend_device_count(torch_device) > 0:
            check_func("eval_mem_gpu_alloc_delta", metrics)

        metrics = trainer.predict(RegressionDataset()).metrics
        check_func("test_mem_cpu_alloc_delta", metrics)
        if backend_device_count(torch_device) > 0:
            check_func("test_mem_gpu_alloc_delta", metrics)

    def test_mem_metrics(self):
        # with mem metrics enabled
        trainer = get_regression_trainer(skip_memory_metrics=False)
        self.check_mem_metrics(trainer, self.assertIn)

        # with mem metrics disabled
        trainer = get_regression_trainer(skip_memory_metrics=True)
        self.check_mem_metrics(trainer, self.assertNotIn)
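
    # The *_full_eval tests below build on the memory metrics checked above: in fp32 the model is put
    # on device during Trainer.__init__ (~64KB for these params), while with fp16/bf16_full_eval it is
    # only moved, and halved, at evaluation time.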
    @require_torch_accelerator
    def test_fp16_full_eval(self):
        # this is a sensitive test so let's keep debugging printouts in place for quick diagnosis.
        # it's using pretty large safety margins, but small enough to detect broken functionality.
        debug = 0
        n_gpus = backend_device_count(torch_device)

        bs = 8
        eval_len = 16 * n_gpus

        # make the params somewhat big so that there will be enough RAM consumed to be able to
        # measure things. We should get about 64KB for a+b in fp32
        a = torch.ones(1000, bs) + 0.001
        b = torch.ones(1000, bs) - 0.001

        # 1. with fp16_full_eval disabled
        trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, skip_memory_metrics=False)
        metrics = trainer.evaluate()
        del trainer
        gc.collect()

        fp32_init = metrics["init_mem_gpu_alloc_delta"]
        fp32_eval = metrics["eval_mem_gpu_alloc_delta"]

        if debug:
            print(f"fp32_init {fp32_init}")
            print(f"fp32_eval {fp32_eval}")

        # here we expect the model to be preloaded in trainer.__init__ and consume around 64K gpu ram.
        # perfect world: fp32_init == 64<<10
        self.assertGreater(fp32_init, 59_000)
        # after eval should be no extra memory allocated - with a small margin (other than the peak
        # memory consumption for the forward calculation that gets recovered)
        # perfect world: fp32_eval == close to zero
        self.assertLess(fp32_eval, 5_000)

        # 2. with fp16_full_eval enabled
        trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, fp16_full_eval=True, skip_memory_metrics=False)
        metrics = trainer.evaluate()

        fp16_init = metrics["init_mem_gpu_alloc_delta"]
        fp16_eval = metrics["eval_mem_gpu_alloc_delta"]

        if debug:
            print(f"fp16_init {fp16_init}")
            print(f"fp16_eval {fp16_eval}")

        # here we expect the model to not be preloaded in trainer.__init__, so with a small margin it should be close to 0
        # perfect world: fp16_init == close to zero
        self.assertLess(fp16_init, 5_000)

        # here we put the model on device in eval and only `half()` of it, i.e. about 32K,(again we ignore the peak margin which gets returned back)
        # perfect world: fp32_init == 32<<10
        self.assertGreater(fp16_eval, 27_000)

        # 3. relative comparison fp32 vs full fp16
        # should be about half of fp16_init
        # perfect world: fp32_init/2 == fp16_eval
        self.assertAlmostEqual(fp16_eval, fp32_init / 2, delta=5_000)

    @require_torch_non_multi_gpu
    @require_torchdynamo
    @require_torch_tensorrt_fx
    def test_torchdynamo_full_eval(self):
        import torchdynamo

        # torchdynamo at the moment doesn't support DP/DDP, therefore require a single gpu
        n_gpus = get_gpu_count()

        bs = 8
        eval_len = 16 * n_gpus
        # make the params somewhat big so that there will be enough RAM consumed to be able to
        # measure things. We should get about 64KB for a+b in fp32
        a = torch.ones(1000, bs) + 0.001
        b = torch.ones(1000, bs) - 0.001

        # 1. Default - without TorchDynamo
        trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len)
        metrics = trainer.evaluate()
        original_eval_loss = metrics["eval_loss"]
        del trainer

        # 2. TorchDynamo eager
        trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, torchdynamo="eager")
        metrics = trainer.evaluate()
        self.assertAlmostEqual(metrics["eval_loss"], original_eval_loss)
        del trainer
        torchdynamo.reset()

        # 3. TorchDynamo nvfuser
        trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, torchdynamo="nvfuser")
        metrics = trainer.evaluate()
        self.assertAlmostEqual(metrics["eval_loss"], original_eval_loss)
        torchdynamo.reset()

        # 4. TorchDynamo fx2trt
        trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, torchdynamo="fx2trt")
        metrics = trainer.evaluate()
        self.assertAlmostEqual(metrics["eval_loss"], original_eval_loss)
        torchdynamo.reset()
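
    # The memory test below chains 20 `torch.cos` calls: eager mode keeps every intermediate alive for
    # backward, while the nvfuser + AOT Autograd path recomputes them, so the fused run is expected to
    # peak at less than half the eager baseline.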
    @unittest.skip("torch 2.0.0 gives `ModuleNotFoundError: No module named 'torchdynamo'`.")
    @require_torch_non_multi_gpu
    @require_torchdynamo
    def test_torchdynamo_memory(self):
        # torchdynamo at the moment doesn't support DP/DDP, therefore require a single gpu
        import torchdynamo

        class CustomTrainer(Trainer):
            def compute_loss(self, model, inputs, return_outputs=False):
                x = inputs["x"]
                output = model(x)
                if self.args.n_gpu == 1:
                    return output.mean()
                return output

        class MyModule(torch.nn.Module):
            """Simple module that does aggressive fusion"""

            def __init__(self):
                super().__init__()

            def forward(self, x):
                for _ in range(20):
                    x = torch.cos(x)
                return x

        mod = MyModule()

        # 1. without TorchDynamo (eager baseline)
        a = torch.ones(1024, 1024, device="cuda", requires_grad=True)
        a.grad = None
        trainer = CustomTrainer(model=mod)
        # warmup
        for _ in range(10):
            orig_loss = trainer.training_step(mod, {"x": a})

        # resets
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats()

        orig_loss = trainer.training_step(mod, {"x": a})
        orig_peak_mem = torch.cuda.max_memory_allocated()
        torchdynamo.reset()
        del trainer

        # 2. TorchDynamo nvfuser
        a = torch.ones(1024, 1024, device="cuda", requires_grad=True)
        a.grad = None
        args = TrainingArguments(output_dir="None", torchdynamo="nvfuser")
        trainer = CustomTrainer(model=mod, args=args)
        # warmup
        for _ in range(10):
            loss = trainer.training_step(mod, {"x": a})

        # resets
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats()

        loss = trainer.training_step(mod, {"x": a})
        peak_mem = torch.cuda.max_memory_allocated()
        torchdynamo.reset()
        del trainer

        # Functional check
        self.assertAlmostEqual(loss, orig_loss)

        # AOT Autograd recomputation and nvfuser recomputation optimization
        # aggressively fuse the operations and reduce the memory footprint.
        self.assertGreater(orig_peak_mem, peak_mem * 2)

    @require_torch_accelerator
    @require_torch_bf16
    def test_bf16_full_eval(self):
        # note: most of the logic is the same as test_fp16_full_eval

        # this is a sensitive test so let's keep debugging printouts in place for quick diagnosis.
        # it's using pretty large safety margins, but small enough to detect broken functionality.
        debug = 0
        n_gpus = backend_device_count(torch_device)

        bs = 8
        eval_len = 16 * n_gpus

        # make the params somewhat big so that there will be enough RAM consumed to be able to
        # measure things. We should get about 64KB for a+b in fp32
        a = torch.ones(1000, bs) + 0.001
        b = torch.ones(1000, bs) - 0.001

        # 1. with bf16_full_eval disabled
        trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, skip_memory_metrics=False)
        metrics = trainer.evaluate()
        del trainer
        gc.collect()

        fp32_init = metrics["init_mem_gpu_alloc_delta"]
        fp32_eval = metrics["eval_mem_gpu_alloc_delta"]

        if debug:
            print(f"fp32_init {fp32_init}")
            print(f"fp32_eval {fp32_eval}")

        # here we expect the model to be preloaded in trainer.__init__ and consume around 64K gpu ram.
        # perfect world: fp32_init == 64<<10
        self.assertGreater(fp32_init, 59_000)
        # after eval should be no extra memory allocated - with a small margin (other than the peak
        # memory consumption for the forward calculation that gets recovered)
        # perfect world: fp32_eval == close to zero
        self.assertLess(fp32_eval, 5_000)

        # 2. with bf16_full_eval enabled
        trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, bf16_full_eval=True, skip_memory_metrics=False)
        metrics = trainer.evaluate()

        bf16_init = metrics["init_mem_gpu_alloc_delta"]
        bf16_eval = metrics["eval_mem_gpu_alloc_delta"]

        if debug:
            print(f"bf16_init {bf16_init}")
            print(f"bf16_eval {bf16_eval}")

        # here we expect the model to not be preloaded in trainer.__init__, so with a small margin it should be close to 0
        # perfect world: bf16_init == close to zero
        self.assertLess(bf16_init, 5_000)

        # here we put the model on device in eval and only `half()` of it, i.e. about 32K,(again we ignore the peak margin which gets returned back)
        # perfect world: fp32_init == 32<<10
        self.assertGreater(bf16_eval, 27_000)

        # 3. relative comparison fp32 vs full bf16
        # should be about half of bf16_init
        # perfect world: fp32_init/2 == bf16_eval
        self.assertAlmostEqual(bf16_eval, fp32_init / 2, delta=5_000)

    def test_no_wd_param_group(self):
        model = nn.Sequential(TstLayer(128), nn.ModuleList([TstLayer(128), TstLayer(128)]))
        trainer = Trainer(model=model)
        trainer.create_optimizer_and_scheduler(10)
        wd_names = ['0.linear1.weight', '0.linear2.weight', '1.0.linear1.weight', '1.0.linear2.weight', '1.1.linear1.weight', '1.1.linear2.weight']  # fmt: skip
        wd_params = [p for n, p in model.named_parameters() if n in wd_names]
        no_wd_params = [p for n, p in model.named_parameters() if n not in wd_names]
        self.assertListEqual(trainer.optimizer.param_groups[0]["params"], wd_params)
        self.assertListEqual(trainer.optimizer.param_groups[1]["params"], no_wd_params)

    @slow
    @require_torch_multi_accelerator
    def test_end_to_end_example(self):
        # Tests that `translation.py` will run without issues
        script_path = os.path.abspath(
            os.path.join(
                os.path.dirname(__file__), "..", "..", "examples", "pytorch", "translation", "run_translation.py"
            )
        )

        with tempfile.TemporaryDirectory() as tmpdir:
            command = [
                "accelerate",
                "launch",
                script_path,
                "--model_name_or_path",
                "t5-small",
                "--per_device_train_batch_size",
                "1",
                "--output_dir",
                tmpdir,
                "--overwrite_output_dir",
                "--do_train",
                "--max_train_samples",
                "64",
                "--num_train_epochs",
                "1",
                "--dataset_name",
                "wmt16",
                "--dataset_config",
                "ro-en",
                "--source_lang",
                "en",
                "--target_lang",
                "ro",
                "--do_predict",
                "--max_predict_samples",
                "64",
                "--predict_with_generate",
                "--ddp_timeout",
                "60",
            ]
            execute_subprocess_async(command)
            # successful return here == success - any errors would have caused an error or a timeout in the sub-call


@require_torch
@is_staging_test
class TrainerIntegrationWithHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        for model in ["test-trainer", "test-trainer-epoch", "test-trainer-step", "test-trainer-tensorboard"]:
            try:
                delete_repo(token=cls._token, repo_id=model)
            except HTTPError:
                pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-trainer-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = get_regression_trainer(
                output_dir=os.path.join(tmp_dir, "test-trainer"),
                push_to_hub=True,
                hub_token=self._token,
            )
            url = trainer.push_to_hub()

        # Extract repo_name from the url
        re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url)
        self.assertTrue(re_search is not None)
        repo_name = re_search.groups()[0]

        self.assertEqual(repo_name, f"{USER}/test-trainer")

        model = RegressionPreTrainedModel.from_pretrained(repo_name)
        self.assertEqual(model.a.item(), trainer.model.a.item())
        self.assertEqual(model.b.item(), trainer.model.b.item())

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = get_regression_trainer(output_dir=tmp_dir)
            trainer.save_model()
            trainer = get_regression_trainer(
                output_dir=os.path.join(tmp_dir, "test-trainer-org"),
                push_to_hub=True,
                hub_model_id="valid_org/test-trainer-org",
                hub_token=self._token,
            )
            url = trainer.push_to_hub()

        # Extract repo_name from the url
        re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url)
        self.assertTrue(re_search is not None)
        repo_name = re_search.groups()[0]
        self.assertEqual(repo_name, "valid_org/test-trainer-org")

        model = RegressionPreTrainedModel.from_pretrained("valid_org/test-trainer-org")
        self.assertEqual(model.a.item(), trainer.model.a.item())
        self.assertEqual(model.b.item(), trainer.model.b.item())

    def get_commit_history(self, repo):
        commit_logs = subprocess.run(
            "git log".split(),
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
            check=True,
            encoding="utf-8",
            cwd=repo,
        ).stdout
        commits = commit_logs.split("\n\n")[1::2]
        return [commit.strip() for commit in commits]

    def test_push_to_hub_with_saves_each_epoch(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = get_regression_trainer(
                output_dir=os.path.join(tmp_dir, "test-trainer-epoch"),
                push_to_hub=True,
                hub_token=self._token,
                # To avoid any flakiness if the training goes faster than the uploads.
                hub_always_push=True,
                save_strategy="epoch",
            )
            trainer.train()

        commits = list_repo_commits(f"{USER}/test-trainer-epoch", token=self._token)
        commits = [c.title for c in commits]
        self.assertIn("initial commit", commits)
        for i in range(1, 4):
            self.assertIn(f"Training in progress, epoch {i}", commits)

    def test_push_to_hub_with_saves_each_n_steps(self):
        num_gpus = max(1, backend_device_count(torch_device))
        if num_gpus > 2:
            return

        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = get_regression_trainer(
                output_dir=os.path.join(tmp_dir, "test-trainer-step"),
                push_to_hub=True,
                hub_token=self._token,
                # To avoid any flakiness if the training goes faster than the uploads.
                hub_always_push=True,
                save_strategy="steps",
                save_steps=5,
            )
            trainer.train()

        commits = list_repo_commits(f"{USER}/test-trainer-step", token=self._token)
        commits = [c.title for c in commits]
        self.assertIn("initial commit", commits)

        # max_steps depends on the number of available GPUs
        max_steps = math.ceil(trainer.args.num_train_epochs * len(trainer.get_train_dataloader()))
        for i in range(5, max_steps, 5):
            self.assertIn(f"Training in progress, step {i}", commits)

    @require_tensorboard
    def test_push_to_hub_with_tensorboard_logs(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = get_regression_trainer(
                output_dir=os.path.join(tmp_dir, "test-trainer-tensorboard"),
                hub_token=self._token,
                save_strategy="epoch",
                report_to=["tensorboard"],
                keep_report_to=True,
            )
            trainer.train()
            # Push the runs via `push_to_hub()`
            trainer.push_to_hub()

        files = list_repo_files(f"{USER}/test-trainer-tensorboard", token=self._token)
        found_log = False
        for f in files:
            if len(f.split("runs")) > 1 and "events.out.tfevents" in f:
                found_log = True

        assert found_log is True, "No tensorboard log found in repo"


@require_torch
@require_optuna
class TrainerHyperParameterOptunaIntegrationTest(unittest.TestCase):
    def setUp(self):
        args = TrainingArguments("..")
        self.n_epochs = args.num_train_epochs
        self.batch_size = args.train_batch_size

    def test_hyperparameter_search(self):
        class MyTrialShortNamer(TrialShortNamer):
            DEFAULTS = {"a": 0, "b": 0}

        def hp_space(trial):
            return {}

        def model_init(trial):
            if trial is not None:
                a = trial.suggest_int("a", -4, 4)
                b = trial.suggest_int("b", -4, 4)
            else:
                a = 0
                b = 0
            config = RegressionModelConfig(a=a, b=b, double_output=False)
            return RegressionPreTrainedModel(config)

        def hp_name(trial):
            return MyTrialShortNamer.shortname(trial.params)

        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = get_regression_trainer(
                output_dir=tmp_dir,
                learning_rate=0.1,
                logging_steps=1,
                evaluation_strategy=IntervalStrategy.EPOCH,
                save_strategy=IntervalStrategy.EPOCH,
                num_train_epochs=4,
                disable_tqdm=True,
                load_best_model_at_end=True,
                logging_dir="runs",
                run_name="test",
                model_init=model_init,
            )
            trainer.hyperparameter_search(direction="minimize", hp_space=hp_space, hp_name=hp_name, n_trials=4)
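
# The next class is the multi-objective variant of the Optuna test: `direction` becomes a list and
# `compute_objective` returns one value per direction (eval_loss to minimize, eval_accuracy to maximize).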
@require_torch
@require_optuna
class TrainerHyperParameterMultiObjectOptunaIntegrationTest(unittest.TestCase):
    def setUp(self):
        args = TrainingArguments("..")
        self.n_epochs = args.num_train_epochs
        self.batch_size = args.train_batch_size

    def test_hyperparameter_search(self):
        class MyTrialShortNamer(TrialShortNamer):
            DEFAULTS = {"a": 0, "b": 0}

        def hp_space(trial):
            return {}

        def model_init(trial):
            if trial is not None:
                a = trial.suggest_int("a", -4, 4)
                b = trial.suggest_int("b", -4, 4)
            else:
                a = 0
                b = 0
            config = RegressionModelConfig(a=a, b=b, double_output=False)
            return RegressionPreTrainedModel(config)

        def hp_name(trial):
            return MyTrialShortNamer.shortname(trial.params)

        def compute_objective(metrics: Dict[str, float]) -> List[float]:
            return metrics["eval_loss"], metrics["eval_accuracy"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = get_regression_trainer(
                output_dir=tmp_dir,
                learning_rate=0.1,
                logging_steps=1,
                evaluation_strategy=IntervalStrategy.EPOCH,
                save_strategy=IntervalStrategy.EPOCH,
                num_train_epochs=10,
                disable_tqdm=True,
                load_best_model_at_end=True,
                logging_dir="runs",
                run_name="test",
                model_init=model_init,
                compute_metrics=AlmostAccuracy(),
            )
            trainer.hyperparameter_search(
                direction=["minimize", "maximize"],
                hp_space=hp_space,
                hp_name=hp_name,
                n_trials=4,
                compute_objective=compute_objective,
            )


@require_torch
@require_ray
class TrainerHyperParameterRayIntegrationTest(unittest.TestCase):
    def setUp(self):
        args = TrainingArguments("..")
        self.n_epochs = args.num_train_epochs
        self.batch_size = args.train_batch_size

    def ray_hyperparameter_search(self):
        class MyTrialShortNamer(TrialShortNamer):
            DEFAULTS = {"a": 0, "b": 0}

        def hp_space(trial):
            from ray import tune

            return {
                "a": tune.randint(-4, 4),
                "b": tune.randint(-4, 4),
            }

        def model_init(config):
            if config is None:
                a = 0
                b = 0
            else:
                a = config["a"]
                b = config["b"]
            model_config = RegressionModelConfig(a=a, b=b, double_output=False)
            return RegressionPreTrainedModel(model_config)

        def hp_name(params):
            return MyTrialShortNamer.shortname(params)

        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = get_regression_trainer(
                output_dir=tmp_dir,
                learning_rate=0.1,
                logging_steps=1,
                evaluation_strategy=IntervalStrategy.EPOCH,
                save_strategy=IntervalStrategy.EPOCH,
                num_train_epochs=4,
                disable_tqdm=True,
                load_best_model_at_end=True,
                logging_dir="runs",
                run_name="test",
                model_init=model_init,
            )
            trainer.hyperparameter_search(
                direction="minimize", hp_space=hp_space, hp_name=hp_name, backend="ray", n_trials=4
            )

    def test_hyperparameter_search(self):
        self.ray_hyperparameter_search()

    def test_hyperparameter_search_ray_client(self):
        import ray
        from ray.util.client.ray_client_helpers import ray_start_client_server

        with ray_start_client_server():
            assert ray.util.client.ray.is_connected()
            self.ray_hyperparameter_search()


@slow
@require_torch
@require_sigopt
class TrainerHyperParameterSigOptIntegrationTest(unittest.TestCase):
    def setUp(self):
        args = TrainingArguments("..")
        self.n_epochs = args.num_train_epochs
        self.batch_size = args.train_batch_size

    def test_hyperparameter_search(self):
        class MyTrialShortNamer(TrialShortNamer):
            DEFAULTS = {"a": 0, "b": 0}

        def hp_space(trial):
            return [
                {"bounds": {"min": -4, "max": 4}, "name": "a", "type": "int"},
                {"bounds": {"min": -4, "max": 4}, "name": "b", "type": "int"},
            ]

        def model_init(trial):
            if trial is not None:
                a = trial.assignments["a"]
                b = trial.assignments["b"]
            else:
                a = 0
                b = 0
            config = RegressionModelConfig(a=a, b=b, double_output=False)
            return RegressionPreTrainedModel(config)
        def hp_name(trial):
            return MyTrialShortNamer.shortname(trial.assignments)

        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = get_regression_trainer(
                output_dir=tmp_dir,
                learning_rate=0.1,
                logging_steps=1,
                evaluation_strategy=IntervalStrategy.EPOCH,
                save_strategy=IntervalStrategy.EPOCH,
                num_train_epochs=4,
                disable_tqdm=True,
                load_best_model_at_end=True,
                logging_dir="runs",
                run_name="test",
                model_init=model_init,
            )
            trainer.hyperparameter_search(
                direction="minimize", hp_space=hp_space, hp_name=hp_name, backend="sigopt", n_trials=4
            )


optim_test_params = []
if is_torch_available():
    default_adam_kwargs = {
        "betas": (TrainingArguments.adam_beta1, TrainingArguments.adam_beta2),
        "eps": TrainingArguments.adam_epsilon,
        "lr": TrainingArguments.learning_rate,
    }

    default_lion_kwargs = {
        "betas": (TrainingArguments.adam_beta1, TrainingArguments.adam_beta2),
        "lr": TrainingArguments.learning_rate,
    }

    default_anyprecision_kwargs = {
        "use_kahan_summation": False,
        "momentum_dtype": torch.float32,
        "variance_dtype": torch.float32,
        "compensation_buffer_dtype": torch.bfloat16,
    }

    optim_test_params = [
        (
            TrainingArguments(optim=OptimizerNames.ADAMW_HF, output_dir="None"),
            transformers.optimization.AdamW,
            default_adam_kwargs,
        ),
        (
            TrainingArguments(optim=OptimizerNames.ADAMW_HF.value, output_dir="None"),
            transformers.optimization.AdamW,
            default_adam_kwargs,
        ),
        (
            TrainingArguments(optim=OptimizerNames.ADAMW_TORCH, output_dir="None"),
            torch.optim.AdamW,
            default_adam_kwargs,
        ),
        (
            TrainingArguments(optim=OptimizerNames.ADAFACTOR, output_dir="None"),
            transformers.optimization.Adafactor,
            {
                "scale_parameter": False,
                "relative_step": False,
                "lr": TrainingArguments.learning_rate,
            },
        ),
    ]

    if is_apex_available():
        import apex

        optim_test_params.append(
            (
                TrainingArguments(optim=OptimizerNames.ADAMW_APEX_FUSED, output_dir="None"),
                apex.optimizers.FusedAdam,
                default_adam_kwargs,
            )
        )

    if is_bitsandbytes_available():
        import bitsandbytes as bnb

        optim_test_params.append(
            (
                TrainingArguments(optim=OptimizerNames.ADAMW_BNB, output_dir="None"),
                bnb.optim.AdamW,
                default_adam_kwargs,
            )
        )

        optim_test_params.append(
            (
                TrainingArguments(optim=OptimizerNames.ADAMW_8BIT, output_dir="None"),
                bnb.optim.AdamW,
                default_adam_kwargs,
            )
        )

        optim_test_params.append(
            (
                TrainingArguments(optim=OptimizerNames.PAGED_ADAMW, output_dir="None"),
                bnb.optim.AdamW,
                default_adam_kwargs,
            )
        )

        optim_test_params.append(
            (
                TrainingArguments(optim=OptimizerNames.PAGED_ADAMW_8BIT, output_dir="None"),
                bnb.optim.AdamW,
                default_adam_kwargs,
            )
        )

        optim_test_params.append(
            (
                TrainingArguments(optim=OptimizerNames.LION, output_dir="None"),
                bnb.optim.Lion,
                default_lion_kwargs,
            )
        )

        optim_test_params.append(
            (
                TrainingArguments(optim=OptimizerNames.LION_8BIT, output_dir="None"),
                bnb.optim.Lion,
                default_lion_kwargs,
            )
        )

        optim_test_params.append(
            (
                TrainingArguments(optim=OptimizerNames.PAGED_LION_8BIT, output_dir="None"),
                bnb.optim.Lion,
                default_lion_kwargs,
            )
        )

    if is_torchdistx_available():
        import torchdistx

        optim_test_params.append(
            (
                TrainingArguments(optim=OptimizerNames.ADAMW_ANYPRECISION, output_dir="None"),
                torchdistx.optimizers.AnyPrecisionAdamW,
                dict(default_adam_kwargs, **default_anyprecision_kwargs),
            )
        )
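
# Each entry of optim_test_params is (TrainingArguments, expected optimizer class, expected kwargs) and
# is fed to the parameterized test below; a triple is only registered when its backing library imports.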
@require_torch
class TrainerOptimizerChoiceTest(unittest.TestCase):
    def check_optim_and_kwargs(self, training_args: TrainingArguments, expected_cls, expected_kwargs):
        actual_cls, optim_kwargs = Trainer.get_optimizer_cls_and_kwargs(training_args)
        self.assertEqual(expected_cls, actual_cls)
        self.assertIsNotNone(optim_kwargs)

        for p, v in expected_kwargs.items():
            self.assertTrue(p in optim_kwargs)
            actual_v = optim_kwargs[p]
            self.assertTrue(actual_v == v, f"Failed check for {p}. Expected {v}, but got {actual_v}.")

    @parameterized.expand(optim_test_params, skip_on_empty=True)
    def test_optim_supported(self, training_args: TrainingArguments, expected_cls, expected_kwargs):
        # exercises all the valid --optim options
        self.check_optim_and_kwargs(training_args, expected_cls, expected_kwargs)

        trainer = get_regression_trainer(**training_args.to_dict())
        trainer.train()

    def test_fused_adam(self):
        # Pretend that apex is installed and mock apex.optimizers.FusedAdam exists.
        # Trainer.get_optimizer_cls_and_kwargs does not use FusedAdam. It only has to return the
        # class given, so mocking apex.optimizers.FusedAdam should be fine for testing and allow
        # the test to run without requiring an apex installation.
        mock = Mock()
        modules = {
            "apex": mock,
            "apex.optimizers": mock.optimizers,
            "apex.optimizers.FusedAdam": mock.optimizers.FusedAdam,
        }
        with patch.dict("sys.modules", modules):
            self.check_optim_and_kwargs(
                TrainingArguments(optim=OptimizerNames.ADAMW_APEX_FUSED, output_dir="None"),
                mock.optimizers.FusedAdam,
                default_adam_kwargs,
            )

    def test_fused_adam_no_apex(self):
        args = TrainingArguments(optim=OptimizerNames.ADAMW_APEX_FUSED, output_dir="None")

        # Pretend that apex does not exist, even if installed. By setting apex to None, importing
        # apex will fail even if apex is installed.
        with patch.dict("sys.modules", {"apex.optimizers": None}):
            with self.assertRaises(ValueError):
                Trainer.get_optimizer_cls_and_kwargs(args)

    def test_bnb_adam8bit(self):
        # Pretend that Bits and Bytes is installed and mock bnb.optim.Adam8bit exists.
        # Trainer.get_optimizer_cls_and_kwargs does not use Adam8bit. It only has to return the
        # class given, so mocking bnb.optim.Adam8bit should be fine for testing and allow
        # the test to run without requiring a bnb installation.
        mock = Mock()
        modules = {
            "bitsandbytes": mock,
            "bitsandbytes.optim": mock.optim,
            "bitsandbytes.optim.AdamW": mock.optim.AdamW,
        }
        with patch.dict("sys.modules", modules):
            self.check_optim_and_kwargs(
                TrainingArguments(optim=OptimizerNames.ADAMW_BNB, output_dir="None"),
                mock.optim.AdamW,
                default_adam_kwargs,
            )

    def test_bnb_paged_adam8bit_alias(self):
        mock = Mock()
        modules = {
            "bitsandbytes": mock,
            "bitsandbytes.optim": mock.optim,
            "bitsandbytes.optim.AdamW": mock.optim.AdamW,
        }
        with patch.dict("sys.modules", modules):
            self.check_optim_and_kwargs(
                TrainingArguments(optim=OptimizerNames.ADAMW_8BIT, output_dir="None"),
                mock.optim.AdamW,
                default_adam_kwargs,
            )

    def test_bnb_paged_adam(self):
        mock = Mock()
        modules = {
            "bitsandbytes": mock,
            "bitsandbytes.optim": mock.optim,
            "bitsandbytes.optim.AdamW": mock.optim.AdamW,
        }
        with patch.dict("sys.modules", modules):
            self.check_optim_and_kwargs(
                TrainingArguments(optim=OptimizerNames.PAGED_ADAMW, output_dir="None"),
                mock.optim.AdamW,
                default_adam_kwargs,
            )

    def test_bnb_paged_adam8bit(self):
        mock = Mock()
        modules = {
            "bitsandbytes": mock,
            "bitsandbytes.optim": mock.optim,
            "bitsandbytes.optim.AdamW": mock.optim.AdamW,
        }
        with patch.dict("sys.modules", modules):
            self.check_optim_and_kwargs(
                TrainingArguments(optim=OptimizerNames.PAGED_ADAMW_8BIT, output_dir="None"),
                mock.optim.AdamW,
                default_adam_kwargs,
            )

    def test_bnb_lion(self):
        mock = Mock()
        modules = {
            "bitsandbytes": mock,
            "bitsandbytes.optim": mock.optim,
            "bitsandbytes.optim.Lion": mock.optim.Lion,
        }
        with patch.dict("sys.modules", modules):
            self.check_optim_and_kwargs(
                TrainingArguments(optim=OptimizerNames.LION, output_dir="None"),
                mock.optim.Lion,
                default_lion_kwargs,
            )

    def test_bnb_lion8bit(self):
        mock = Mock()
        modules = {
            "bitsandbytes": mock,
            "bitsandbytes.optim": mock.optim,
            "bitsandbytes.optim.Lion": mock.optim.Lion,
        }
        with patch.dict("sys.modules", modules):
            self.check_optim_and_kwargs(
                TrainingArguments(optim=OptimizerNames.LION_8BIT, output_dir="None"),
                mock.optim.Lion,
                default_lion_kwargs,
            )

    def test_bnb_paged_lion8bit(self):
        mock = Mock()
        modules = {
            "bitsandbytes": mock,
            "bitsandbytes.optim": mock.optim,
            "bitsandbytes.optim.Lion": mock.optim.Lion,
        }
        with patch.dict("sys.modules", modules):
            self.check_optim_and_kwargs(
                TrainingArguments(optim=OptimizerNames.PAGED_LION_8BIT, output_dir="None"),
                mock.optim.Lion,
                default_lion_kwargs,
            )

    def test_bnb_paged_lion(self):
        mock = Mock()
        modules = {
            "bitsandbytes": mock,
            "bitsandbytes.optim": mock.optim,
            "bitsandbytes.optim.Lion": mock.optim.Lion,
        }
        with patch.dict("sys.modules", modules):
            self.check_optim_and_kwargs(
                TrainingArguments(optim=OptimizerNames.PAGED_LION, output_dir="None"),
                mock.optim.Lion,
                default_lion_kwargs,
            )
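
    # The *_no_bnb tests below cover the failure path: with `bitsandbytes.optim` masked out of
    # sys.modules, Trainer.get_optimizer_cls_and_kwargs must raise a ValueError rather than silently
    # falling back to another optimizer.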
with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_paged_adam8bit_no_bnb(self): args = TrainingArguments(optim=OptimizerNames.PAGED_ADAMW_8BIT, output_dir="None") # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing # bnb will fail even if bnb is installed. with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_paged_lion_no_bnb(self): args = TrainingArguments(optim=OptimizerNames.PAGED_LION, output_dir="None") # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing # bnb will fail even if bnb is installed. with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_paged_lion8bit_no_bnb(self): args = TrainingArguments(optim=OptimizerNames.PAGED_LION_8BIT, output_dir="None") # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing # bnb will fail even if bnb is installed. with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_anyprecision_adamw(self): # Pretend that torchdistx is installed and mock torchdistx.optimizers.AnyPrecisionAdamW exists. # Trainer.get_optimizer_cls_and_kwargs does not use AnyPrecisioinAdamW. It only has to return the # class given, so mocking torchdistx.optimizers.AnyPrecisionAdamW should be fine for testing and allow # the test to run without requiring a bnb installation. mock = Mock() modules = { "torchdistx": mock, "torchdistx.optimizers": mock.optimizers, "torchdistx.optimizers.AnyPrecisionAdamW.": mock.optimizers.AnyPrecisionAdamW, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.ADAMW_ANYPRECISION, output_dir="None"), mock.optimizers.AnyPrecisionAdamW, dict(default_adam_kwargs, **default_anyprecision_kwargs), ) def test_no_torchdistx_anyprecision_adamw(self): args = TrainingArguments(optim=OptimizerNames.ADAMW_ANYPRECISION, output_dir="None") # Pretend that torchdistx does not exist, even if installed. By setting torchdistx to None, importing # torchdistx.optimizers will fail even if torchdistx is installed. 
with patch.dict("sys.modules", {"torchdistx.optimizers": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) @require_torch @require_wandb class TrainerHyperParameterWandbIntegrationTest(unittest.TestCase): def setUp(self): args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def test_hyperparameter_search(self): class MyTrialShortNamer(TrialShortNamer): DEFAULTS = {"a": 0, "b": 0} def hp_space(trial): return { "method": "random", "metric": {}, "parameters": { "a": {"distribution": "uniform", "min": 1e-6, "max": 1e-4}, "b": {"distribution": "int_uniform", "min": 1, "max": 6}, }, } def model_init(config): if config is None: a = 0 b = 0 else: a = config["a"] b = config["b"] model_config = RegressionModelConfig(a=a, b=b, double_output=False) return RegressionPreTrainedModel(model_config) def hp_name(params): return MyTrialShortNamer.shortname(params) with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, learning_rate=0.1, logging_steps=1, evaluation_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=4, disable_tqdm=True, load_best_model_at_end=True, logging_dir="runs", run_name="test", model_init=model_init, ) trainer.hyperparameter_search( direction="minimize", hp_space=hp_space, hp_name=hp_name, backend="wandb", n_trials=4, anonymous="must" ) class HyperParameterSearchBackendsTest(unittest.TestCase): def test_hyperparameter_search_backends(self): self.assertEqual( list(ALL_HYPERPARAMETER_SEARCH_BACKENDS.keys()), list(HPSearchBackend), )
hf_public_repos/transformers/tests/trainer/test_trainer_tpu.py
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This test is meant to be run on an instance with TPUs, like this:
#
#   python examples/pytorch/xla_spawn.py --num_cores=8 tests/test_trainer_tpu.py
#
# Replace 8 with the number of TPU cores you have.
#

import sys
from typing import Dict

from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.utils import logging


logger = logging.get_logger(__name__)


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    from transformers import Trainer

    class DummyDataset(Dataset):
        def __init__(self, length: int = 101):
            self.length = length

        def __len__(self):
            return self.length

        def __getitem__(self, i) -> int:
            return i

    class DummyDataCollator:
        def __call__(self, features):
            return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}

    class DummyModel(nn.Module):
        def __init__(self):
            super().__init__()
            # Add some (unused) params, otherwise DDP will complain.
            self.fc = nn.Linear(120, 80)

        def forward(self, input_ids, labels=None):
            if labels is not None:
                return torch.tensor(0.0, device=input_ids.device), input_ids
            else:
                return input_ids


def main():
    parser = HfArgumentParser((TrainingArguments,))
    sys.argv += ["--output_dir", "./examples"]
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, "
        f"tpu_num_cores: {training_args.tpu_num_cores}",
    )

    # Essentially, what we want to verify in the distributed case is
    # that we get all samples back, in the right order.
    # (this is crucial for prediction, for instance)
    for dataset_length in [1001, 256, 15]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None

    logger.info("🔥 All distributed tests successful")


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
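# --- Editor's sketch (not part of the original file) ---
# The order-preservation check above can also be exercised in a single CPU process,
# without xla_spawn, by wiring the same dummy pieces together directly. The output_dir
# and batch size below are illustrative assumptions:

def _cpu_sanity_check():
    from transformers import EvalPrediction, Trainer, TrainingArguments

    dataset = DummyDataset(101)

    def compute_metrics(p: EvalPrediction):
        sequential = list(range(len(dataset)))
        return {"success": p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential}

    trainer = Trainer(
        model=DummyModel(),
        args=TrainingArguments(output_dir="./examples", per_device_eval_batch_size=8),
        data_collator=DummyDataCollator(),
        eval_dataset=dataset,
        compute_metrics=compute_metrics,
    )
    # On a single process, the evaluation loop should return every sample, in order.
    assert trainer.evaluate()["eval_success"] is True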
hf_public_repos/transformers/tests/trainer/test_data_collator.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import tempfile import unittest import numpy as np from transformers import ( BertTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForTokenClassification, DataCollatorForWholeWordMask, DataCollatorWithPadding, default_data_collator, is_tf_available, is_torch_available, set_seed, ) from transformers.testing_utils import require_tf, require_torch if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_torch class DataCollatorIntegrationTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"] self.vocab_file = os.path.join(self.tmpdirname, "vocab.txt") with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_default_with_dict(self): features = [{"label": i, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features) self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8))))) self.assertEqual(batch["labels"].dtype, torch.long) self.assertEqual(batch["inputs"].shape, torch.Size([8, 6])) # With label_ids features = [{"label_ids": [0, 1, 2], "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features) self.assertTrue(batch["labels"].equal(torch.tensor([[0, 1, 2]] * 8))) self.assertEqual(batch["labels"].dtype, torch.long) self.assertEqual(batch["inputs"].shape, torch.Size([8, 6])) # Features can already be tensors features = [{"label": i, "inputs": np.random.randint(0, 10, [10])} for i in range(8)] batch = default_data_collator(features) self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8))))) self.assertEqual(batch["labels"].dtype, torch.long) self.assertEqual(batch["inputs"].shape, torch.Size([8, 10])) # Labels can already be tensors features = [{"label": torch.tensor(i), "inputs": np.random.randint(0, 10, [10])} for i in range(8)] batch = default_data_collator(features) self.assertEqual(batch["labels"].dtype, torch.long) self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8))))) self.assertEqual(batch["labels"].dtype, torch.long) self.assertEqual(batch["inputs"].shape, torch.Size([8, 10])) def test_default_classification_and_regression(self): data_collator = default_data_collator features = [{"input_ids": [0, 1, 2, 3, 4], "label": i} for i in range(4)] batch = data_collator(features) self.assertEqual(batch["labels"].dtype, torch.long) features = [{"input_ids": [0, 1, 2, 3, 4], "label": float(i)} for i in range(4)] batch = data_collator(features) self.assertEqual(batch["labels"].dtype, torch.float) def test_default_with_no_labels(self): features = [{"label": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features) self.assertTrue("labels" not in batch) 
self.assertEqual(batch["inputs"].shape, torch.Size([8, 6])) # With label_ids features = [{"label_ids": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features) self.assertTrue("labels" not in batch) self.assertEqual(batch["inputs"].shape, torch.Size([8, 6])) def test_data_collator_with_padding(self): tokenizer = BertTokenizer(self.vocab_file) features = [{"input_ids": [0, 1, 2]}, {"input_ids": [0, 1, 2, 3, 4, 5]}] data_collator = DataCollatorWithPadding(tokenizer) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6])) self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) data_collator = DataCollatorWithPadding(tokenizer, padding="max_length", max_length=10) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 10])) data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 8])) def test_data_collator_for_token_classification(self): tokenizer = BertTokenizer(self.vocab_file) features = [ {"input_ids": [0, 1, 2], "labels": [0, 1, 2]}, {"input_ids": [0, 1, 2, 3, 4, 5], "labels": [0, 1, 2, 3, 4, 5]}, ] data_collator = DataCollatorForTokenClassification(tokenizer) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6])) self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) self.assertEqual(batch["labels"].shape, torch.Size([2, 6])) self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-100] * 3) data_collator = DataCollatorForTokenClassification(tokenizer, padding="max_length", max_length=10) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 10])) self.assertEqual(batch["labels"].shape, torch.Size([2, 10])) data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 8])) self.assertEqual(batch["labels"].shape, torch.Size([2, 8])) data_collator = DataCollatorForTokenClassification(tokenizer, label_pad_token_id=-1) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6])) self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) self.assertEqual(batch["labels"].shape, torch.Size([2, 6])) self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-1] * 3) for feature in features: feature.pop("labels") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6])) self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) def test_data_collator_for_token_classification_works_with_pt_tensors(self): tokenizer = BertTokenizer(self.vocab_file) features = [ {"input_ids": torch.tensor([0, 1, 2]), "labels": torch.tensor([0, 1, 2])}, {"input_ids": torch.tensor([0, 1, 2, 3, 4, 5]), "labels": torch.tensor([0, 1, 2, 3, 4, 5])}, ] data_collator = DataCollatorForTokenClassification(tokenizer) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6])) self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) self.assertEqual(batch["labels"].shape, torch.Size([2, 6])) self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-100] * 3) data_collator = DataCollatorForTokenClassification(tokenizer, padding="max_length", max_length=10) 
batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 10])) self.assertEqual(batch["labels"].shape, torch.Size([2, 10])) data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 8])) self.assertEqual(batch["labels"].shape, torch.Size([2, 8])) data_collator = DataCollatorForTokenClassification(tokenizer, label_pad_token_id=-1) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6])) self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) self.assertEqual(batch["labels"].shape, torch.Size([2, 6])) self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-1] * 3) for feature in features: feature.pop("labels") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6])) self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) def _test_no_pad_and_pad(self, no_pad_features, pad_features): tokenizer = BertTokenizer(self.vocab_file) data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False) batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10))) self.assertEqual(batch["labels"].shape, torch.Size((2, 10))) batch = data_collator(pad_features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10))) self.assertEqual(batch["labels"].shape, torch.Size((2, 10))) data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, pad_to_multiple_of=8) batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 16))) self.assertEqual(batch["labels"].shape, torch.Size((2, 16))) batch = data_collator(pad_features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 16))) self.assertEqual(batch["labels"].shape, torch.Size((2, 16))) tokenizer._pad_token = None data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False) with self.assertRaises(ValueError): # Expect error due to padding token missing data_collator(pad_features) set_seed(42) # For reproducibility tokenizer = BertTokenizer(self.vocab_file) data_collator = DataCollatorForLanguageModeling(tokenizer) batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10))) self.assertEqual(batch["labels"].shape, torch.Size((2, 10))) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(torch.any(masked_tokens)) self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist())) batch = data_collator(pad_features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10))) self.assertEqual(batch["labels"].shape, torch.Size((2, 10))) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(torch.any(masked_tokens)) self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist())) data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8) batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 16))) self.assertEqual(batch["labels"].shape, torch.Size((2, 16))) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(torch.any(masked_tokens)) self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist())) batch = data_collator(pad_features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 16))) 
self.assertEqual(batch["labels"].shape, torch.Size((2, 16))) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(torch.any(masked_tokens)) self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist())) def test_data_collator_for_language_modeling(self): no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}] pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}] self._test_no_pad_and_pad(no_pad_features, pad_features) no_pad_features = [list(range(10)), list(range(10))] pad_features = [list(range(5)), list(range(10))] self._test_no_pad_and_pad(no_pad_features, pad_features) def test_data_collator_for_whole_word_mask(self): tokenizer = BertTokenizer(self.vocab_file) data_collator = DataCollatorForWholeWordMask(tokenizer, return_tensors="pt") features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}] batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10))) self.assertEqual(batch["labels"].shape, torch.Size((2, 10))) # Features can already be tensors features = [{"input_ids": np.arange(10)}, {"input_ids": np.arange(10)}] batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10))) self.assertEqual(batch["labels"].shape, torch.Size((2, 10))) def test_plm(self): tokenizer = BertTokenizer(self.vocab_file) no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}] pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}] data_collator = DataCollatorForPermutationLanguageModeling(tokenizer) batch = data_collator(pad_features) self.assertIsInstance(batch, dict) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10))) self.assertEqual(batch["perm_mask"].shape, torch.Size((2, 10, 10))) self.assertEqual(batch["target_mapping"].shape, torch.Size((2, 10, 10))) self.assertEqual(batch["labels"].shape, torch.Size((2, 10))) batch = data_collator(no_pad_features) self.assertIsInstance(batch, dict) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10))) self.assertEqual(batch["perm_mask"].shape, torch.Size((2, 10, 10))) self.assertEqual(batch["target_mapping"].shape, torch.Size((2, 10, 10))) self.assertEqual(batch["labels"].shape, torch.Size((2, 10))) example = [np.random.randint(0, 5, [5])] with self.assertRaises(ValueError): # Expect error due to odd sequence length data_collator(example) def test_nsp(self): tokenizer = BertTokenizer(self.vocab_file) features = [ {"input_ids": [0, 1, 2, 3, 4], "token_type_ids": [0, 1, 2, 3, 4], "next_sentence_label": i} for i in range(2) ] data_collator = DataCollatorForLanguageModeling(tokenizer) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 5))) self.assertEqual(batch["token_type_ids"].shape, torch.Size((2, 5))) self.assertEqual(batch["labels"].shape, torch.Size((2, 5))) self.assertEqual(batch["next_sentence_label"].shape, torch.Size((2,))) data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 8))) self.assertEqual(batch["token_type_ids"].shape, torch.Size((2, 8))) self.assertEqual(batch["labels"].shape, torch.Size((2, 8))) self.assertEqual(batch["next_sentence_label"].shape, torch.Size((2,))) def test_sop(self): tokenizer = BertTokenizer(self.vocab_file) features = [ { "input_ids": torch.tensor([0, 1, 2, 3, 4]), "token_type_ids": torch.tensor([0, 1, 2, 3, 4]), 
"sentence_order_label": i, } for i in range(2) ] data_collator = DataCollatorForLanguageModeling(tokenizer) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 5))) self.assertEqual(batch["token_type_ids"].shape, torch.Size((2, 5))) self.assertEqual(batch["labels"].shape, torch.Size((2, 5))) self.assertEqual(batch["sentence_order_label"].shape, torch.Size((2,))) data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, torch.Size((2, 8))) self.assertEqual(batch["token_type_ids"].shape, torch.Size((2, 8))) self.assertEqual(batch["labels"].shape, torch.Size((2, 8))) self.assertEqual(batch["sentence_order_label"].shape, torch.Size((2,))) @require_tf class TFDataCollatorIntegrationTest(unittest.TestCase): def setUp(self): super().setUp() self.tmpdirname = tempfile.mkdtemp() vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"] self.vocab_file = os.path.join(self.tmpdirname, "vocab.txt") with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_default_with_dict(self): features = [{"label": i, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features, return_tensors="tf") self.assertEqual(batch["labels"].numpy().tolist(), list(range(8))) self.assertEqual(batch["labels"].dtype, tf.int64) self.assertEqual(batch["inputs"].shape.as_list(), [8, 6]) # With label_ids features = [{"label_ids": [0, 1, 2], "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features, return_tensors="tf") self.assertEqual(batch["labels"].numpy().tolist(), ([[0, 1, 2]] * 8)) self.assertEqual(batch["labels"].dtype, tf.int64) self.assertEqual(batch["inputs"].shape.as_list(), [8, 6]) # Features can already be tensors features = [{"label": i, "inputs": np.random.randint(0, 10, [10])} for i in range(8)] batch = default_data_collator(features, return_tensors="tf") self.assertEqual(batch["labels"].numpy().tolist(), (list(range(8)))) self.assertEqual(batch["labels"].dtype, tf.int64) self.assertEqual(batch["inputs"].shape.as_list(), [8, 10]) # Labels can already be tensors features = [{"label": np.array(i), "inputs": np.random.randint(0, 10, [10])} for i in range(8)] batch = default_data_collator(features, return_tensors="tf") self.assertEqual(batch["labels"].dtype, tf.int64) self.assertEqual(batch["labels"].numpy().tolist(), list(range(8))) self.assertEqual(batch["labels"].dtype, tf.int64) self.assertEqual(batch["inputs"].shape.as_list(), [8, 10]) def test_numpy_dtype_preservation(self): data_collator = default_data_collator # Confirms that numpy inputs are handled correctly even when scalars features = [{"input_ids": np.array([0, 1, 2, 3, 4]), "label": np.int64(i)} for i in range(4)] batch = data_collator(features, return_tensors="tf") self.assertEqual(batch["labels"].dtype, tf.int64) def test_default_classification_and_regression(self): data_collator = default_data_collator features = [{"input_ids": [0, 1, 2, 3, 4], "label": i} for i in range(4)] batch = data_collator(features, return_tensors="tf") self.assertEqual(batch["labels"].dtype, tf.int64) features = [{"input_ids": [0, 1, 2, 3, 4], "label": float(i)} for i in range(4)] batch = data_collator(features, return_tensors="tf") self.assertEqual(batch["labels"].dtype, tf.float32) def test_default_with_no_labels(self): features = [{"label": None, 
"inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features, return_tensors="tf") self.assertTrue("labels" not in batch) self.assertEqual(batch["inputs"].shape.as_list(), [8, 6]) # With label_ids features = [{"label_ids": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features, return_tensors="tf") self.assertTrue("labels" not in batch) self.assertEqual(batch["inputs"].shape.as_list(), [8, 6]) def test_data_collator_with_padding(self): tokenizer = BertTokenizer(self.vocab_file) features = [{"input_ids": [0, 1, 2]}, {"input_ids": [0, 1, 2, 3, 4, 5]}] data_collator = DataCollatorWithPadding(tokenizer, return_tensors="tf") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 6]) self.assertEqual(batch["input_ids"][0].numpy().tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) data_collator = DataCollatorWithPadding(tokenizer, padding="max_length", max_length=10, return_tensors="tf") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10]) data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8, return_tensors="tf") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, [2, 8]) def test_data_collator_for_token_classification(self): tokenizer = BertTokenizer(self.vocab_file) features = [ {"input_ids": [0, 1, 2], "labels": [0, 1, 2]}, {"input_ids": [0, 1, 2, 3, 4, 5], "labels": [0, 1, 2, 3, 4, 5]}, ] data_collator = DataCollatorForTokenClassification(tokenizer, return_tensors="tf") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 6]) self.assertEqual(batch["input_ids"][0].numpy().tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) self.assertEqual(batch["labels"].shape.as_list(), [2, 6]) self.assertEqual(batch["labels"][0].numpy().tolist(), [0, 1, 2] + [-100] * 3) data_collator = DataCollatorForTokenClassification( tokenizer, padding="max_length", max_length=10, return_tensors="tf" ) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10]) self.assertEqual(batch["labels"].shape.as_list(), [2, 10]) data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8, return_tensors="tf") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 8]) self.assertEqual(batch["labels"].shape.as_list(), [2, 8]) data_collator = DataCollatorForTokenClassification(tokenizer, label_pad_token_id=-1, return_tensors="tf") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 6]) self.assertEqual(batch["input_ids"][0].numpy().tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) self.assertEqual(batch["labels"].shape.as_list(), [2, 6]) self.assertEqual(batch["labels"][0].numpy().tolist(), [0, 1, 2] + [-1] * 3) def _test_no_pad_and_pad(self, no_pad_features, pad_features): tokenizer = BertTokenizer(self.vocab_file) data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors="tf") batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10]) self.assertEqual(batch["labels"].shape.as_list(), [2, 10]) batch = data_collator(pad_features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10]) self.assertEqual(batch["labels"].shape.as_list(), [2, 10]) data_collator = DataCollatorForLanguageModeling( tokenizer, mlm=False, pad_to_multiple_of=8, return_tensors="tf" ) batch = data_collator(no_pad_features) 
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 16]) self.assertEqual(batch["labels"].shape.as_list(), [2, 16]) batch = data_collator(pad_features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 16]) self.assertEqual(batch["labels"].shape.as_list(), [2, 16]) tokenizer._pad_token = None data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors="tf") with self.assertRaises(ValueError): # Expect error due to padding token missing data_collator(pad_features) set_seed(42) # For reproducibility tokenizer = BertTokenizer(self.vocab_file) data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="tf") batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10]) self.assertEqual(batch["labels"].shape.as_list(), [2, 10]) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(tf.reduce_any(masked_tokens)) # self.assertTrue(all(x == -100 for x in batch["labels"].numpy()[~masked_tokens.numpy()].tolist())) batch = data_collator(pad_features, return_tensors="tf") self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10]) self.assertEqual(batch["labels"].shape.as_list(), [2, 10]) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(tf.reduce_any(masked_tokens)) # self.assertTrue(all(x == -100 for x in batch["labels"].numpy()[~masked_tokens.numpy()].tolist())) data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="tf") batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 16]) self.assertEqual(batch["labels"].shape.as_list(), [2, 16]) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(tf.reduce_any(masked_tokens)) # self.assertTrue(all(x == -100 for x in batch["labels"].numpy()[~masked_tokens.numpy()].tolist())) batch = data_collator(pad_features, return_tensors="tf") self.assertEqual(batch["input_ids"].shape.as_list(), [2, 16]) self.assertEqual(batch["labels"].shape.as_list(), [2, 16]) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(tf.reduce_any(masked_tokens)) # self.assertTrue(all(x == -100 for x in batch["labels"].numpy()[~masked_tokens.numpy()].tolist())) def test_data_collator_for_language_modeling(self): no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}] pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}] self._test_no_pad_and_pad(no_pad_features, pad_features) no_pad_features = [list(range(10)), list(range(10))] pad_features = [list(range(5)), list(range(10))] self._test_no_pad_and_pad(no_pad_features, pad_features) def test_data_collator_for_whole_word_mask(self): tokenizer = BertTokenizer(self.vocab_file) data_collator = DataCollatorForWholeWordMask(tokenizer, return_tensors="tf") features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}] batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10]) self.assertEqual(batch["labels"].shape.as_list(), [2, 10]) # Features can already be tensors features = [{"input_ids": np.arange(10)}, {"input_ids": np.arange(10)}] batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10]) self.assertEqual(batch["labels"].shape.as_list(), [2, 10]) def test_plm(self): tokenizer = BertTokenizer(self.vocab_file) no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}] pad_features = [{"input_ids": list(range(5))}, 
{"input_ids": list(range(10))}] data_collator = DataCollatorForPermutationLanguageModeling(tokenizer, return_tensors="tf") batch = data_collator(pad_features) self.assertIsInstance(batch, dict) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10]) self.assertEqual(batch["perm_mask"].shape.as_list(), [2, 10, 10]) self.assertEqual(batch["target_mapping"].shape.as_list(), [2, 10, 10]) self.assertEqual(batch["labels"].shape.as_list(), [2, 10]) batch = data_collator(no_pad_features) self.assertIsInstance(batch, dict) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10]) self.assertEqual(batch["perm_mask"].shape.as_list(), [2, 10, 10]) self.assertEqual(batch["target_mapping"].shape.as_list(), [2, 10, 10]) self.assertEqual(batch["labels"].shape.as_list(), [2, 10]) example = [np.random.randint(0, 5, [5])] with self.assertRaises(ValueError): # Expect error due to odd sequence length data_collator(example) def test_nsp(self): tokenizer = BertTokenizer(self.vocab_file) features = [ {"input_ids": [0, 1, 2, 3, 4], "token_type_ids": [0, 1, 2, 3, 4], "next_sentence_label": i} for i in range(2) ] data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="tf") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 5]) self.assertEqual(batch["token_type_ids"].shape.as_list(), [2, 5]) self.assertEqual(batch["labels"].shape.as_list(), [2, 5]) self.assertEqual(batch["next_sentence_label"].shape.as_list(), [2]) data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="tf") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 8]) self.assertEqual(batch["token_type_ids"].shape.as_list(), [2, 8]) self.assertEqual(batch["labels"].shape.as_list(), [2, 8]) self.assertEqual(batch["next_sentence_label"].shape.as_list(), [2]) def test_sop(self): tokenizer = BertTokenizer(self.vocab_file) features = [ { "input_ids": tf.convert_to_tensor([0, 1, 2, 3, 4]), "token_type_ids": tf.convert_to_tensor([0, 1, 2, 3, 4]), "sentence_order_label": i, } for i in range(2) ] data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="tf") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 5]) self.assertEqual(batch["token_type_ids"].shape.as_list(), [2, 5]) self.assertEqual(batch["labels"].shape.as_list(), [2, 5]) self.assertEqual(batch["sentence_order_label"].shape.as_list(), [2]) data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="tf") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape.as_list(), [2, 8]) self.assertEqual(batch["token_type_ids"].shape.as_list(), [2, 8]) self.assertEqual(batch["labels"].shape.as_list(), [2, 8]) self.assertEqual(batch["sentence_order_label"].shape.as_list(), [2]) class NumpyDataCollatorIntegrationTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"] self.vocab_file = os.path.join(self.tmpdirname, "vocab.txt") with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_default_with_dict(self): features = [{"label": i, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features, return_tensors="np") self.assertEqual(batch["labels"].tolist(), list(range(8))) self.assertEqual(batch["labels"].dtype, np.int64) 
self.assertEqual(batch["inputs"].shape, (8, 6)) # With label_ids features = [{"label_ids": [0, 1, 2], "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features, return_tensors="np") self.assertEqual(batch["labels"].tolist(), [[0, 1, 2]] * 8) self.assertEqual(batch["labels"].dtype, np.int64) self.assertEqual(batch["inputs"].shape, (8, 6)) # Features can already be tensors features = [{"label": i, "inputs": np.random.randint(0, 10, [10])} for i in range(8)] batch = default_data_collator(features, return_tensors="np") self.assertEqual(batch["labels"].tolist(), list(range(8))) self.assertEqual(batch["labels"].dtype, np.int64) self.assertEqual(batch["inputs"].shape, (8, 10)) # Labels can already be tensors features = [{"label": np.array(i), "inputs": np.random.randint(0, 10, [10])} for i in range(8)] batch = default_data_collator(features, return_tensors="np") self.assertEqual(batch["labels"].dtype, np.int64) self.assertEqual(batch["labels"].tolist(), (list(range(8)))) self.assertEqual(batch["labels"].dtype, np.int64) self.assertEqual(batch["inputs"].shape, (8, 10)) def test_default_classification_and_regression(self): data_collator = default_data_collator features = [{"input_ids": [0, 1, 2, 3, 4], "label": i} for i in range(4)] batch = data_collator(features, return_tensors="np") self.assertEqual(batch["labels"].dtype, np.int64) features = [{"input_ids": [0, 1, 2, 3, 4], "label": float(i)} for i in range(4)] batch = data_collator(features, return_tensors="np") self.assertEqual(batch["labels"].dtype, np.float32) def test_default_with_no_labels(self): features = [{"label": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features, return_tensors="np") self.assertTrue("labels" not in batch) self.assertEqual(batch["inputs"].shape, (8, 6)) # With label_ids features = [{"label_ids": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)] batch = default_data_collator(features, return_tensors="np") self.assertTrue("labels" not in batch) self.assertEqual(batch["inputs"].shape, (8, 6)) def test_data_collator_with_padding(self): tokenizer = BertTokenizer(self.vocab_file) features = [{"input_ids": [0, 1, 2]}, {"input_ids": [0, 1, 2, 3, 4, 5]}] data_collator = DataCollatorWithPadding(tokenizer, return_tensors="np") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 6)) self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) data_collator = DataCollatorWithPadding(tokenizer, padding="max_length", max_length=10, return_tensors="np") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 10)) data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8, return_tensors="np") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 8)) def test_data_collator_for_token_classification(self): tokenizer = BertTokenizer(self.vocab_file) features = [ {"input_ids": [0, 1, 2], "labels": [0, 1, 2]}, {"input_ids": [0, 1, 2, 3, 4, 5], "labels": [0, 1, 2, 3, 4, 5]}, ] data_collator = DataCollatorForTokenClassification(tokenizer, return_tensors="np") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 6)) self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) self.assertEqual(batch["labels"].shape, (2, 6)) self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-100] * 3) data_collator = DataCollatorForTokenClassification( tokenizer, padding="max_length", max_length=10, 
return_tensors="np" ) batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 10)) self.assertEqual(batch["labels"].shape, (2, 10)) data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8, return_tensors="np") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 8)) self.assertEqual(batch["labels"].shape, (2, 8)) data_collator = DataCollatorForTokenClassification(tokenizer, label_pad_token_id=-1, return_tensors="np") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 6)) self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3) self.assertEqual(batch["labels"].shape, (2, 6)) self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-1] * 3) def _test_no_pad_and_pad(self, no_pad_features, pad_features): tokenizer = BertTokenizer(self.vocab_file) data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors="np") batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape, (2, 10)) self.assertEqual(batch["labels"].shape, (2, 10)) batch = data_collator(pad_features, return_tensors="np") self.assertEqual(batch["input_ids"].shape, (2, 10)) self.assertEqual(batch["labels"].shape, (2, 10)) data_collator = DataCollatorForLanguageModeling( tokenizer, mlm=False, pad_to_multiple_of=8, return_tensors="np" ) batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape, (2, 16)) self.assertEqual(batch["labels"].shape, (2, 16)) batch = data_collator(pad_features, return_tensors="np") self.assertEqual(batch["input_ids"].shape, (2, 16)) self.assertEqual(batch["labels"].shape, (2, 16)) tokenizer._pad_token = None data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors="np") with self.assertRaises(ValueError): # Expect error due to padding token missing data_collator(pad_features) set_seed(42) # For reproducibility tokenizer = BertTokenizer(self.vocab_file) data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="np") batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape, (2, 10)) self.assertEqual(batch["labels"].shape, (2, 10)) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(np.any(masked_tokens)) # self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist())) batch = data_collator(pad_features) self.assertEqual(batch["input_ids"].shape, (2, 10)) self.assertEqual(batch["labels"].shape, (2, 10)) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(np.any(masked_tokens)) # self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist())) data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="np") batch = data_collator(no_pad_features) self.assertEqual(batch["input_ids"].shape, (2, 16)) self.assertEqual(batch["labels"].shape, (2, 16)) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(np.any(masked_tokens)) # self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist())) batch = data_collator(pad_features) self.assertEqual(batch["input_ids"].shape, (2, 16)) self.assertEqual(batch["labels"].shape, (2, 16)) masked_tokens = batch["input_ids"] == tokenizer.mask_token_id self.assertTrue(np.any(masked_tokens)) # self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist())) def test_data_collator_for_language_modeling(self): no_pad_features = 
[{"input_ids": list(range(10))}, {"input_ids": list(range(10))}] pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}] self._test_no_pad_and_pad(no_pad_features, pad_features) no_pad_features = [list(range(10)), list(range(10))] pad_features = [list(range(5)), list(range(10))] self._test_no_pad_and_pad(no_pad_features, pad_features) def test_data_collator_for_whole_word_mask(self): tokenizer = BertTokenizer(self.vocab_file) data_collator = DataCollatorForWholeWordMask(tokenizer, return_tensors="np") features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}] batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 10)) self.assertEqual(batch["labels"].shape, (2, 10)) # Features can already be tensors features = [{"input_ids": np.arange(10)}, {"input_ids": np.arange(10)}] batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 10)) self.assertEqual(batch["labels"].shape, (2, 10)) def test_plm(self): tokenizer = BertTokenizer(self.vocab_file) no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}] pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}] data_collator = DataCollatorForPermutationLanguageModeling(tokenizer, return_tensors="np") batch = data_collator(pad_features) self.assertIsInstance(batch, dict) self.assertEqual(batch["input_ids"].shape, (2, 10)) self.assertEqual(batch["perm_mask"].shape, (2, 10, 10)) self.assertEqual(batch["target_mapping"].shape, (2, 10, 10)) self.assertEqual(batch["labels"].shape, (2, 10)) batch = data_collator(no_pad_features) self.assertIsInstance(batch, dict) self.assertEqual(batch["input_ids"].shape, (2, 10)) self.assertEqual(batch["perm_mask"].shape, (2, 10, 10)) self.assertEqual(batch["target_mapping"].shape, (2, 10, 10)) self.assertEqual(batch["labels"].shape, (2, 10)) example = [np.random.randint(0, 5, [5])] with self.assertRaises(ValueError): # Expect error due to odd sequence length data_collator(example) def test_nsp(self): tokenizer = BertTokenizer(self.vocab_file) features = [ {"input_ids": [0, 1, 2, 3, 4], "token_type_ids": [0, 1, 2, 3, 4], "next_sentence_label": i} for i in range(2) ] data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="np") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 5)) self.assertEqual(batch["token_type_ids"].shape, (2, 5)) self.assertEqual(batch["labels"].shape, (2, 5)) self.assertEqual(batch["next_sentence_label"].shape, (2,)) data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="np") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 8)) self.assertEqual(batch["token_type_ids"].shape, (2, 8)) self.assertEqual(batch["labels"].shape, (2, 8)) self.assertEqual(batch["next_sentence_label"].shape, (2,)) def test_sop(self): tokenizer = BertTokenizer(self.vocab_file) features = [ { "input_ids": np.array([0, 1, 2, 3, 4]), "token_type_ids": np.array([0, 1, 2, 3, 4]), "sentence_order_label": i, } for i in range(2) ] data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="np") batch = data_collator(features) self.assertEqual(batch["input_ids"].shape, (2, 5)) self.assertEqual(batch["token_type_ids"].shape, (2, 5)) self.assertEqual(batch["labels"].shape, (2, 5)) self.assertEqual(batch["sentence_order_label"].shape, (2,)) data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="np") batch = data_collator(features) 
self.assertEqual(batch["input_ids"].shape, (2, 8)) self.assertEqual(batch["token_type_ids"].shape, (2, 8)) self.assertEqual(batch["labels"].shape, (2, 8)) self.assertEqual(batch["sentence_order_label"].shape, (2,))
hf_public_repos/transformers/tests/generation/test_utils.py
# coding=utf-8
# Copyright 2020 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import inspect
import tempfile
import unittest
import warnings

import numpy as np
from parameterized import parameterized

from transformers import is_torch_available, pipeline, set_seed
from transformers.testing_utils import (
    is_flaky,
    require_accelerate,
    require_torch,
    require_torch_multi_accelerator,
    slow,
    torch_device,
)

from ..test_modeling_common import floats_tensor, ids_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin


if is_torch_available():
    import torch

    from transformers import (
        AutoModelForCausalLM,
        AutoModelForSeq2SeqLM,
        AutoModelForSpeechSeq2Seq,
        AutoModelForVision2Seq,
        AutoTokenizer,
        BartForCausalLM,
        BartForConditionalGeneration,
        BartTokenizer,
        GPT2LMHeadModel,
        GPT2Tokenizer,
        ImageGPTForCausalImageModeling,
        SpeechEncoderDecoderModel,
        top_k_top_p_filtering,
    )
    from transformers.cache_utils import DynamicCache
    from transformers.generation import (
        BeamSampleDecoderOnlyOutput,
        BeamSampleEncoderDecoderOutput,
        BeamSearchDecoderOnlyOutput,
        BeamSearchEncoderDecoderOutput,
        BeamSearchScorer,
        ConstrainedBeamSearchScorer,
        DisjunctiveConstraint,
        ForcedBOSTokenLogitsProcessor,
        ForcedEOSTokenLogitsProcessor,
        GreedySearchDecoderOnlyOutput,
        GreedySearchEncoderDecoderOutput,
        HammingDiversityLogitsProcessor,
        InfNanRemoveLogitsProcessor,
        LogitsProcessorList,
        MaxLengthCriteria,
        MinLengthLogitsProcessor,
        NoBadWordsLogitsProcessor,
        NoRepeatNGramLogitsProcessor,
        PhrasalConstraint,
        RepetitionPenaltyLogitsProcessor,
        SampleDecoderOnlyOutput,
        SampleEncoderDecoderOutput,
        StoppingCriteria,
        StoppingCriteriaList,
        TemperatureLogitsWarper,
        TopKLogitsWarper,
        TopPLogitsWarper,
    )


class GenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    input_name = "input_ids"

    def _get_input_ids_and_config(self, batch_size=2):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = inputs_dict[self.input_name]

        # cut to half length & take max batch_size 3
        sequence_length = input_ids.shape[-1] // 2
        input_ids = input_ids[:batch_size, :sequence_length]

        # generate max 3 tokens
        max_length = input_ids.shape[-1] + 3
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            if isinstance(config.eos_token_id, int):
                config.eos_token_id = [config.eos_token_id]
            config.pad_token_id = config.eos_token_id[0]
        attention_mask = torch.ones_like(input_ids, dtype=torch.long)[:batch_size, :sequence_length]

        return config, input_ids, attention_mask, max_length

    @staticmethod
    def _get_logits_processor_and_kwargs(
        input_length,
        eos_token_id,
        forced_bos_token_id=None,
        forced_eos_token_id=None,
        max_length=None,
        diversity_penalty=None,
    ):
        process_kwargs = {
            "min_length": input_length + 1 if max_length is None else max_length - 1,
            "bad_words_ids": [[1, 0]],
            "repetition_penalty": 1.2,
            "remove_invalid_values": True,
        }
        # NoRepeatNGramLogitsProcessor + forced tokens may result in no valid continuations
if forced_bos_token_id is None and forced_eos_token_id is None: process_kwargs["no_repeat_ngram_size"] = 2 # NOTE: the order of operations here should match `generate` for accurate testing logits_processor = LogitsProcessorList( ( [ HammingDiversityLogitsProcessor(diversity_penalty, num_beams=2, num_beam_groups=2), ] if diversity_penalty is not None else [] ) + ( [ MinLengthLogitsProcessor(process_kwargs["min_length"], eos_token_id), ] if eos_token_id is not None else [] ) + ( [ ForcedBOSTokenLogitsProcessor(forced_bos_token_id), ] if forced_bos_token_id is not None else [] ) + ( [ForcedEOSTokenLogitsProcessor(max_length, forced_eos_token_id)] if forced_eos_token_id is not None else [] ) + [NoBadWordsLogitsProcessor(process_kwargs["bad_words_ids"], eos_token_id)] + ( [NoRepeatNGramLogitsProcessor(process_kwargs["no_repeat_ngram_size"])] if forced_bos_token_id is None and forced_eos_token_id is None else [] ) + [RepetitionPenaltyLogitsProcessor(process_kwargs["repetition_penalty"])] + [InfNanRemoveLogitsProcessor()] # prevent flaky generation test failures ) return process_kwargs, logits_processor @staticmethod def _get_warper_and_kwargs(num_beams): warp_kwargs = {"top_k": 10, "top_p": 0.7, "temperature": 0.7} logits_warper = LogitsProcessorList( [ TemperatureLogitsWarper(warp_kwargs["temperature"]), TopKLogitsWarper(top_k=warp_kwargs["top_k"], min_tokens_to_keep=(2 if num_beams > 1 else 1)), TopPLogitsWarper(top_p=warp_kwargs["top_p"], min_tokens_to_keep=(2 if num_beams > 1 else 1)), ] ) return warp_kwargs, logits_warper @staticmethod def _get_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": 2, "num_return_sequences": num_return_sequences, } beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=beam_kwargs["num_beams"], device=torch_device, length_penalty=beam_kwargs["length_penalty"], do_early_stopping=beam_kwargs["early_stopping"], num_beam_hyps_to_keep=num_return_sequences, ) return beam_kwargs, beam_scorer @staticmethod def _get_diverse_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": 2, "num_return_sequences": num_return_sequences, "num_beam_groups": 2, # one beam per group "diversity_penalty": 2.0, } beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=beam_kwargs["num_beams"], device=torch_device, length_penalty=beam_kwargs["length_penalty"], do_early_stopping=beam_kwargs["early_stopping"], num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=beam_kwargs["num_beam_groups"], ) return beam_kwargs, beam_scorer @staticmethod def _get_constrained_beam_scorer_and_kwargs(batch_size, max_length, constraints, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": num_return_sequences * 4, "num_return_sequences": num_return_sequences, } beam_scorer = ConstrainedBeamSearchScorer( batch_size=batch_size, constraints=constraints, num_beams=beam_kwargs["num_beams"], device=torch_device, length_penalty=beam_kwargs["length_penalty"], do_early_stopping=beam_kwargs["early_stopping"], num_beam_hyps_to_keep=num_return_sequences, ) return beam_kwargs, beam_scorer @staticmethod def _get_encoder_outputs( model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 ): encoder = model.get_encoder() encoder_outputs = encoder( input_ids, attention_mask=attention_mask, 
output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave( num_interleave, dim=0 ) input_ids = torch.zeros_like(input_ids[:, :1]) + model._get_decoder_start_token_id() attention_mask = None return encoder_outputs, input_ids, attention_mask def _greedy_generate( self, model, input_ids, attention_mask, max_length, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], eos_token_id=model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) kwargs = {} model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, num_beams=1, max_length=max_length, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **logits_process_kwargs, **model_kwargs, ) if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_greedy = model.greedy_search( input_ids, max_length=max_length, logits_processor=logits_processor, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_greedy, output_generate def _sample_generate( self, model, input_ids, attention_mask, max_length, num_return_sequences, logits_processor, logits_warper, logits_warper_kwargs, process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): torch.manual_seed(0) model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=True, num_beams=1, max_length=max_length, num_return_sequences=num_return_sequences, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **logits_warper_kwargs, **process_kwargs, **model_kwargs, ) torch.manual_seed(0) kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=num_return_sequences, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(num_return_sequences, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_sample = model.sample( input_ids.repeat_interleave(num_return_sequences, dim=0), max_length=max_length, logits_processor=logits_processor, logits_warper=logits_warper, output_scores=output_scores, output_attentions=output_attentions, 
output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_sample, output_generate def _beam_search_generate( self, model, input_ids, attention_mask, max_length, beam_scorer, beam_kwargs, logits_processor, logits_process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **beam_kwargs, **logits_process_kwargs, **model_kwargs, ) # beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_beam_search = model.beam_search( input_ids.repeat_interleave(beam_scorer.num_beams, dim=0), beam_scorer, max_length=max_length, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_beam_search def _beam_sample_generate( self, model, input_ids, attention_mask, max_length, beam_scorer, beam_kwargs, logits_warper, logits_warper_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): torch.manual_seed(0) model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=True, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **beam_kwargs, **logits_warper_kwargs, **model_kwargs, ) # beam_search does not automatically interleave `batch_size` dim for `num_beams` torch.manual_seed(0) kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0) # prevent flaky generation test failures logits_processor = LogitsProcessorList() logits_processor.append(InfNanRemoveLogitsProcessor()) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_beam_sample = model.beam_sample( input_ids.repeat_interleave(beam_scorer.num_beams, dim=0), beam_scorer, max_length=max_length, logits_warper=logits_warper, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, 
output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_beam_sample def _group_beam_search_generate( self, model, input_ids, attention_mask, max_length, beam_scorer, beam_kwargs, logits_processor, logits_process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **beam_kwargs, **logits_process_kwargs, **model_kwargs, ) # group_beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_group_beam_search = model.group_beam_search( input_ids.repeat_interleave(beam_scorer.num_beams, dim=0), beam_scorer, max_length=max_length, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_group_beam_search def _constrained_beam_search_generate( self, model, input_ids, attention_mask, max_length, constrained_beam_scorer, constraints, beam_kwargs, logits_processor, logits_process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, constraints=constraints, **beam_kwargs, **logits_process_kwargs, **model_kwargs, ) # group_beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=constrained_beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(constrained_beam_scorer.num_beams, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_group_beam_search = model.constrained_beam_search( input_ids.repeat_interleave(constrained_beam_scorer.num_beams, dim=0), constrained_beam_scorer, max_length=max_length, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, 
return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_group_beam_search def _contrastive_generate( self, model, input_ids, attention_mask, max_length, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): contrastive_search_kwargs = { "penalty_alpha": 0.6, "top_k": 5, } if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], eos_token_id=model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) kwargs = {} model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, num_beams=1, max_length=max_length, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **logits_process_kwargs, **model_kwargs, **contrastive_search_kwargs, ) if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)]) output_contrastive = model.contrastive_search( input_ids, stopping_criteria=stopping_criteria, logits_processor=logits_processor, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, **contrastive_search_kwargs, ) return output_contrastive, output_generate def test_greedy_generate(self): # check `generate()` and `greedy_search()` are equal for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # test old generation output for backwards compatibility model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length ) self.assertListEqual(output_greedy.tolist(), output_generate.tolist()) def test_greedy_generate_dict_outputs(self): for model_class in self.all_generative_model_classes: # disable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_greedy, GreedySearchEncoderDecoderOutput) self.assertIsInstance(output_generate, GreedySearchEncoderDecoderOutput) else: self.assertIsInstance(output_greedy, GreedySearchDecoderOnlyOutput) self.assertIsInstance(output_generate, GreedySearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist()) for output in (output_greedy, output_generate): self._check_outputs(output, input_ids, 
model.config) def test_greedy_generate_dict_outputs_use_cache(self): for model_class in self.all_generative_model_classes: # enable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist()) for output in (output_greedy, output_generate): self._check_outputs(output, input_ids, model.config, use_cache=True) def test_sample_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=2) # check `generate()` and `sample()` are equal output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=1, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, ) self.assertListEqual(output_sample.tolist(), output_generate.tolist()) # check `generate()` and `sample()` yield equal results for `num_return_sequences` output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=3, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, ) self.assertListEqual(output_sample.tolist(), output_generate.tolist()) def test_sample_generate_dict_output(self): for model_class in self.all_generative_model_classes: # disable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=2, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_sample, SampleEncoderDecoderOutput) 
                self.assertIsInstance(output_generate, SampleEncoderDecoderOutput)
            else:
                self.assertIsInstance(output_sample, SampleDecoderOnlyOutput)
                self.assertIsInstance(output_generate, SampleDecoderOnlyOutput)

            self.assertListEqual(output_generate.sequences.tolist(), output_sample.sequences.tolist())

            for output in (output_sample, output_generate):
                self._check_outputs(output, input_ids, model.config, num_return_sequences=2)

    def test_beam_search_generate(self):
        for model_class in self.all_generative_model_classes:
            config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

            # It is important to set the eos_token_id to None to ensure that no sequences
            # shorter than `max_length` can be generated, which could lead to flaky CircleCI
            # failures if the top `num_return_sequences` beams are all shorter than the longest beam
            config.eos_token_id = None
            config.forced_eos_token_id = None

            model = model_class(config).to(torch_device).eval()
            if model.config.is_encoder_decoder:
                max_length = 4

            logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
                input_ids.shape[-1],
                config.eos_token_id,
                config.forced_bos_token_id,
                config.forced_eos_token_id,
                max_length,
            )
            beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)

            # check `generate()` and `beam_search()` are equal
            output_generate, output_beam_search = self._beam_search_generate(
                model=model,
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_length=max_length,
                beam_scorer=beam_scorer,
                beam_kwargs=beam_kwargs,
                logits_process_kwargs=logits_process_kwargs,
                logits_processor=logits_processor,
            )
            self.assertListEqual(output_generate.tolist(), output_beam_search.tolist())

            if model.config.is_encoder_decoder:
                max_length = 4
            beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)

            output_generate, output_beam_search = self._beam_search_generate(
                model=model,
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_length=max_length,
                beam_scorer=beam_scorer,
                beam_kwargs=beam_kwargs,
                logits_process_kwargs=logits_process_kwargs,
                logits_processor=logits_processor,
            )
            self.assertListEqual(output_generate.tolist(), output_beam_search.tolist())

    def test_beam_search_generate_dict_output(self):
        for model_class in self.all_generative_model_classes:
            config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

            # disable cache
            config.use_cache = False

            # It is important to set the eos_token_id to None to ensure that no sequences
            # shorter than `max_length` can be generated, which could lead to flaky CircleCI
            # failures if the top `num_return_sequences` beams are all shorter than the longest beam
            config.eos_token_id = None
            config.forced_eos_token_id = None

            model = model_class(config).to(torch_device).eval()
            if model.config.is_encoder_decoder:
                max_length = 4

            logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
                input_ids.shape[-1],
                config.eos_token_id,
                config.forced_bos_token_id,
                config.forced_eos_token_id,
                max_length,
            )
            beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
            output_generate, output_beam_search = self._beam_search_generate(
                model=model,
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_length=max_length,
                beam_scorer=beam_scorer,
                beam_kwargs=beam_kwargs,
                logits_process_kwargs=logits_process_kwargs,
                logits_processor=logits_processor,
                output_scores=True,
                output_hidden_states=True,
                output_attentions=True,
                return_dict_in_generate=True,
            )

            if model.config.is_encoder_decoder:
                self.assertIsInstance(output_beam_search, BeamSearchEncoderDecoderOutput)
                self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput)
            else:
                self.assertIsInstance(output_beam_search, BeamSearchDecoderOnlyOutput)
                self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput)

            self.assertListEqual(output_generate.sequences.tolist(), output_beam_search.sequences.tolist())
            self.assertTrue(
                torch.allclose(output_generate["sequences_scores"], output_beam_search["sequences_scores"], atol=1e-3)
            )
            self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],))
            self.assertTrue((output_generate["sequences_scores"] < 0).all().item())

            for output in (output_beam_search, output_generate):
                self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams)

    def test_beam_search_generate_dict_outputs_use_cache(self):
        for model_class in self.all_generative_model_classes:
            # enable cache
            config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

            # It is important to set the eos_token_id to None to ensure that no sequences
            # shorter than `max_length` can be generated, which could lead to flaky CircleCI
            # failures if the top `num_return_sequences` beams are all shorter than the longest beam
            config.eos_token_id = None
            config.forced_eos_token_id = None

            if not hasattr(config, "use_cache"):
                self.skipTest("This model doesn't support caching")

            model = model_class(config).to(torch_device).eval()
            if model.config.is_encoder_decoder:
                max_length = 4

            logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
                input_ids.shape[-1],
                config.eos_token_id,
                config.forced_bos_token_id,
                config.forced_eos_token_id,
                max_length,
            )

            beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)

            config.use_cache = True
            config.is_decoder = True
            model = model_class(config).to(torch_device).eval()
            output_beam, output_generate = self._beam_search_generate(
                model=model,
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_length=max_length,
                beam_scorer=beam_scorer,
                beam_kwargs=beam_kwargs,
                logits_process_kwargs=logits_process_kwargs,
                logits_processor=logits_processor,
                output_scores=True,
                output_hidden_states=True,
                output_attentions=True,
                return_dict_in_generate=True,
            )

            self.assertListEqual(output_generate.sequences.tolist(), output_beam.sequences.tolist())

            for output in (output_beam, output_generate):
                self._check_outputs(
                    output, input_ids, model.config, use_cache=True, num_return_sequences=beam_scorer.num_beams
                )

    @require_accelerate
    @require_torch_multi_accelerator
    def test_model_parallel_beam_search(self):
        for model_class in self.all_generative_model_classes:
            if model_class._no_split_modules is None:
                continue

            config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

            model = model_class(config).eval()
            with tempfile.TemporaryDirectory() as tmp_dir:
                model.cpu().save_pretrained(tmp_dir)
                new_model = model_class.from_pretrained(tmp_dir, device_map="auto")

                new_model.generate(
                    input_ids,
                    attention_mask=attention_mask,
                    max_length=max_length,
                    num_beams=2,
                )

    def test_beam_sample_generate(self):
        for model_class in self.all_generative_model_classes:
            config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

            # It is important to set the eos_token_id to None to ensure that no sequences
            # shorter than `max_length` can be generated, which could lead to flaky CircleCI
            # failures if the top `num_return_sequences` beams are all shorter than the longest beam
            config.eos_token_id = None
            config.forced_eos_token_id = None

            logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1)

            model = model_class(config).to(torch_device).eval()

            # check `generate()` and `beam_sample()` are equal
            if model.config.is_encoder_decoder:
                max_length = 4
            beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)

            output_generate, output_beam_sample = self._beam_sample_generate(
                model=model,
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_length=max_length,
                beam_scorer=beam_scorer,
                beam_kwargs=beam_kwargs,
                logits_warper=logits_warper,
                logits_warper_kwargs=logits_warper_kwargs,
            )
            self.assertListEqual(output_generate.tolist(), output_beam_sample.tolist())

    def test_beam_sample_generate_dict_output(self):
        for model_class in self.all_generative_model_classes:
            config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

            # disable cache
            config.use_cache = False

            # It is important to set the eos_token_id to None to ensure that no sequences
            # shorter than `max_length` can be generated, which could lead to flaky CircleCI
            # failures if the top `num_return_sequences` beams are all shorter than the longest beam
            config.eos_token_id = None
            config.forced_eos_token_id = None

            model = model_class(config).to(torch_device).eval()
            logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1)

            if model.config.is_encoder_decoder:
                max_length = 4
            beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)

            output_beam_sample, output_generate = self._beam_sample_generate(
                model=model,
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_length=max_length,
                beam_scorer=beam_scorer,
                beam_kwargs=beam_kwargs,
                logits_warper=logits_warper,
                logits_warper_kwargs=logits_warper_kwargs,
                output_scores=True,
                output_hidden_states=True,
                output_attentions=True,
                return_dict_in_generate=True,
            )

            if model.config.is_encoder_decoder:
                self.assertIsInstance(output_beam_sample, BeamSampleEncoderDecoderOutput)
                self.assertIsInstance(output_generate, BeamSampleEncoderDecoderOutput)
            else:
                self.assertIsInstance(output_beam_sample, BeamSampleDecoderOnlyOutput)
                self.assertIsInstance(output_generate, BeamSampleDecoderOnlyOutput)

            self.assertListEqual(output_generate.sequences.tolist(), output_beam_sample.sequences.tolist())
            self.assertTrue(
                torch.allclose(output_generate["sequences_scores"], output_beam_sample["sequences_scores"], atol=1e-3)
            )
            self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],))
            self.assertTrue((output_generate["sequences_scores"] < 0).all().item())

            for output in (output_beam_sample, output_generate):
                self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams)

    def test_generate_without_input_ids(self):
        config, _, _, max_length = self._get_input_ids_and_config()

        # if no bos token id => cannot generate from None
        if config.bos_token_id is None:
            return

        for model_class in self.all_generative_model_classes:
            model = model_class(config).to(torch_device)
            model.eval()

            output_ids_generate = model.generate(do_sample=False, max_length=max_length, remove_invalid_values=True)
            self.assertIsNotNone(output_ids_generate)

    def test_group_beam_search_generate(self):
        for model_class in self.all_generative_model_classes:
            config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

            # It is important to set the eos_token_id to None to ensure that no sequences
            # shorter than `max_length` can be generated, which could lead to flaky CircleCI
            # failures if the top `num_return_sequences` beams are all shorter than the longest beam
            config.eos_token_id = None
            config.forced_eos_token_id = None

            model = model_class(config).to(torch_device).eval()
            if model.config.is_encoder_decoder:
                max_length = 4

            logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
                input_ids.shape[-1],
                config.eos_token_id,
                config.forced_bos_token_id,
                config.forced_eos_token_id,
                max_length,
                diversity_penalty=2.0,
            )

            # check `generate()` and `group_beam_search()` are equal
            beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
            output_generate, output_group_beam_search = self._group_beam_search_generate(
                model=model,
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_length=max_length,
                beam_scorer=beam_scorer,
                beam_kwargs=beam_kwargs,
                logits_processor=logits_processor,
                logits_process_kwargs=logits_process_kwargs,
            )
            self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist())

            # check `generate()` and `group_beam_search()` are equal for `num_return_sequences`
            num_return_sequences = 2
            if model.config.is_encoder_decoder:
                max_length = 4
            beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(
                input_ids.shape[0], max_length, num_return_sequences=num_return_sequences
            )
            output_generate, output_group_beam_search = self._group_beam_search_generate(
                model=model,
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_length=max_length,
                beam_scorer=beam_scorer,
                beam_kwargs=beam_kwargs,
                logits_processor=logits_processor,
                logits_process_kwargs=logits_process_kwargs,
            )
            self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist())

    def test_group_beam_search_generate_dict_output(self):
        for model_class in self.all_generative_model_classes:
            config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
            config.use_cache = False

            # It is important to set the eos_token_id to None to ensure that no sequences
            # shorter than `max_length` can be generated, which could lead to flaky CircleCI
            # failures if the top `num_return_sequences` beams are all shorter than the longest beam
            config.eos_token_id = None
            config.forced_eos_token_id = None

            model = model_class(config).to(torch_device).eval()
            if model.config.is_encoder_decoder:
                max_length = 4

            logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
                input_ids.shape[-1],
                config.eos_token_id,
                config.forced_bos_token_id,
                config.forced_eos_token_id,
                max_length,
                diversity_penalty=2.0,
            )

            num_return_sequences = 1
            beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(
                input_ids.shape[0], max_length, num_return_sequences=num_return_sequences
            )
            output_generate, output_group_beam_search = self._group_beam_search_generate(
                model=model,
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_length=max_length,
                beam_scorer=beam_scorer,
                beam_kwargs=beam_kwargs,
                logits_processor=logits_processor,
                logits_process_kwargs=logits_process_kwargs,
                output_scores=True,
                output_hidden_states=True,
                output_attentions=True,
                return_dict_in_generate=True,
            )

            if model.config.is_encoder_decoder:
                self.assertIsInstance(output_group_beam_search, BeamSearchEncoderDecoderOutput)
                self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput)
            else:
                self.assertIsInstance(output_group_beam_search, BeamSearchDecoderOnlyOutput)
                self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput)

            self.assertListEqual(output_generate.sequences.tolist(), output_group_beam_search.sequences.tolist())
            self.assertTrue(
                torch.allclose(
                    output_generate["sequences_scores"], output_group_beam_search["sequences_scores"], atol=1e-3
                )
            )
            self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],))
            self.assertTrue((output_generate["sequences_scores"] < 0).all().item())

            for output in (output_group_beam_search, output_generate):
                self._check_outputs(
                    output, input_ids, model.config, num_return_sequences=num_return_sequences * beam_scorer.num_beams
                )

    def test_constrained_beam_search_generate(self):
        for model_class in self.all_generative_model_classes:
            config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

            # It is important to set the eos_token_id to None to ensure that no sequences
            # shorter than `max_length` can be generated, which could lead to flaky CircleCI
            # failures if the top `num_return_sequences` beams are all shorter than the longest beam
            config.eos_token_id = None
            config.forced_eos_token_id = None

            model = model_class(config).to(torch_device).eval()
            max_length = 20

            logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
                input_ids.shape[-1],
                config.eos_token_id,
                config.forced_bos_token_id,
                config.forced_eos_token_id,
                max_length,
            )

            # check `generate()` and `constrained_beam_search()` are equal
            # Sample constraints
            min_id = 3
            max_id = config.vocab_size

            force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0]
            constraints = [
                PhrasalConstraint(force_tokens),
            ]

            beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs(
                input_ids.shape[0], max_length, constraints, num_return_sequences=1
            )
            output_generate, output_beam_search = self._constrained_beam_search_generate(
                model=model,
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_length=max_length,
                constrained_beam_scorer=beam_scorer,
                constraints=constraints,
                beam_kwargs=beam_kwargs,
                logits_processor=logits_processor,
                logits_process_kwargs=logits_process_kwargs,
            )
            self.assertListEqual(output_generate.tolist(), output_beam_search.tolist())

            for generation_output in output_generate:
                self._check_sequence_inside_sequence(force_tokens, generation_output)

            # check `generate()` and `constrained_beam_search()` are equal for `num_return_sequences`
            # Sample constraints
            force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0]
            constraints = [
                PhrasalConstraint(force_tokens),
            ]

            num_return_sequences = 2
            max_length = 20

            beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs(
                input_ids.shape[0], max_length, constraints, num_return_sequences=num_return_sequences
            )

            output_generate, output_beam_search = self._constrained_beam_search_generate(
                model=model,
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_length=max_length,
                constrained_beam_scorer=beam_scorer,
                constraints=constraints,
                beam_kwargs=beam_kwargs,
                logits_processor=logits_processor,
                logits_process_kwargs=logits_process_kwargs,
            )
            self.assertListEqual(output_generate.tolist(), output_beam_search.tolist())

            for generation_output in output_generate:
                self._check_sequence_inside_sequence(force_tokens, generation_output)

    def test_constrained_beam_search_generate_dict_output(self):
        for model_class in self.all_generative_model_classes:
            config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

            # disable cache
            config.use_cache = False

            # It is important to set the eos_token_id to None to ensure that no sequences
            # shorter than `max_length` can be generated, which could lead to flaky CircleCI
            # failures if
the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 20 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) # Sample constraints min_id = 3 max_id = model.config.vocab_size force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0] constraints = [ PhrasalConstraint(force_tokens), ] beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs( input_ids.shape[0], max_length, constraints, num_return_sequences=1 ) output_generate, output_beam_search = self._constrained_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, constrained_beam_scorer=beam_scorer, constraints=constraints, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_beam_search, BeamSearchEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput) else: self.assertIsInstance(output_beam_search, BeamSearchDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_beam_search.sequences.tolist()) self.assertTrue( torch.allclose(output_generate["sequences_scores"], output_beam_search["sequences_scores"], atol=1e-3) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_beam_search, output_generate): self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams) def test_contrastive_generate(self): # check `generate()` and `contrastive_search()` are equal for model_class in self.all_generative_model_classes: # won't fix: FSMT and Reformer have a different cache variable type (and format). if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): self.skipTest("Won't fix: old model with different cache format") config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # NOTE: contrastive search only works with cache on at the moment. if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True # test old generation output for backwards compatibility model = model_class(config).to(torch_device).eval() output_contrastive, output_generate = self._contrastive_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length ) self.assertListEqual(output_contrastive.tolist(), output_generate.tolist()) def test_contrastive_generate_dict_outputs_use_cache(self): for model_class in self.all_generative_model_classes: # won't fix: FSMT and Reformer have a different cache variable type (and format). 
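            # For reference, contrastive search is reached through the public `generate` API by passing
            # `penalty_alpha > 0` together with `top_k > 1`; a minimal sketch with toy values (not exercised
            # by this test) would be:
            #     model.generate(input_ids, penalty_alpha=0.6, top_k=4, max_new_tokens=20)
            # With `penalty_alpha=0` the degeneration penalty is disabled and the search reduces to greedy.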
            if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]):
                self.skipTest("Won't fix: old model with different cache format")

            # enable cache
            config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

            # NOTE: contrastive search only works with cache on at the moment.
            if not hasattr(config, "use_cache"):
                self.skipTest("This model doesn't support caching")
            config.use_cache = True
            config.is_decoder = True

            model = model_class(config).to(torch_device).eval()
            output_contrastive, output_generate = self._contrastive_generate(
                model=model,
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_length=max_length,
                output_scores=True,
                output_hidden_states=True,
                output_attentions=True,
                return_dict_in_generate=True,
            )

            self.assertListEqual(output_generate.sequences.tolist(), output_contrastive.sequences.tolist())

            for output in (output_contrastive, output_generate):
                self._check_outputs(output, input_ids, model.config, use_cache=True)

    def test_contrastive_generate_low_memory(self):
        # Check that choosing 'low_memory' does not change the model output
        for model_class in self.all_generative_model_classes:
            if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer", "speech2text"]):
                self.skipTest("Won't fix: old model with different cache format")

            if any(model_name in model_class.__name__.lower() for model_name in ["gptbigcode"]):
                self.skipTest("TODO: fix me")

            config, input_ids, attention_mask, max_length = self._get_input_ids_and_config(batch_size=1)

            # NOTE: contrastive search only works with cache on at the moment.
            if not hasattr(config, "use_cache"):
                self.skipTest("This model doesn't support caching")

            config.use_cache = True
            config.is_decoder = True

            # test output equality of low versus high memory
            model = model_class(config).to(torch_device).eval()

            low_output = model.generate(
                input_ids,
                top_k=4,
                penalty_alpha=0.6,
                low_memory=True,
                max_length=max_length,
                attention_mask=attention_mask,
            )

            high_output = model.generate(
                input_ids,
                top_k=4,
                penalty_alpha=0.6,
                low_memory=False,
                max_length=max_length,
                attention_mask=attention_mask,
            )
            self.assertListEqual(low_output.tolist(), high_output.tolist())

    @is_flaky()  # Read NOTE (1) below. If there are API issues, all attempts will fail.
    def test_assisted_decoding_matches_greedy_search(self):
        # This test ensures that the assisted generation does not introduce output changes over greedy search.
        # NOTE (1): The sentence above is true most of the time, but there is a tiny difference in the logits due to
        # matmul shape differences -- and it may result in a different output. The input shape difference happens in
        # the main model, which runs the forward pass with several candidates at once (as opposed to generating one
        # token at a time). See https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535 for
        # more info.
        # NOTE (2): It breaks the pattern in the tests above, for multiple reasons:
        # - assisted_decoding, unlike the other methods, can't be called on its own (e.g.
needs to # prepare the assistant encoder outputs in the main generate body); # - assisted_decoding does not support `use_cache = False` # - assisted_decoding does not support `batch_size > 1` for model_class in self.all_generative_model_classes: if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): self.skipTest("Won't fix: old model with different cache format") if any( model_name in model_class.__name__.lower() for model_name in [ "bigbirdpegasus", "led", "mega", "speech2text", "git", "prophetnet", "seamlessm4t", "clvp", ] ): self.skipTest("May fix in the future: need model-specific fixes") # enable cache config, input_ids, attention_mask, _ = self._get_input_ids_and_config(batch_size=1) # NOTE: assisted generation only works with cache on at the moment. if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() # Sets assisted generation arguments such that: # a) no EOS is generated, to ensure generation doesn't break early # b) the assistant model always generates two tokens when it is called, to ensure the input preparation of # the assistant model is correct # c) there are at least two forward passes in the main model, to ensure the input preparation of # the main model is correct generation_kwargs = { "eos_token_id": -1, # see a) "max_new_tokens": 4, # see c) "num_beams": 1, "do_sample": False, "output_scores": True, "output_hidden_states": True, "output_attentions": True, "return_dict_in_generate": True, } output_greedy = model.generate(input_ids, attention_mask=attention_mask, **generation_kwargs) assistant_model = model assistant_model.generation_config.num_assistant_tokens = 2 # see b) assistant_model.generation_config.num_assistant_tokens_schedule = "constant" # see b) generation_kwargs.update({"assistant_model": assistant_model}) output_assisted = model.generate(input_ids, attention_mask=attention_mask, **generation_kwargs) # The two outputs must match and their shape must be as expected self.assertListEqual(output_greedy.sequences.tolist(), output_assisted.sequences.tolist()) for output in (output_greedy, output_assisted): self._check_outputs(output, input_ids, model.config, use_cache=True) def test_assisted_decoding_sample(self): # In this test we don't check assisted vs non-assisted output -- seeded assisted decoding with sample will not # match sample for the same seed, as the forward pass does not return the exact same logits (due to matmul with # different shapes, see https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535). for model_class in self.all_generative_model_classes: if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): self.skipTest("Won't fix: old model with different cache format") if any( model_name in model_class.__name__.lower() for model_name in [ "bigbirdpegasus", "led", "mega", "speech2text", "git", "prophetnet", "seamlessm4t", "clvp", ] ): self.skipTest("May fix in the future: need model-specific fixes") # enable cache config, input_ids, attention_mask, _ = self._get_input_ids_and_config(batch_size=1) # NOTE: assisted generation only works with cache on at the moment. 
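            # For reference, assisted generation is driven entirely through the public `generate` API by passing an
            # `assistant_model`; a minimal sketch (hypothetical small checkpoint, not exercised by this test):
            #     assistant = AutoModelForCausalLM.from_pretrained("<path-to-small-checkpoint>")
            #     model.generate(input_ids, assistant_model=assistant, do_sample=True)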
if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() # Sets assisted generation arguments such that: # a) no EOS is generated, to ensure generation doesn't break early # b) the assistant model always generates two tokens when it is called, to ensure the input preparation of # the assistant model is correct # c) there are at least two forward passes in the main model, to ensure the input preparation of # the main model is correct assistant_model = model assistant_model.generation_config.num_assistant_tokens = 2 # see b) assistant_model.generation_config.num_assistant_tokens_schedule = "constant" # see b) generation_kwargs = { "eos_token_id": -1, # see a) "max_new_tokens": 4, # see c) "num_beams": 1, "do_sample": True, "assistant_model": assistant_model, "output_scores": True, "output_hidden_states": True, "output_attentions": True, "return_dict_in_generate": True, } output_assisted = model.generate(input_ids, attention_mask=attention_mask, **generation_kwargs) self._check_outputs(output_assisted, input_ids, model.config, use_cache=True) def test_generate_with_head_masking(self): """Test designed for encoder-decoder models to ensure the attention head masking is used.""" attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"] for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # We want to test only encoder-decoder models if not config.is_encoder_decoder: continue model = model_class(config).to(torch_device) head_masking = { "head_mask": torch.zeros(config.encoder_layers, config.encoder_attention_heads, device=torch_device), "decoder_head_mask": torch.zeros( config.decoder_layers, config.decoder_attention_heads, device=torch_device ), "cross_attn_head_mask": torch.zeros( config.decoder_layers, config.decoder_attention_heads, device=torch_device ), } signature = inspect.signature(model.forward) # We want to test only models where encoder/decoder head masking is implemented if not set(head_masking.keys()) < {*signature.parameters.keys()}: continue for attn_name, (name, mask) in zip(attention_names, head_masking.items()): out = model.generate( input_ids, attention_mask=attention_mask, num_beams=1, output_attentions=True, return_dict_in_generate=True, remove_invalid_values=True, **{name: mask}, ) # We check the state of decoder_attentions and cross_attentions just from the last step attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0) def test_left_padding_compatibility(self): # The check done in this test is fairly difficult -- depending on the model architecture, passing the right # position index for the position embeddings can still result in a different output, due to numerical masking. # On the other hand, for some types of position embeddings, an incorrect position index can have a minimal # impact on the output. # There are two tricks employed to check whether left-padding compatibility is in place: # 1 - To reduce the negative impact of the numerical attention mask on a correct position index, we set the # padding size to 1. # 2 - To reduce the chance of false positives (i.e. passing when it should be failing), we run the check # multiple times with random inputs, and it has to pass with all of them. 
# NOTE: because of 2), there is some chance of false positives in this test. for model_class in self.all_generative_model_classes: config, _, _, _ = self._get_input_ids_and_config() if config.is_encoder_decoder: continue # skip for encoder-decoder models -- they don't need left-padding compatibility model = model_class(config).to(torch_device).eval() signature = inspect.signature(model.forward).parameters.keys() no_failures = True for _ in range(10): # there may be false positives with 10 runs, we rely on the CI to catch the flakiness _, input_ids, attention_mask, _ = self._get_input_ids_and_config() model_kwargs = {"input_ids": input_ids, "attention_mask": attention_mask} if "position_ids" in signature: position_ids = torch.cumsum(attention_mask, dim=-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) model_kwargs["position_ids"] = position_ids next_logits_wo_padding = model(**model_kwargs).logits[:, -1, :] pad_size = (input_ids.shape[0], 1) padding = torch.ones(pad_size, dtype=input_ids.dtype, device=torch_device) * config.pad_token_id padded_input_ids = torch.cat((padding, input_ids), dim=1) padded_attention_mask = torch.cat((torch.zeros_like(padding), attention_mask), dim=1) model_kwargs = {"input_ids": padded_input_ids, "attention_mask": padded_attention_mask} if "position_ids" in signature: position_ids = torch.cumsum(padded_attention_mask, dim=-1) - 1 position_ids.masked_fill_(padded_attention_mask == 0, 1) model_kwargs["position_ids"] = position_ids next_logits_with_padding = model(**model_kwargs).logits[:, -1, :] if not torch.allclose(next_logits_wo_padding, next_logits_with_padding, atol=1e-7): no_failures = False break self.assertTrue(no_failures) def test_past_key_values_format(self): # Test that the KV cache is formatted correctly. Exceptions need to explicitly overwrite this test. Having a # standard KV cache format is important for a consistent API (and for advanced generation methods). for model_class in self.all_generative_model_classes: config, inputs = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(config, "use_cache"): self.skipTest("This model doesn't support caching") model = model_class(config).to(torch_device) if "use_cache" not in inputs: inputs["use_cache"] = True outputs = model(**inputs) # If "past_key_values" is not returned, pass the test (e.g. 
RWKV uses a different cache name and format) if "past_key_values" not in outputs: self.skipTest("This model doesn't return `past_key_values`") num_hidden_layers = ( getattr(config, "decoder_layers", None) or getattr(config, "num_decoder_layers", None) or config.num_hidden_layers ) num_attention_heads = getattr(config, "decoder_attention_heads", config.num_attention_heads) embed_dim = getattr(config, "d_model", config.hidden_size) per_head_embed_dim = embed_dim // num_attention_heads past_kv = outputs["past_key_values"] self.assertEqual(len(past_kv), num_hidden_layers) # Encoder-Decoder checks if config.is_encoder_decoder: encoder_num_attention_heads = config.encoder_attention_heads encoder_per_head_embed_dim = embed_dim // encoder_num_attention_heads batch_size, seq_length = inputs["decoder_input_ids"].shape for i in range(num_hidden_layers): self.assertEqual(len(past_kv[i]), 4) # K V for the decoder + K V for the encoder = 4 self.assertEqual( past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) # The sequence length for the encoder K V depends on the model. Since it is not manipulated in # autoregressive generation, I'm keeping the test general and not checking the 3rd dim self.assertEqual( (past_kv[i][2].shape[0], past_kv[i][2].shape[1], past_kv[i][2].shape[3]), (batch_size, encoder_num_attention_heads, encoder_per_head_embed_dim), ) self.assertEqual( (past_kv[i][3].shape[0], past_kv[i][3].shape[1], past_kv[i][3].shape[3]), (batch_size, encoder_num_attention_heads, encoder_per_head_embed_dim), ) # Decoder-only checks else: # TODO: this line is only needed because of imagegpt, where "pixel_values" = "input_ids". 
Fix the
                # tests in imagegpt such that `prepare_config_and_inputs_for_common` returns the latter (and the other
                # tests use it)
                key = "input_ids" if "input_ids" in inputs else "pixel_values"
                batch_size, seq_length = inputs[key].shape
                for i in range(num_hidden_layers):
                    self.assertEqual(len(past_kv[i]), 2)  # K V for the decoder = 2
                    self.assertEqual(
                        past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                    )
                    self.assertEqual(
                        past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                    )

    def test_generate_from_inputs_embeds_decoder_only(self):
        # When supported, tests that the decoder model can generate from `inputs_embeds` instead of `input_ids`.
        # If this fails, you should probably update the `prepare_inputs_for_generation` function.
        for model_class in self.all_generative_model_classes:
            config, input_ids, _, _ = self._get_input_ids_and_config()

            # Ignore:
            # a) eos (to always output 20 tokens) and pad (so we don't try to infer the attn mask from the input_ids,
            #    which would cause a mismatch),
            config.pad_token_id = config.eos_token_id = -1
            # b) embedding scaling, the scaling factor applied after embedding from input_ids (requires knowledge of
            #    the variable that holds the scaling factor, which is model-dependent)
            if hasattr(config, "scale_embedding"):
                config.scale_embedding = False

            # This test is for decoder-only models (encoder-decoder models have native input embeddings support in the
            # decoder)
            if config.is_encoder_decoder:
                continue

            # Skip models without explicit support
            model = model_class(config).to(torch_device).eval()
            if "inputs_embeds" not in inspect.signature(model.prepare_inputs_for_generation).parameters.keys():
                continue

            # Traditional way of generating text
            outputs_from_ids = model.generate(input_ids)
            self.assertEqual(outputs_from_ids.shape, (2, 20))

            # Same thing, but from input embeddings (`input_ids` is passed so the prompt is present in the output)
            inputs_embeds = model.get_input_embeddings()(input_ids)
            outputs_from_embeds = model.generate(input_ids, inputs_embeds=inputs_embeds)
            self.assertListEqual(outputs_from_ids.tolist(), outputs_from_embeds.tolist())

            # But if we pass different inputs_embeds, we should get different outputs
            torch.manual_seed(0)
            random_embeds = torch.rand_like(inputs_embeds)
            outputs_from_rand_embeds = model.generate(input_ids, inputs_embeds=random_embeds)
            with self.assertRaises(AssertionError):
                self.assertListEqual(outputs_from_rand_embeds.tolist(), outputs_from_embeds.tolist())

            # input_ids is not a required input -- if we don't pass it, the newly generated tokens will be the same
            outputs_from_embeds_wo_ids = model.generate(
                inputs_embeds=inputs_embeds, max_new_tokens=20 - inputs_embeds.shape[1]
            )
            self.assertListEqual(
                outputs_from_embeds[:, inputs_embeds.shape[1] :].tolist(),
                outputs_from_embeds_wo_ids[:, 1:].tolist(),
            )

    def test_generate_continue_from_past_key_values(self):
        # Tests that we can continue generating from past key values, returned from a previous `generate` call
        for model_class in self.all_generative_model_classes:
            if any(model_name in model_class.__name__.lower() for model_name in ["imagegpt"]):
                self.skipTest("Won't fix: old model with unique inputs/caches/other")
            if any(model_name in model_class.__name__.lower() for model_name in ["umt5"]):
                self.skipTest("TODO: needs modeling or test input preparation fixes for compatibility")

            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            if not hasattr(config, "use_cache"):
                self.skipTest("This model doesn't support caching")

            # Let's make it always:
            # 1. use cache (for obvious reasons)
            # 2. generate to max length (which can be achieved by setting the eos token to an invalid value); without
            #    this, an early EOS would make the test flaky (e.g. EOS is generated on iteration 1 on both
            #    generations, but the continuation would force it to generate beyond an EOS token)
            # 3. ignore `token_type_ids` for simplicity
            # 4. ignore `forced_eos_token_id`, which requires further manipulation of the continuation inputs and is
            #    active by default on some models
            config.use_cache = True
            if "token_type_ids" in inputs:
                del inputs["token_type_ids"]

            model = model_class(config).to(torch_device)
            model.eval()
            model.generation_config.pad_token_id = model.generation_config.eos_token_id = -1
            model.generation_config.forced_eos_token_id = None

            # If "past_key_values" is not returned, skip the test (e.g. RWKV uses a different cache name and format)
            outputs = model(**inputs)
            if "past_key_values" not in outputs:
                self.skipTest("This model doesn't return `past_key_values`")

            # Traditional way of generating text, with `return_dict_in_generate` to return the past key values
            outputs = model.generate(**inputs, do_sample=False, max_new_tokens=4, return_dict_in_generate=True)

            # Let's generate again, but passing the past key values in between (3 + 1 = 4 tokens). Note that the
            # inputs may need to be tweaked across `generate` calls (like the attention mask).
            outputs_cached = model.generate(**inputs, do_sample=False, max_new_tokens=3, return_dict_in_generate=True)

            # Continue from the tokens generated above, preparing the inputs accordingly
            inputs["past_key_values"] = outputs_cached.past_key_values
            new_attention_len = outputs_cached.sequences.shape[-1]
            if config.is_encoder_decoder:
                inputs["decoder_input_ids"] = outputs_cached.sequences
                if "decoder_attention_mask" in inputs:
                    inputs["decoder_attention_mask"] = torch.nn.functional.pad(
                        inputs["decoder_attention_mask"],
                        (0, new_attention_len - inputs["decoder_attention_mask"].shape[1]),
                        mode="constant",
                        value=1,
                    )
            else:
                inputs["input_ids"] = outputs_cached.sequences
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = torch.nn.functional.pad(
                        inputs["attention_mask"],
                        (0, new_attention_len - inputs["attention_mask"].shape[1]),
                        mode="constant",
                        value=1,
                    )
            outputs_cached = model.generate(**inputs, do_sample=False, max_new_tokens=1, return_dict_in_generate=True)

            # The two sets of generated text and past kv should be equal to each other
            self.assertListEqual(outputs.sequences.tolist(), outputs_cached.sequences.tolist())
            for layer_idx in range(len(outputs_cached.past_key_values)):
                for kv_idx in range(len(outputs_cached.past_key_values[layer_idx])):
                    self.assertTrue(
                        torch.allclose(
                            outputs.past_key_values[layer_idx][kv_idx],
                            outputs_cached.past_key_values[layer_idx][kv_idx],
                        )
                    )

    @parameterized.expand([(1, False), (1, True), (4, False)])
    def test_new_cache_format(self, num_beams, do_sample):
        # Tests that generating with the new format is exactly the same as the legacy one (for models that support it).
        # 👉 tests with and without beam search so that we can test with and without cache reordering.
        # 👉 tests with and without sampling so we can cover the most common use cases.
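        # For context: the legacy cache is a tuple of per-layer (key, value) tensor tuples, while the new format
        # wraps the same tensors in a `DynamicCache` object. The two are bridged by explicit converters (sketch):
        #     cache = DynamicCache.from_legacy_cache(past_key_values)
        #     past_key_values = cache.to_legacy_cache()
        # The assertions below exercise exactly these two conversion directions.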
for model_class in self.all_generative_model_classes: if not model_class._supports_cache_class: self.skipTest("This model does not support the new cache format") config, input_ids, attention_mask, _ = self._get_input_ids_and_config() config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() generation_kwargs = { "max_new_tokens": 5, "do_sample": do_sample, "num_beams": num_beams, "num_return_sequences": num_beams, "return_dict_in_generate": True, # Required to return `past_key_values` } # Sets seed before calling `generate` for the case with do_sample=True seed = torch.randint(0, 1000000, (1,)).item() set_seed(seed) legacy_results = model.generate(input_ids, attention_mask=attention_mask, **generation_kwargs) set_seed(seed) new_results = model.generate( input_ids, attention_mask=attention_mask, past_key_values=DynamicCache(), **generation_kwargs ) # The two sets of generated sequences must match, despite the cache format between forward passes being # different self.assertListEqual(legacy_results.sequences.tolist(), new_results.sequences.tolist()) self.assertTrue(isinstance(legacy_results.past_key_values, tuple)) self.assertTrue(isinstance(new_results.past_key_values, DynamicCache)) # The contents of the two caches, when converted to the same format (in both directions!), must match legacy_cache = legacy_results.past_key_values new_cache_converted = new_results.past_key_values.to_legacy_cache() for layer_idx in range(len(legacy_cache)): for kv_idx in range(len(legacy_cache[layer_idx])): self.assertTrue( torch.allclose( legacy_cache[layer_idx][kv_idx], new_cache_converted[layer_idx][kv_idx], ) ) new_cache = new_results.past_key_values legacy_cache_converted = DynamicCache.from_legacy_cache(legacy_results.past_key_values) for layer_idx in range(len(new_cache)): for kv_idx in range(len(new_cache[layer_idx])): self.assertTrue( torch.allclose( new_cache[layer_idx][kv_idx], legacy_cache_converted[layer_idx][kv_idx], ) ) def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): batch_size, seq_length = input_ids.shape num_sequences_in_output = batch_size * num_return_sequences gen_len = ( output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length ) # scores self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config) # Attentions if config.is_encoder_decoder: # encoder self._check_encoder_attention_for_generate(output.encoder_attentions, batch_size, config, seq_length) # decoder self._check_attentions_for_generate( num_sequences_in_output, output.decoder_attentions, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) else: # if use_cache first input is equal to no use_cache, so skip here attentions = output.attentions if not use_cache else output.attentions[1:] min_length = seq_length if not use_cache else seq_length + 1 self._check_attentions_for_generate( num_sequences_in_output, attentions=attentions, min_length=min_length, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) # Hidden States if config.is_encoder_decoder: # encoder self._check_encoder_hidden_states_for_generate( output.encoder_hidden_states, batch_size, config, seq_length ) # decoder self._check_hidden_states_for_generate( num_sequences_in_output, output.decoder_hidden_states, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) else: # if use_cache first input is equal to no 
use_cache, so skip here hidden_states = output.hidden_states if not use_cache else output.hidden_states[1:] min_length = seq_length if not use_cache else seq_length + 1 self._check_hidden_states_for_generate( num_sequences_in_output, hidden_states, min_length=min_length, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) # Past Key Value States -- three notes here: # 1. Its inner sequence length is with respect to the inputs of the latest forward pass, hence the "-1" # 2. Some old models still return `output.past_key_values` even without `use_cache=True` # 3. TODO (joao): A few models have different formats, skipping those until the cache refactor is complete models_without_standard_cache = ("bloom", "ctrl", "fsmt", "gptbigcode", "mega", "reformer") has_standard_cache = not any( model_name in config.__class__.__name__.lower() for model_name in models_without_standard_cache ) if use_cache and has_standard_cache: past_key_values = output.past_key_values past_sequence_length = output.sequences.shape[-1] - 1 self._check_past_key_values_for_generate( num_sequences_in_output, past_key_values, seq_length=past_sequence_length, config=config, ) def _check_scores(self, batch_size, scores, length, config): expected_shape = (batch_size, config.vocab_size) self.assertIsInstance(scores, tuple) self.assertEqual(len(scores), length) self.assertListEqual([iter_scores.shape for iter_scores in scores], [expected_shape] * len(scores)) def _check_attentions_for_generate( self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(attentions, tuple) self.assertListEqual( [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions) ) self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups) for idx, iter_attentions in enumerate(attentions): tgt_len = min_length + idx if not use_cache else 1 src_len = min_length + idx expected_shape = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions) ) def _check_encoder_attention_for_generate(self, attentions, batch_size, config, seq_length): encoder_expected_shape = (batch_size, config.num_attention_heads, seq_length, seq_length) self.assertIsInstance(attentions, tuple) self.assertListEqual( [layer_attentions.shape for layer_attentions in attentions], [encoder_expected_shape] * len(attentions), ) def _check_hidden_states_for_generate( self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(hidden_states, tuple) self.assertListEqual( [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], [True] * len(hidden_states), ) self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups) for idx, iter_hidden_states in enumerate(hidden_states): seq_len = min_length + idx if not use_cache else 1 expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(iter_hidden_states), ) def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, seq_length): encoder_expected_shape = (batch_size, seq_length, config.hidden_size) self.assertIsInstance(hidden_states, tuple) self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in hidden_states], [encoder_expected_shape] * len(hidden_states), ) def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config, num_beam_groups=1): self.assertIsInstance(past_key_values, tuple) self.assertListEqual( [isinstance(iter_past_key_values, tuple) for iter_past_key_values in past_key_values], [True] * len(past_key_values), ) # (batch, head, seq_length, head_features) expected_shape = ( batch_size * num_beam_groups, config.num_key_value_heads if hasattr(config, "num_key_value_heads") else config.num_attention_heads, seq_length, config.hidden_size // config.num_attention_heads, ) # check key and value shapes self.assertListEqual( [layer_past_key_values[0].shape for layer_past_key_values in past_key_values], [expected_shape] * len(past_key_values), ) self.assertListEqual( [layer_past_key_values[1].shape for layer_past_key_values in past_key_values], [expected_shape] * len(past_key_values), ) def _check_sequence_inside_sequence(self, tensor_1, tensor_2): # check whether tensor_1 is contained inside tensor_2, or vice versa. # both are converted to lists on the same device (CPU); the specific device doesn't matter. if not isinstance(tensor_1, list): tensor_1 = tensor_1.cpu().tolist() if not isinstance(tensor_2, list): tensor_2 = tensor_2.cpu().tolist() in_order = len(tensor_1) <= len(tensor_2) longer = tensor_2 if in_order else tensor_1 shorter = tensor_1 if in_order else tensor_2 flag = False chunk_size = len(shorter) for chunk_idx in range(len(longer) - chunk_size + 1): subseq = longer[chunk_idx : chunk_idx + chunk_size] if subseq == shorter: flag = True break self.assertTrue(flag) @require_torch class UtilsFunctionsTest(unittest.TestCase): # tests whether the top_k_top_p function behaves as expected def test_top_k_top_p_filtering(self): logits = torch.tensor( [ [ 8.2220991, # 3rd highest value; idx. 0 -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953, 7.35341276, 8.43207833, # 2nd highest value; idx. 10 -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637, 7.44020759, # 4th highest value; idx. 25 9.38450987, # 1st highest value; idx. 26 2.12662941, -9.32562038, 2.35652522, ], # cumulative prob of 4 highest values <= 0.6 [ 0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922, # 4th highest value; idx. 13 2.28520723, 4.82767506, 4.30421368, 8.8275313, # 2nd highest value; idx. 17 5.44029958, -4.4735794, 7.38579536, # 3rd highest value; idx. 20 -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918, 9.67702323, # 1st highest value; idx.
27 -5.89478553, 1.85370467, ], # cumulative prob of 4 highest values <= 0.6 ], dtype=torch.float, device=torch_device, ) non_inf_expected_idx = torch.tensor( [[0, 0], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 20], [1, 27]], dtype=torch.long, device=torch_device, ) # expected non-filtered indices, as noted above non_inf_expected_output = torch.tensor( [ 8.2221, 8.4321, 7.4402, 9.3845, 6.2712, 8.8275, 7.3858, 9.6770, ], # expected non-filtered values, as noted above dtype=torch.float, device=torch_device, ) output = top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4) non_inf_output = output[output != -float("inf")].to(device=torch_device) non_inf_idx = (output != -float("inf")).nonzero().to(device=torch_device) self.assertTrue(torch.allclose(non_inf_expected_output, non_inf_output, atol=1e-12)) self.assertTrue(torch.all(torch.eq(non_inf_expected_idx, non_inf_idx))) # tests whether the function uses filter_value instead of the default -inf def test_top_k_top_p_filtering_with_filter_value(self): logits = torch.tensor( [ [ 1, 1, 1, 0.99, # gets filtered by top-p filtering 0.98, # gets filtered by top-k filtering ] ], dtype=torch.float, device=torch_device, ) expected_output = torch.tensor( [[1, 1, 1, 0, 0]], dtype=torch.float, device=torch_device, ) output = top_k_top_p_filtering(logits, top_k=4, top_p=0.5, filter_value=0.0) self.assertTrue(torch.allclose(expected_output, output, atol=1e-12)) @require_torch class GenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_torch_available(): framework_dependent_parameters = { "AutoModelForCausalLM": AutoModelForCausalLM, "AutoModelForSpeechSeq2Seq": AutoModelForSpeechSeq2Seq, "AutoModelForSeq2SeqLM": AutoModelForSeq2SeqLM, "AutoModelForVision2Seq": AutoModelForVision2Seq, "LogitsProcessorList": LogitsProcessorList, "MinLengthLogitsProcessor": MinLengthLogitsProcessor, "create_tensor_fn": torch.tensor, "floats_tensor": floats_tensor, "return_tensors": "pt", } @slow def test_diverse_beam_search(self): # PT-only test: TF doesn't have a diverse beam search implementation article = """Justin Timberlake and Jessica Biel, welcome to parenthood. The celebrity couple announced the arrival of their son, Silas Randall Timberlake, in statements to People. "Silas was the middle name of Timberlake's maternal grandfather Bill Bomar, who died in 2012, while Randall is the musician's own middle name, as well as his father's first," People reports. The couple announced the pregnancy in January, with an Instagram post. It is the first baby for both.""" bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn").to(torch_device) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) outputs = bart_model.generate( input_ids, num_beams=4, num_return_sequences=2, num_beam_groups=4, diversity_penalty=2.0, remove_invalid_values=True, ) generated_text = bart_tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The couple announced the birth of their son, Silas Randall Timberlake, in a statement. Silas was the" " middle name of Timberlake's maternal grandfather Bill Bomar. Randall is the musician's own middle" " name, as well as his father's first. It is the first baby for both of them.", "Justin Timberlake and Jessica Biel have a son.
The baby is named Silas Randall Timberlake. It is the" " first child for both. The couple announced the pregnancy in January. The name Silas is the middle" " name of Timberlake's maternal grandfather. It's also his own middle name.", ], ) def test_max_length_backward_compat_greedy(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) max_length = 20 input_ids = input_ids.expand(2, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids, model_kwargs = bart_model._prepare_decoder_input_ids_for_generation( batch_size=input_ids.shape[0], model_input_name=bart_model.main_input_name, model_kwargs=model_kwargs, decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) with self.assertWarns(UserWarning): bart_model.greedy_search( input_ids, max_length=max_length, pad_token_id=bart_model.config.pad_token_id, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) def test_max_length_backward_compat_sample(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) max_length = 20 input_ids = input_ids.expand(2, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids, model_kwargs = bart_model._prepare_decoder_input_ids_for_generation( batch_size=input_ids.shape[0], model_input_name=bart_model.main_input_name, model_kwargs=model_kwargs, decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) with torch.no_grad(): with self.assertWarns(UserWarning): bart_model.sample( input_ids, max_length=max_length, pad_token_id=bart_model.config.pad_token_id, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) def test_max_length_backward_compat_beam_search(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) batch_size = 1 max_length = 20 num_beams = 2 input_ids = input_ids.expand(2, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids, model_kwargs = bart_model._prepare_decoder_input_ids_for_generation( batch_size=input_ids.shape[0], model_input_name=bart_model.main_input_name, model_kwargs=model_kwargs, decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, ) with self.assertWarns(UserWarning): _ = bart_model.beam_search( 
input_ids, num_beams=num_beams, max_length=max_length, beam_scorer=beam_scorer, **model_kwargs ) def test_max_length_backward_compat_group_beam_search(self): # PT-only test: TF doesn't have StoppingCriteria & group beam search article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) batch_size = 1 max_length = 20 num_beams = 6 num_beam_groups = 3 num_return_sequences = num_beams * batch_size input_ids = input_ids.expand(6, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids, model_kwargs = bart_model._prepare_decoder_input_ids_for_generation( batch_size=input_ids.shape[0], model_input_name=bart_model.main_input_name, model_kwargs=model_kwargs, decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) diverse_beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=num_beam_groups, ) with self.assertWarns(UserWarning): bart_model.group_beam_search( input_ids, diverse_beam_scorer, num_beams=num_beams, max_length=max_length, **model_kwargs ) def test_max_length_warning_if_different(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) batch_size = 1 max_length = 20 num_beams = 6 num_beam_groups = 3 num_return_sequences = num_beams * batch_size stopping_criteria_max_length = 18 stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=stopping_criteria_max_length)]) # Greedy input_ids = input_ids.expand(6, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids, model_kwargs = bart_model._prepare_decoder_input_ids_for_generation( batch_size=input_ids.shape[0], model_input_name=bart_model.main_input_name, model_kwargs=model_kwargs, decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) with self.assertWarns(UserWarning): bart_model.greedy_search( input_ids, max_length=max_length, pad_token_id=bart_model.config.pad_token_id, stopping_criteria=stopping_criteria, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) # Sample with self.assertWarns(UserWarning): with torch.no_grad(): bart_model.sample( input_ids, max_length=max_length, stopping_criteria=stopping_criteria, pad_token_id=bart_model.config.pad_token_id, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) # Beam beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, ) with self.assertWarns(UserWarning): with torch.no_grad(): bart_model.beam_search( input_ids, num_beams=num_beams, stopping_criteria=stopping_criteria, max_length=max_length, beam_scorer=beam_scorer, **model_kwargs, ) # Grouped beam search diverse_beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, 
num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=num_beam_groups, ) with self.assertWarns(UserWarning): bart_model.group_beam_search( input_ids, diverse_beam_scorer, stopping_criteria=stopping_criteria, num_beams=num_beams, max_length=max_length, **model_kwargs, ) def test_custom_stopping_criteria_overload_error(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random") bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) stopping_criteria = StoppingCriteriaList() stopping_criteria.append(MaxLengthCriteria(max_length=42)) with self.assertRaises(ValueError): bart_model.generate(input_ids, stopping_criteria=stopping_criteria) with self.assertRaises(ValueError): bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=32) def test_custom_stopping_criteria(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random") bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) class DummyCriteria(StoppingCriteria): def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: return input_ids.shape[-1] >= 20 stopping_criteria = StoppingCriteriaList() stopping_criteria.append(DummyCriteria()) self.assertEqual( list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=22).shape), [1, 20], ) self.assertEqual( list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=18).shape), [1, 18], ) def test_stop_sequence_stopping_criteria(self): # PT-only test: TF doesn't have StoppingCriteria prompt = """Hello I believe in""" generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-bart") output = generator(prompt) self.assertEqual( output, [ { "generated_text": ( "Hello I believe in in in number number number number number number number number number" ) } ], ) output = generator(prompt, stop_sequence=" number") self.assertEqual(output, [{"generated_text": "Hello I believe in in in number"}]) def test_generate_non_nlp_input_ids_as_kwarg(self): # PT-only test: AFAIK there's no non-NLP model architecture in TF that supports `input_ids` as its only input model = ImageGPTForCausalImageModeling.from_pretrained( "hf-internal-testing/tiny-random-imagegpt", max_length=10 ).to(torch_device) input_ids = ids_tensor((3, 5), vocab_size=10) output_sequences_kwargs = model.generate(input_ids=input_ids).cpu() output_sequences = model.generate(input_ids).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist()) self.assertEqual(output_sequences.shape, (3, 10)) def test_generate_input_values_as_encoder_kwarg(self): # PT-only test: AFAIK there's no generate-capable architecture in TF that supports `input_values` as its input input_values = floats_tensor((2, 250)) model = SpeechEncoderDecoderModel.from_pretrained("hf-internal-testing/tiny-random-speech-encoder-decoder") model = model.to(torch_device) output_sequences_kwargs = model.generate(input_values=input_values, max_length=5).cpu() output_sequences = 
model.generate(input_values, max_length=5).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist()) self.assertEqual(output_sequences.shape, (2, 5)) def test_transition_scores_group_beam_search_encoder_decoder(self): # PT-only test: TF doesn't have group beam search articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=10, num_beams=2, num_beam_groups=2, num_return_sequences=2, diversity_penalty=1.0, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) model = model.to(torch_device) input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices) transition_scores_sum = transition_scores.sum(-1) self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3)) @slow def test_beam_search_example_integration(self): # PT-only test: TF doesn't have a BeamSearchScorer # exactly the example provided in the docstrings of beam search, which previously # failed after directly copying from it. Refer to PR #15555 tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run beam search using 3 beams num_beams = 3 # define decoder start token ids input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = { "encoder_outputs": model.get_encoder()( encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ) } # instantiate beam scorer beam_scorer = BeamSearchScorer( batch_size=1, num_beams=num_beams, device=model.device, ) # instantiate logits processors logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ] ) outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt bist du?"]) @slow def test_constrained_beam_search(self): # PT-only test: TF doesn't have constrained beam search model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") force_tokens = tokenizer("scared", add_prefix_space=True, add_special_tokens=False).input_ids force_tokens_2 = tokenizer("big weapons", add_prefix_space=True, add_special_tokens=False).input_ids constraints = [ PhrasalConstraint(force_tokens), PhrasalConstraint(force_tokens_2), ] starting_text = ["The soldiers were not prepared and"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, constraints=constraints, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, max_length=30, remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The 
soldiers were not prepared and didn't know what to do. They had no idea how they would react if" " the enemy attacked them, big weapons scared" ], ) @slow def test_constrained_beam_search_mixed(self): # PT-only test: TF doesn't have constrained beam search model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") force_phrase = tokenizer("scared", add_prefix_space=True, add_special_tokens=False).input_ids flexible_phrases = tokenizer( ["scream", "screams", "screaming", "screamed"], add_prefix_space=True, add_special_tokens=False ).input_ids constraints = [ PhrasalConstraint(force_phrase), DisjunctiveConstraint(flexible_phrases), ] starting_text = ["The soldiers", "The child"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, constraints=constraints, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, # max_length=20, remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The soldiers, who had been stationed at the base for more than a year before being evacuated" " screaming scared", "The child was taken to a local hospital where he died.\n 'I don't think screaming scared", ], ) @slow def test_constrained_beam_search_mixed_mixin(self): # PT-only test: TF doesn't have constrained beam search model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") force_word = "scared" force_flexible = ["scream", "screams", "screaming", "screamed"] force_words_ids = [ tokenizer([force_word], add_prefix_space=True, add_special_tokens=False).input_ids, tokenizer(force_flexible, add_prefix_space=True, add_special_tokens=False).input_ids, ] starting_text = ["The soldiers", "The child"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The soldiers, who had been stationed at the base for more than a year before being evacuated" " screaming scared", "The child was taken to a local hospital where he died.\n 'I don't think screaming scared", ], ) @slow def test_cfg_mixin(self): model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") input = tokenizer(["The dragon flew over Paris,"], return_tensors="pt", return_attention_mask=True) input["input_ids"] = input["input_ids"].to(torch_device) input["attention_mask"] = input["attention_mask"].to(torch_device) outputs = model.generate(**input, max_new_tokens=32, guidance_scale=1.5) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The dragon flew over Paris, landing in the Rue de la Bastille. 
The crowd was so excited " 'that they had to leave the city.\n\n"We\'re going to Paris!"\n' ], ) neg = tokenizer(["France,"], return_tensors="pt", return_attention_mask=True) neg["input_ids"] = neg["input_ids"].to(torch_device) neg["attention_mask"] = neg["attention_mask"].to(torch_device) outputs = model.generate( **input, max_new_tokens=32, guidance_scale=1.5, negative_prompt_ids=neg["input_ids"], negative_prompt_attention_mask=neg["attention_mask"], ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ 'The dragon flew over Paris, landing on the pavement.\n\n"Paris!"\n\n"Paris!"\n\n"' 'Paris!"\n\n"Paris!"\n\n"Paris!"\n\n' ], ) @slow def test_constrained_beam_search_example_translation_mixin(self): # PT-only test: TF doesn't have constrained beam search tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" force_words = ["sind"] input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids force_words_ids = tokenizer(force_words, add_special_tokens=False).input_ids outputs = model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt sind Sie?"]) @slow def test_constrained_beam_search_example_integration(self): # PT-only test: TF doesn't have constrained beam search tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run beam search using 5 beams num_beams = 5 # define decoder start token ids input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = { "encoder_outputs": model.get_encoder()( encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ) } constraint_str = "sind" constraint_token_ids = tokenizer.encode(constraint_str)[:-1] # remove eos token constraints = [PhrasalConstraint(token_ids=constraint_token_ids)] # instantiate beam scorer beam_scorer = ConstrainedBeamSearchScorer( batch_size=1, num_beams=num_beams, device=model.device, constraints=constraints ) # instantiate logits processors logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ] ) outputs = model.constrained_beam_search( input_ids, beam_scorer, constraints=constraints, logits_processor=logits_processor, **model_kwargs ) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt sind Sie?"]) def test_constrained_beam_search_mixin_type_checks(self): # PT-only test: TF doesn't have constrained beam search tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/t5-tiny-random") model = AutoModelForSeq2SeqLM.from_pretrained("patrickvonplaten/t5-tiny-random") encoder_input_str = "translate English to German: How old are you?" 
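# Note on the checks below: `force_words_ids` must be a plain list of token-id lists (or a list of lists of
# lists, for disjunctive constraints) containing only non-negative ids. Each block feeds an invalid variant --
# tensors instead of lists, an empty list, or negative ids -- and expects `generate` to raise a ValueError.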
input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids with self.assertRaises(ValueError): force_words = ["sind"] force_words_ids = tokenizer(force_words, return_tensors="pt").input_ids model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) with self.assertRaises(ValueError): force_words = ["sind"] force_words_ids = [tokenizer(force_words, return_tensors="pt").input_ids] model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[]) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[[-1]]) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[[[-1]]]) def test_contrastive_search_batched(self): # PT-only test: there is no TF equivalent of this batched contrastive search check # Tests that contrastive search works with batched inputs (i.e. has the same output as for non-batched inputs) articles = ["Foo", "Bar Baz"] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(torch_device) model.config.eos_token_id = None input_ids_batched = tokenizer(articles, padding=True, return_tensors="pt").input_ids.to(torch_device) input_ids = tokenizer(articles[1], return_tensors="pt").input_ids.to(torch_device) output_sequences_batched = model.generate( input_ids=input_ids_batched, penalty_alpha=0.6, top_k=4, return_dict_in_generate=True, output_scores=True ) output_sequences = model.generate( input_ids=input_ids, penalty_alpha=0.6, top_k=4, return_dict_in_generate=True, output_scores=True ) batched_out = tokenizer.decode(output_sequences_batched.sequences[1], skip_special_tokens=True) out = tokenizer.decode(output_sequences.sequences[0], skip_special_tokens=True) self.assertEqual(batched_out, out) # output_sequences_batched.scores[0][1] -> 1st set of logits, 2nd sequence max_score_diff = (output_sequences_batched.scores[0][1] - output_sequences.scores[0][0]).abs().max() self.assertTrue(max_score_diff < 1e-5) def test_eos_token_id_int_and_list_top_k_top_sampling(self): # Has TF equivalent: this test relies on random sampling generation_kwargs = { "do_sample": True, "num_beams": 1, "top_p": 0.7, "top_k": 10, "temperature": 0.7, } expectation = 20 tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = """Hello, my dog is cute and""" tokens = tokenizer(text, return_tensors="pt").to(torch_device) model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) # Only some seeds will work on both CPU and GPU for a fixed `expectation` value. # The selected seed is not guaranteed to work on all torch versions.
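# Semantics under test: `eos_token_id` may be a single id or a list of ids, and with a list, generation
# stops at the first occurrence of *any* listed id. The fixed seed pins the sampled trajectory so that the
# expected length stays reproducible.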
torch.manual_seed(1) eos_token_id = 846 generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) torch.manual_seed(1) eos_token_id = [846, 198] generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) def test_model_kwarg_encoder_signature_filtering(self): # Has TF equivalent: ample use of framework-specific code bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") article = """Hugging Face is a technology company based in New York and Paris.""" input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) output = bart_model.generate(input_ids).cpu().numpy() # Let's create a fake model that has a different signature. In particular, this fake model accepts "foo" as an # argument. Because "foo" is not in the encoder signature and doesn't start with "decoder_", it will be part of # the encoder kwargs prior to signature filtering, which would lead to an exception. But filtering kicks in and # saves the day. class FakeBart(BartForConditionalGeneration): def forward(self, input_ids, foo=None, **kwargs): return super().forward(input_ids, **kwargs) bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart").to(torch_device) fake_output = bart_model.generate(input_ids, foo="bar").cpu().numpy() self.assertTrue(np.array_equal(output, fake_output)) # Encoder signature filtering only kicks in if it doesn't accept wildcard kwargs. The following test will fail # because it doesn't do signature filtering. class FakeEncoder(bart_model.model.encoder.__class__): def forward(self, input_ids, **kwargs): return super().forward(input_ids, **kwargs) fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared).to(torch_device) bart_model.model.encoder = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) fake_output = bart_model.generate(input_ids).cpu().numpy() with self.assertRaises(TypeError): # FakeEncoder.forward() accepts **kwargs -> no filtering -> type error due to unexpected input "foo" bart_model.generate(input_ids, foo="bar") def test_default_max_length_warning(self): model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model.config.pad_token_id = tokenizer.eos_token_id text = "Hello world" tokenized_inputs = tokenizer([text], return_tensors="pt") input_ids = tokenized_inputs.input_ids.to(torch_device) # Default generation config value of 20 -> emits warning with self.assertWarns(UserWarning): model.generate(input_ids) # Explicitly setting max_length to 20 -> no warning with warnings.catch_warnings(record=True) as warning_list: model.generate(input_ids, max_length=20) self.assertEqual(len(warning_list), 0) # Generation config max_length != 20 -> no warning with warnings.catch_warnings(record=True) as warning_list: # generation_config is modified -> legacy mode is disabled = generation_config takes precedence model.generation_config.max_length = 10 model.generate(input_ids) self.assertEqual(len(warning_list), 0) def test_model_kwarg_assisted_decoding_decoder_only(self): # PT-only test: TF doesn't support assisted decoding yet. 
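# Background: assisted decoding drafts candidate tokens with a smaller "assistant" model and validates them
# with the main model, so greedy outputs must match regular generation exactly -- including when extra model
# kwargs such as `token_type_ids` are forwarded to both models, which is what this test checks.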
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model.config.pad_token_id = tokenizer.eos_token_id text = "Hello world" tokenized_inputs = tokenizer([text], return_tensors="pt") input_ids = tokenized_inputs.input_ids.to(torch_device) # Traditional way of generating text outputs_normal = model.generate(input_ids) self.assertEqual(outputs_normal.shape, (1, 20)) # Should be different with token_type_ids outputs_tti = model.generate( input_ids, token_type_ids=torch.zeros(input_ids.shape, dtype=torch.long).to(torch_device), ) with self.assertRaises(AssertionError): self.assertListEqual(outputs_tti.tolist(), outputs_normal.tolist()) # Assistant model assistant = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) assistant.config.pad_token_id = tokenizer.eos_token_id # If assisted generation passes model_kwargs correctly, should be same as previous outputs_assisted = model.generate( input_ids, token_type_ids=torch.zeros(input_ids.shape, dtype=torch.long).to(torch_device), assistant_model=assistant, ) self.assertListEqual(outputs_assisted.tolist(), outputs_tti.tolist()) def test_model_kwarg_assisted_decoding_encoder_decoder(self): # PT-only test: TF doesn't support assisted decoding yet. # Bart subclass with a kwarg that distorts the output class FakeBart(BartForConditionalGeneration): def forward(self, input_ids, foo=False, **kwargs): outs = super().forward(input_ids, **kwargs) if foo: outs["logits"][:, :, :] = 0.0 return outs def prepare_inputs_for_generation(self, *args, foo=False, encoder_outputs=None, **kwargs): kwargs["encoder_outputs"] = encoder_outputs inputs = super().prepare_inputs_for_generation(*args, **kwargs) inputs["foo"] = foo return inputs model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-BartForConditionalGeneration").to( torch_device ) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-BartForConditionalGeneration") text = "Hello world" tokenized_inputs = tokenizer([text], return_tensors="pt") input_ids = tokenized_inputs.input_ids.to(torch_device) # Traditional way of generating text outputs_normal = model.generate(input_ids) self.assertEqual(outputs_normal.shape, (1, 20)) # Should be different with foo outputs_foo = model.generate( input_ids, foo=True, ) with self.assertRaises(AssertionError): self.assertListEqual(outputs_foo.tolist(), outputs_normal.tolist()) # Assistant model assistant = AutoModelForSeq2SeqLM.from_pretrained( "hf-internal-testing/tiny-random-BartForConditionalGeneration" ).to(torch_device) # If assisted generation passes model_kwargs correctly, should be same as previous outputs_assisted = model.generate( input_ids, foo=True, assistant_model=assistant, ) self.assertListEqual(outputs_assisted.tolist(), outputs_foo.tolist()) # Check that passing encoder_outputs directly also works as expected encoder_outputs = assistant.get_encoder()(input_ids) outputs_assisted = model.generate( foo=True, assistant_model=assistant, encoder_outputs=encoder_outputs, assistant_encoder_outputs=encoder_outputs, ) self.assertListEqual(outputs_assisted.tolist(), outputs_foo.tolist()) def test_assisted_decoding_encoder_decoder_shared_encoder(self): # PT-only test: TF doesn't support assisted decoding yet. 
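# Variant of the test above in which the assistant is a decoder-only `BartForCausalLM`, so the main model's
# encoder outputs can be shared and only one encoder forward pass is needed for both models.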
# Bart subclass with a kwarg called foo that distorts the output class FakeBart(BartForConditionalGeneration): def forward(self, input_ids, foo=False, **kwargs): outs = super().forward(input_ids, **kwargs) if foo: outs["logits"][:, :, :] = 0.0 return outs def prepare_inputs_for_generation(self, *args, foo=False, encoder_outputs=None, **kwargs): kwargs["encoder_outputs"] = encoder_outputs inputs = super().prepare_inputs_for_generation(*args, **kwargs) inputs["foo"] = foo return inputs model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-BartForConditionalGeneration").to( torch_device ) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-BartForConditionalGeneration") text = "Hello world" tokenized_inputs = tokenizer([text], return_tensors="pt") input_ids = tokenized_inputs.input_ids.to(torch_device) # Traditional way of generating text outputs_normal = model.generate(input_ids) self.assertEqual(outputs_normal.shape, (1, 20)) # Should be different with foo outputs_foo = model.generate(input_ids, foo=True) with self.assertRaises(AssertionError): self.assertListEqual(outputs_foo.tolist(), outputs_normal.tolist()) # Assistant model assistant = BartForCausalLM.from_pretrained("hf-internal-testing/tiny-random-BartForConditionalGeneration").to( torch_device ) # If assisted generation passes model_kwargs correctly, should be same as previous outputs_assisted = model.generate( input_ids, foo=True, assistant_model=assistant, ) self.assertListEqual(outputs_assisted.tolist(), outputs_foo.tolist()) # Check that passing encoder_outputs directly also works as expected encoder_outputs = model.get_encoder()(input_ids) outputs_assisted = model.generate( foo=True, assistant_model=assistant, encoder_outputs=encoder_outputs, ) self.assertListEqual(outputs_assisted.tolist(), outputs_foo.tolist())
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_framework_agnostic.py
""" Framework agnostic tests for generate()-related methods. """ import numpy as np from transformers import AutoTokenizer from transformers.testing_utils import slow, torch_device class GenerationIntegrationTestsMixin: # To be populated by the child classes framework_dependent_parameters = { "AutoModelForCausalLM": None, "AutoModelForSpeechSeq2Seq": None, "AutoModelForSeq2SeqLM": None, "AutoModelForVision2Seq": None, "LogitsProcessorList": None, "MinLengthLogitsProcessor": None, "create_tensor_fn": None, "floats_tensor": None, "return_tensors": None, "set_seed": None, } def test_validate_generation_inputs(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] create_tensor_fn = self.framework_dependent_parameters["create_tensor_fn"] tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") model = model_cls.from_pretrained("hf-internal-testing/tiny-random-t5") encoder_input_str = "Hello world" input_ids = tokenizer(encoder_input_str, return_tensors=return_tensors).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(ValueError, "do_samples"): model.generate(input_ids, do_samples=True) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(ValueError, "foo"): fake_model_kwargs = {"foo": "bar"} model.generate(input_ids, **fake_model_kwargs) # however, valid model_kwargs are accepted valid_model_kwargs = {"attention_mask": create_tensor_fn(np.zeros_like(input_ids))} model.generate(input_ids, **valid_model_kwargs) def test_custom_logits_processor(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] logits_processor_list_cls = self.framework_dependent_parameters["LogitsProcessorList"] min_length_logits_processor_cls = self.framework_dependent_parameters["MinLengthLogitsProcessor"] return_tensors = self.framework_dependent_parameters["return_tensors"] bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_model = model_cls.from_pretrained("hf-internal-testing/tiny-random-bart", min_length=1) input_ids = bart_tokenizer(article, return_tensors=return_tensors).input_ids logits_processor = logits_processor_list_cls() logits_processor.append(min_length_logits_processor_cls(min_length=10, eos_token_id=0)) # it should not be allowed to both define `min_length` via config and `logits_processor` list with self.assertRaises(ValueError): bart_model.generate(input_ids, logits_processor=logits_processor) bart_model.config.min_length = None bart_model.generate(input_ids, logits_processor=logits_processor) def test_max_new_tokens_encoder_decoder(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = model_cls.from_pretrained("hf-internal-testing/tiny-random-bart") input_ids = bart_tokenizer(article, return_tensors=return_tensors).input_ids if is_pt: bart_model = bart_model.to(torch_device) input_ids = input_ids.to(torch_device) self.assertEqual(list(input_ids.shape), [1, 29]) max_new_tokens = 3 bart_model.config.max_length = 20 
bart_model.config.eos_token_id = None # Encoder decoder call outputs = bart_model.generate(input_ids, max_new_tokens=max_new_tokens) # 1 BOS + 3 new tokens self.assertEqual(list(outputs.shape), [1, 4]) # Decoder only call outputs = bart_model.generate(decoder_input_ids=input_ids, max_new_tokens=max_new_tokens) # 1 BOS + 29 (input length) + 3 new tokens self.assertEqual(list(outputs.shape), [1, 33]) # Encoder decoder call > 20 outputs = bart_model.generate(max_new_tokens=max_new_tokens + 20) # 1 BOS + 20 + 3 new tokens self.assertEqual(list(outputs.shape), [1, 24]) def test_max_new_tokens_decoder_only(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") article = """Justin Timberlake.""" gpt2_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") gpt2_model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2") input_ids = gpt2_tokenizer(article, return_tensors=return_tensors).input_ids if is_pt: gpt2_model = gpt2_model.to(torch_device) input_ids = input_ids.to(torch_device) self.assertEqual(list(input_ids.shape), [1, 9]) max_new_tokens = 3 gpt2_model.config.max_length = 20 # call < 20 outputs = gpt2_model.generate(input_ids, max_new_tokens=max_new_tokens) # 9 input_ids + 3 new tokens self.assertEqual(list(outputs.shape), [1, 12]) # call > 20 outputs = gpt2_model.generate(max_new_tokens=max_new_tokens + 20) # 1 BOS token + 23 new tokens self.assertEqual(list(outputs.shape), [1, 24]) def test_encoder_decoder_generate_with_inputs_embeds(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = model_cls.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=5) model.config.eos_token_id = None input_ids = tokenizer(article, return_tensors=return_tensors).input_ids inputs_embeds = model.get_input_embeddings()(input_ids) output_sequences = model.generate(inputs_embeds=inputs_embeds) # make sure model generated correctly until `max_length` self.assertEqual(output_sequences.shape, (1, 5)) def test_transition_scores_greedy_search(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = ["Justin Timberlake", "Michael Phelps"] tokenizer = AutoTokenizer.from_pretrained("distilgpt2", padding_side="left") tokenizer.pad_token = tokenizer.eos_token model = model_cls.from_pretrained("distilgpt2") input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate( input_ids=input_ids, max_new_tokens=5, pad_token_id=tokenizer.eos_token_id, eos_token_id=None, return_dict_in_generate=True, output_scores=True, ) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores) if is_pt: transition_scores = transition_scores.cpu().numpy() expected_scores = np.array( [ [-57.8844, -60.45698, -70.16364, -65.50791, -66.35648], [-54.417572, -60.216614, -62.661243, -58.621933, -58.298683], ] ) self.assertTrue(np.allclose(transition_scores, expected_scores, atol=1e-3)) def 
test_transition_scores_greedy_search_normalized(self): model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = ["Justin Timberlake", "Michael Phelps"] tokenizer = AutoTokenizer.from_pretrained("distilgpt2", padding_side="left") tokenizer.pad_token = tokenizer.eos_token model = model_cls.from_pretrained("distilgpt2") input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate( input_ids=input_ids, max_new_tokens=5, pad_token_id=tokenizer.eos_token_id, eos_token_id=None, return_dict_in_generate=True, output_scores=True, ) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, normalize_logits=True) if is_pt: transition_scores = transition_scores.cpu().numpy() expected_scores = np.array( [ [-2.538938, -2.2694316, -2.1580915, -1.572299, -2.6719835], [-1.8826028, -2.2461371, -1.7556462, -2.9644494, -1.7996008], ] ) self.assertTrue(np.allclose(transition_scores, expected_scores, atol=1e-3)) def test_transition_scores_beam_search_encoder_decoder(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = model_cls.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=10, num_beams=4, num_return_sequences=2, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices) if is_pt: transition_scores = transition_scores.cpu().numpy() outputs.sequences_scores = outputs.sequences_scores.cpu().numpy() self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores, atol=1e-3)) def test_transition_scores_beam_search_encoder_decoder_with_eos(self): model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"] return_tensors = self.framework_dependent_parameters["return_tensors"] is_pt = not model_cls.__name__.startswith("TF") articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = model_cls.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=10, num_beams=4, num_return_sequences=2, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids if is_pt: model = model.to(torch_device) input_ids = input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices) if is_pt: transition_scores = transition_scores.cpu().numpy() 
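# With `length_penalty=0.0`, the per-step transition scores summed over each sequence should reproduce
# `sequences_scores` up to numerical tolerance -- the invariant asserted below.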
            outputs.sequences_scores = outputs.sequences_scores.cpu().numpy()

        self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores, atol=1e-3))

    def test_transition_scores_beam_search_decoder_only(self):
        model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"]
        return_tensors = self.framework_dependent_parameters["return_tensors"]
        is_pt = not model_cls.__name__.startswith("TF")

        articles = [
            "Justin Timberlake",
            "Michael Phelps",
        ]
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        tokenizer.pad_token = tokenizer.eos_token
        model = model_cls.from_pretrained(
            "hf-internal-testing/tiny-random-gpt2",
            max_length=10,
            num_beams=4,
            num_return_sequences=2,
            pad_token_id=tokenizer.eos_token_id,
            eos_token_id=None,
            return_dict_in_generate=True,
            output_scores=True,
            length_penalty=0.0,
        )
        input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids
        if is_pt:
            model = model.to(torch_device)
            input_ids = input_ids.to(torch_device)

        outputs = model.generate(input_ids=input_ids)

        transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices)
        if is_pt:
            transition_scores = transition_scores.cpu().numpy()
            outputs.sequences_scores = outputs.sequences_scores.cpu().numpy()

        self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores, atol=1e-3))

    def test_transition_scores_beam_sample_encoder_decoder(self):
        model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"]
        return_tensors = self.framework_dependent_parameters["return_tensors"]
        is_pt = not model_cls.__name__.startswith("TF")

        articles = [
            "Justin Timberlake and Jessica Biel, welcome to parenthood.",
            "Michael Phelps is arguably the most decorated Olympian of all time.",
        ]
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        model = model_cls.from_pretrained(
            "hf-internal-testing/tiny-random-bart",
            do_sample=True,
            max_length=10,
            num_beams=4,
            num_return_sequences=2,
            eos_token_id=None,
            return_dict_in_generate=True,
            output_scores=True,
            length_penalty=0.0,
        )
        input_ids = tokenizer(articles, return_tensors=return_tensors, padding=True).input_ids
        if is_pt:
            model = model.to(torch_device)
            input_ids = input_ids.to(torch_device)

        outputs = model.generate(input_ids=input_ids)

        transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices)
        if is_pt:
            transition_scores = transition_scores.cpu().numpy()
            outputs.sequences_scores = outputs.sequences_scores.cpu().numpy()

        self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores, atol=1e-3))

    @slow
    def test_transition_scores_early_stopping(self):
        # This is an aggressive test that makes sure that `beam_search's`
        # transition scores are computed correctly for varying `num_return_sequences`, `num_beams` and `batch_size > 1`
        # 2 x input_ids for "question: How are you? \n context: I had a long day, "
        model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"]
        create_tensor_fn = self.framework_dependent_parameters["create_tensor_fn"]
        is_pt = not model_cls.__name__.startswith("TF")

        input_ids = create_tensor_fn(2 * [[822, 10, 571, 33, 25, 58, 2625, 10, 27, 141, 3, 9, 307, 239, 6, 1]])
        model = model_cls.from_pretrained("t5-small")
        if is_pt:
            model = model.to(torch_device)
            input_ids = input_ids.to(torch_device)

        outputs = model.generate(
            input_ids,
            max_length=10,
            return_dict_in_generate=True,
            output_scores=True,
            forced_eos_token_id=model.config.eos_token_id,
            num_beams=4,
            do_sample=False,
            num_return_sequences=3,
            length_penalty=0.0,
        )

        transition_scores = model.compute_transition_scores(
            sequences=outputs.sequences, scores=outputs.scores, beam_indices=outputs.beam_indices
        )
        if is_pt:
            transition_scores = transition_scores.cpu().numpy()
            outputs.sequences_scores = outputs.sequences_scores.cpu().numpy()

        self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores))

    def test_encoder_decoder_generate_attention_mask(self):
        model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"]
        return_tensors = self.framework_dependent_parameters["return_tensors"]
        is_pt = not model_cls.__name__.startswith("TF")

        articles = ["Timberlake", "Jessica Biel, welcome to parenthood among other things"]
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        # need extreme generation values here to force this test
        # to fail when `attention_mask` is not correctly treated in generate
        model = model_cls.from_pretrained(
            "hf-internal-testing/tiny-random-bart", max_length=50, num_beams=5, num_return_sequences=5
        )
        model.config.eos_token_id = None
        input_ids = tokenizer(articles[0], return_tensors=return_tensors).input_ids
        input_ids_batched = tokenizer(articles, padding=True, return_tensors=return_tensors).input_ids
        if is_pt:
            model = model.to(torch_device)
            input_ids = input_ids.to(torch_device)
            input_ids_batched = input_ids_batched.to(torch_device)

        output_sequences_batched = model.generate(
            input_ids=input_ids_batched, return_dict_in_generate=True, output_scores=True
        )
        output_sequences = model.generate(input_ids=input_ids, return_dict_in_generate=True, output_scores=True)

        batched_out = output_sequences_batched.sequences_scores
        out = output_sequences.sequences_scores
        if is_pt:
            batched_out = batched_out.cpu().numpy()
            out = out.cpu().numpy()

        diff = np.abs(np.sum(batched_out[:5]) - np.sum(out))
        self.assertTrue(diff < 1e-4)

    def test_generate_input_ids_as_kwarg(self):
        model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"]
        return_tensors = self.framework_dependent_parameters["return_tensors"]
        is_pt = not model_cls.__name__.startswith("TF")

        article = """I need input_ids to generate"""
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=15)
        input_ids = tokenizer(article, return_tensors=return_tensors).input_ids
        if is_pt:
            model = model.to(torch_device)
            input_ids = input_ids.to(torch_device)

        output_sequences_kwargs = model.generate(input_ids=input_ids)
        output_sequences = model.generate(input_ids)
        if is_pt:
            output_sequences_kwargs = output_sequences_kwargs.cpu().numpy()
            output_sequences = output_sequences.cpu().numpy()

        self.assertTrue(np.array_equal(output_sequences, output_sequences_kwargs))
        self.assertEqual(output_sequences.shape, (1, 15))

    def test_generate_input_ids_as_encoder_kwarg(self):
        model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"]
        return_tensors = self.framework_dependent_parameters["return_tensors"]
        is_pt = not model_cls.__name__.startswith("TF")

        article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=5)
        model.config.eos_token_id = None
        input_ids = tokenizer(article, return_tensors=return_tensors).input_ids
        if is_pt:
            model = model.to(torch_device)
            input_ids = input_ids.to(torch_device)

        output_sequences_kwargs = model.generate(input_ids=input_ids)
        output_sequences = model.generate(input_ids)
        if is_pt:
            output_sequences_kwargs = output_sequences_kwargs.cpu().numpy()
            output_sequences = output_sequences.cpu().numpy()

        self.assertTrue(np.array_equal(output_sequences, output_sequences_kwargs))
        self.assertEqual(output_sequences.shape, (1, 5))

    def test_generate_inputs_and_encoder_kwargs(self):
        model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"]
        return_tensors = self.framework_dependent_parameters["return_tensors"]

        article = """I need input_ids to generate"""
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=10)
        input_ids = tokenizer(article, return_tensors=return_tensors).input_ids
        with self.assertRaises(ValueError):
            model.generate(input_ids, input_ids=input_ids)

    def test_generate_too_many_encoder_kwargs(self):
        model_cls = self.framework_dependent_parameters["AutoModelForSeq2SeqLM"]
        return_tensors = self.framework_dependent_parameters["return_tensors"]

        article = """I need input_ids to generate"""
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=10)
        input_ids = tokenizer(article, return_tensors=return_tensors).input_ids
        with self.assertRaises(ValueError):
            model.generate(input_ids=input_ids, inputs_embeds=input_ids)

    def test_generate_input_features_as_encoder_kwarg(self):
        model_cls = self.framework_dependent_parameters["AutoModelForSpeechSeq2Seq"]
        floats_tensor = self.framework_dependent_parameters["floats_tensor"]
        is_pt = not model_cls.__name__.startswith("TF")

        input_features = floats_tensor((3, 80, 60))
        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-WhisperForConditionalGeneration")
        if is_pt:
            # `Tensor.to` is not in-place, so the moved tensor must be reassigned
            input_features = input_features.to(torch_device)
            model = model.to(torch_device)

        output_sequences_kwargs = model.generate(input_features=input_features, max_length=5)
        output_sequences = model.generate(input_features, max_length=5)
        if is_pt:
            output_sequences_kwargs = output_sequences_kwargs.cpu().numpy()
            output_sequences = output_sequences.cpu().numpy()

        self.assertTrue(np.array_equal(output_sequences, output_sequences_kwargs))
        self.assertEqual(output_sequences.shape, (3, 5))

    def test_generate_pixel_values_as_encoder_kwarg(self):
        model_cls = self.framework_dependent_parameters["AutoModelForVision2Seq"]
        floats_tensor = self.framework_dependent_parameters["floats_tensor"]
        is_pt = not model_cls.__name__.startswith("TF")

        pixel_values = floats_tensor((2, 3, 30, 30))
        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-VisionEncoderDecoderModel-vit-gpt2")
        model.config.decoder.eos_token_id = None
        if is_pt:
            pixel_values = pixel_values.to(torch_device)
            model = model.to(torch_device)

        output_sequences_kwargs = model.generate(pixel_values=pixel_values, max_length=5)
        output_sequences = model.generate(pixel_values, max_length=5)
        if is_pt:
            output_sequences_kwargs = output_sequences_kwargs.cpu().numpy()
            output_sequences = output_sequences.cpu().numpy()

        self.assertTrue(np.array_equal(output_sequences, output_sequences_kwargs))
        self.assertEqual(output_sequences.shape, (2, 5))

    def test_generate_encoder_outputs_attention_mask(self):
        model_cls = self.framework_dependent_parameters["AutoModelForSpeechSeq2Seq"]
        floats_tensor = self.framework_dependent_parameters["floats_tensor"]
        create_tensor_fn = self.framework_dependent_parameters["create_tensor_fn"]
        is_pt = not model_cls.__name__.startswith("TF")

        input_features = floats_tensor((3, 80, 60))
        attention_mask = create_tensor_fn(np.ones(input_features.shape))
        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-WhisperForConditionalGeneration")
        if is_pt:
            input_features = input_features.to(torch_device)
            attention_mask = attention_mask.to(torch_device)
            model = model.to(torch_device)

        encoder = model.get_encoder()
        encoder_outputs = encoder(input_features)

        output_sequences_no_mask = model.generate(encoder_outputs=encoder_outputs)
        output_sequences_with_mask = model.generate(encoder_outputs=encoder_outputs, attention_mask=attention_mask)
        if is_pt:
            output_sequences_no_mask = output_sequences_no_mask.cpu().numpy()
            output_sequences_with_mask = output_sequences_with_mask.cpu().numpy()

        self.assertTrue(np.array_equal(output_sequences_no_mask, output_sequences_with_mask))

    def test_eos_token_id_int_and_list_greedy_search(self):
        model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"]
        return_tensors = self.framework_dependent_parameters["return_tensors"]
        is_pt = not model_cls.__name__.startswith("TF")

        generation_kwargs = {
            "do_sample": False,
            "num_beams": 1,
        }
        expectation = 13

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = """Hello, my dog is cute and"""
        tokens = tokenizer(text, return_tensors=return_tensors)
        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        if is_pt:
            model = model.to(torch_device)
            tokens = tokens.to(torch_device)

        eos_token_id = 873
        generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [873, 198]
        generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

    def test_eos_token_id_int_and_list_contrastive_search(self):
        model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"]
        return_tensors = self.framework_dependent_parameters["return_tensors"]
        is_pt = not model_cls.__name__.startswith("TF")

        generation_kwargs = {
            "do_sample": False,
            "num_beams": 1,
            "penalty_alpha": 0.6,
            "top_k": 4,
        }
        expectation = 17

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = """Hello, my dog is cute and"""
        tokens = tokenizer(text, return_tensors=return_tensors)
        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        if is_pt:
            model = model.to(torch_device)
            tokens = tokens.to(torch_device)

        eos_token_id = 225
        generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [225, 198]
        generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

    def test_eos_token_id_int_and_list_beam_search(self):
        model_cls = self.framework_dependent_parameters["AutoModelForCausalLM"]
        return_tensors = self.framework_dependent_parameters["return_tensors"]
        is_pt = not model_cls.__name__.startswith("TF")

        generation_kwargs = {
            "do_sample": False,
            "num_beams": 3,
        }
        expectation = 13

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = """Hello, my dog is cute and"""
        tokens = tokenizer(text, return_tensors=return_tensors)
        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        if is_pt:
            model = model.to(torch_device)
            tokens = tokens.to(torch_device)

        eos_token_id = 873
        generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        unpadded_correct_condition = expectation == len(generated_tokens[0])
        padded_correct_condition = expectation < len(generated_tokens[0]) and all(
            token == model.config.pad_token_id for token in generated_tokens[0][expectation:]
        )
        self.assertTrue(unpadded_correct_condition or padded_correct_condition)

        eos_token_id = [873, 198]
        generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        unpadded_correct_condition = expectation == len(generated_tokens[0])
        padded_correct_condition = expectation < len(generated_tokens[0]) and all(
            token == model.config.pad_token_id for token in generated_tokens[0][expectation:]
        )
        self.assertTrue(unpadded_correct_condition or padded_correct_condition)

    def test_generate_vision2text_conditioning(self):
        model_cls = self.framework_dependent_parameters["AutoModelForVision2Seq"]
        floats_tensor = self.framework_dependent_parameters["floats_tensor"]
        create_tensor_fn = self.framework_dependent_parameters["create_tensor_fn"]
        is_pt = not model_cls.__name__.startswith("TF")

        pixel_values = floats_tensor((2, 3, 30, 30))
        conditioning_input = create_tensor_fn([[10], [10]])  # this should be the 2nd output token, after the BOS token
        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-VisionEncoderDecoderModel-vit-gpt2")
        if is_pt:
            pixel_values = pixel_values.to(torch_device)
            model = model.to(torch_device)
            conditioning_input = conditioning_input.to(torch_device)

        # we can condition on decoder_input_ids (expected decoder input) and input_ids (which we pipe internally as
        # decoder_input_ids, if the encoder is not a model with text input)
        output_sequences_decoder_input_ids = model.generate(
            pixel_values, max_length=5, decoder_input_ids=conditioning_input
        )
        output_sequences_input_ids = model.generate(pixel_values, max_length=5, input_ids=conditioning_input)
        if is_pt:
            output_sequences_decoder_input_ids = output_sequences_decoder_input_ids.cpu().numpy()
            output_sequences_input_ids = output_sequences_input_ids.cpu().numpy()
            conditioning_input = conditioning_input.cpu().numpy()

        self.assertTrue(np.array_equal(output_sequences_decoder_input_ids, output_sequences_input_ids))
        self.assertTrue(np.array_equal(output_sequences_decoder_input_ids[:, 1:2], conditioning_input))
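
# Hedged sketch (illustrative, not part of the original test suite): the invariant behind the
# `np.allclose` checks in the transition-score tests above -- with `length_penalty=0.0`, each
# returned beam's `sequences_scores` entry equals the sum of its per-token transition scores.
# Model name and generation settings here are placeholders chosen for illustration.
def _example_transition_scores_invariant():
    import numpy as np

    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
    model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-bart")
    inputs = tokenizer("Michael Phelps", return_tensors="pt")
    outputs = model.generate(
        **inputs,
        num_beams=4,
        max_length=10,
        length_penalty=0.0,
        return_dict_in_generate=True,
        output_scores=True,
    )
    # Recover each beam's score from its per-step log-probabilities
    transition_scores = model.compute_transition_scores(
        outputs.sequences, outputs.scores, outputs.beam_indices
    )
    assert np.allclose(
        transition_scores.sum(-1).numpy(), outputs.sequences_scores.numpy(), atol=1e-3
    )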
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_streamers.py
# coding=utf-8
# Copyright 2023 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be
        # tested with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
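
# Hedged usage sketch (illustrative, not part of the test suite): the pattern the tests above
# exercise, as it would look in application code -- `generate` runs in a background thread
# while the caller consumes decoded text chunks from the `TextIteratorStreamer`.
def _example_stream_generation(model, tokenizer, prompt: str) -> str:
    from threading import Thread

    from transformers import TextIteratorStreamer

    inputs = tokenizer(prompt, return_tensors="pt")
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    generation_kwargs = {**inputs, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()
    # The iterator yields newly decoded text as soon as whole words are available
    text = "".join(chunk for chunk in streamer)
    thread.join()
    return text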
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_beam_search.py
# coding=utf-8
# Copyright 2020 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import floats_tensor, ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        BeamHypotheses,
        BeamSearchScorer,
        ConstrainedBeamSearchScorer,
        DisjunctiveConstraint,
        PhrasalConstraint,
    )


class BeamSearchTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        sequence_length=10,
        vocab_size=99,
        pad_token_id=0,
        max_length=20,
        num_beams=4,
        length_penalty=2.0,
        do_early_stopping=True,
        num_beam_hyps_to_keep=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.sequence_length = sequence_length
        self.vocab_size = vocab_size
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.num_beams = num_beams
        self.length_penalty = length_penalty
        self.do_early_stopping = do_early_stopping
        self.num_beam_hyps_to_keep = num_beam_hyps_to_keep

        # cannot be randomly generated
        self.eos_token_id = vocab_size + 1

    def prepare_beam_scorer(self, **kwargs):
        return BeamSearchScorer(
            batch_size=kwargs.get("batch_size", self.batch_size),
            num_beams=kwargs.get("num_beams", self.num_beams),
            device=torch_device,
            length_penalty=kwargs.get("length_penalty", self.length_penalty),
            do_early_stopping=kwargs.get("do_early_stopping", self.do_early_stopping),
            num_beam_hyps_to_keep=kwargs.get("num_beam_hyps_to_keep", self.num_beam_hyps_to_keep),
        )

    def prepare_inputs(self):
        input_ids = ids_tensor((self.batch_size * self.num_beams, self.sequence_length), self.vocab_size)
        next_tokens = ids_tensor((self.batch_size, 2 * self.num_beams), self.vocab_size).to(torch_device)
        next_indices = ids_tensor((self.batch_size, 2 * self.num_beams), self.num_beams).to(torch_device)
        next_scores, _ = (-floats_tensor((self.batch_size, 2 * self.num_beams)).to(torch_device)).sort(
            descending=True
        )
        return (input_ids, next_tokens, next_indices, next_scores)

    def check_beam_hypotheses(self, input_ids, *args):
        # check that correct number of beam hypotheses is set in beam scorer
        beam_scorer = self.prepare_beam_scorer(do_early_stopping=True)
        beam_hyp = beam_scorer._beam_hyps[0]

        self.parent.assertEqual(len(beam_scorer._beam_hyps), self.batch_size)

        # check correct type
        self.parent.assertTrue(isinstance(beam_hyp, BeamHypotheses))

        # check that num_beams is correctly set
        self.parent.assertEqual(beam_hyp.num_beams, self.num_beams)

        # check for early stopping deactivated
        for beam_idx in range(self.num_beams):
            beam_hyp.add(input_ids[beam_idx], -10.0)

        # if early stopping True -> score does not matter
        self.parent.assertTrue(beam_hyp.is_done(-10.0, 5))

        # re-init
        beam_scorer = self.prepare_beam_scorer(do_early_stopping=False)
        beam_hyp = beam_scorer._beam_hyps[0]

        # add `num_beams + 1` beams to change `worst_score`
        for beam_idx in range(self.num_beams + 1):
            beam_hyp.add(input_ids[beam_idx], -10.0 + float(beam_idx))

        # -10.0 is removed => -9.0 is worst score
        self.parent.assertAlmostEqual(beam_hyp.worst_score, -9.0 / (self.sequence_length**beam_hyp.length_penalty))

        # -5.0 is better than worst score => should not be finished
        self.parent.assertFalse(beam_hyp.is_done(-5.0, self.sequence_length))

        # -20.0 is worse than worst score => should be finished
        self.parent.assertTrue(beam_hyp.is_done(-20.0, self.sequence_length))

    def check_beam_scorer_update(self, input_ids, next_tokens, next_indices, next_scores):
        # check too many eos tokens
        beam_scorer = self.prepare_beam_scorer()

        tokens = next_tokens.clone()
        tokens[0, :] = self.eos_token_id

        with self.parent.assertRaises(ValueError):
            beam_scorer.process(input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id)

        # check all batches are done
        beam_scorer = self.prepare_beam_scorer()

        tokens = next_tokens.clone()
        tokens[:, : self.num_beams] = self.eos_token_id
        beam_indices = torch.zeros_like(input_ids) + torch.arange(input_ids.shape[-1], device=input_ids.device)
        beam_indices = tuple(tuple(b) for b in beam_indices)
        beam_scorer.process(
            input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id, beam_indices=beam_indices
        )
        # beam scorer should be done
        self.parent.assertTrue(beam_scorer.is_done)

        # check
        beam_scorer = self.prepare_beam_scorer()

        tokens = next_tokens.clone()
        tokens[:, 1] = self.eos_token_id
        beam_outputs = beam_scorer.process(
            input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id, beam_indices=beam_indices
        )
        output_scores = beam_outputs["next_beam_scores"]
        output_tokens = beam_outputs["next_beam_tokens"]
        output_indices = beam_outputs["next_beam_indices"]

        def cut_expected_tensor(tensor):
            return torch.cat([tensor[:, :1], tensor[:, 2 : self.num_beams + 1]], dim=1).flatten()

        # check all outputs
        # cut out id of eos token and take best `num_beams` outputs
        expected_output_tokens = cut_expected_tensor(tokens)
        expected_output_scores = cut_expected_tensor(next_scores)

        # add num_beams * batch_idx
        offset = torch.div(
            torch.arange(self.num_beams * self.batch_size, device=torch_device), self.num_beams, rounding_mode="floor"
        )
        expected_output_indices = cut_expected_tensor(next_indices) + offset * self.num_beams

        self.parent.assertListEqual(expected_output_tokens.tolist(), output_tokens.tolist())
        self.parent.assertListEqual(expected_output_indices.tolist(), output_indices.tolist())
        self.parent.assertTrue(torch.allclose(expected_output_scores, output_scores, atol=1e-3))

        # make sure ids of eos token are correctly saved in beam_hyps of beam scorer
        expected_beam_indices = list(range(10))
        for batch_idx in range(self.batch_size):
            correct_idx = batch_idx * self.num_beams + next_indices[batch_idx, 1]
            self.parent.assertListEqual(
                input_ids[correct_idx].tolist(), beam_scorer._beam_hyps[batch_idx].beams[0][1].tolist()
            )
            self.parent.assertListEqual(
                expected_beam_indices + [correct_idx],
                torch.tensor(beam_scorer._beam_hyps[batch_idx].beams[0][2]).tolist(),
            )

    def check_beam_scores_finalize(self, input_ids, next_tokens, next_indices, next_scores):
        # max_length should be only one more than current input_ids to check that eos is correctly appended
        max_length = self.sequence_length + 1
        beam_scorer = self.prepare_beam_scorer(num_beam_hyps_to_keep=1, length_penalty=1.0, do_early_stopping=False)

        # update beams and append to input_ids
        tokens = next_tokens.clone()
        # first batch, first output has to finish with eos token id since scores are correctly sorted
        tokens[0, 0] = self.eos_token_id
        # make sure corresponding score is as good as possible to surely be picked first
        next_scores[0, 0] = 0.0

        beam_outputs = beam_scorer.process(
            input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id
        )
        output_scores = beam_outputs["next_beam_scores"]
        output_tokens = beam_outputs["next_beam_tokens"]
        output_indices = beam_outputs["next_beam_indices"]

        input_ids = torch.cat([input_ids[output_indices, :], output_tokens.unsqueeze(-1)], dim=-1)

        # finalize
        beam_indices = torch.zeros_like(input_ids) + torch.arange(input_ids.shape[-1], device=input_ids.device)
        beam_indices = tuple(tuple(b) for b in beam_indices)
        sequence_output = beam_scorer.finalize(
            input_ids,
            output_scores,
            output_tokens,
            output_indices,
            pad_token_id=self.pad_token_id,
            eos_token_id=self.eos_token_id,
            max_length=max_length,
            beam_indices=beam_indices,
        )

        sequences = sequence_output["sequences"]
        sequence_scores = sequence_output["sequence_scores"]

        # since `num_beam_hyps_to_keep` = 1 => only return `batch_size` x `max_length`
        self.parent.assertListEqual(list(sequences.shape), [self.batch_size, max_length])
        self.parent.assertListEqual(list(sequence_scores.shape), [self.batch_size])

        # check sequence_scores
        self.parent.assertFalse((sequence_scores > 0).any().item())

        # first batch has to finish with eos_token
        self.parent.assertEqual(sequences[0, -1].item(), self.eos_token_id)

        # other batches cannot finish with eos token
        self.parent.assertNotEqual(sequences[1, -1].item(), self.eos_token_id)
        self.parent.assertNotEqual(sequences[2, -1].item(), self.eos_token_id)

        # now test that if `num_beam_hyps_to_keep` is 3 => all beams are returned
        beam_scorer.num_beam_hyps_to_keep = self.num_beams
        sequence_output = beam_scorer.finalize(
            input_ids,
            output_scores,
            output_tokens,
            output_indices,
            pad_token_id=self.pad_token_id,
            eos_token_id=self.eos_token_id,
            max_length=max_length,
            beam_indices=beam_indices,
        )
        sequences = sequence_output["sequences"]
        sequence_scores = sequence_output["sequence_scores"]

        self.parent.assertListEqual(list(sequences.shape), [self.num_beams * self.batch_size, max_length])
        self.parent.assertListEqual(list(sequence_scores.shape), [self.num_beams * self.batch_size])


class ConstrainedBeamSearchTester:
    def __init__(
        self,
        parent,
        constraints=None,
        batch_size=3,
        sequence_length=10,
        vocab_size=99,
        pad_token_id=0,
        max_length=20,
        num_beams=4,
        length_penalty=2.0,
        do_early_stopping=True,
        num_beam_hyps_to_keep=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.sequence_length = sequence_length
        self.vocab_size = vocab_size
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.num_beams = num_beams
        self.length_penalty = length_penalty
        self.do_early_stopping = do_early_stopping
        self.num_beam_hyps_to_keep = num_beam_hyps_to_keep

        if constraints is None:
            force_tokens = torch.randint(10, 50, (1, 2))[0].tolist()
            disjunctive_tokens = torch.randint(10, 50, (2, 2)).tolist()

            constraints = [PhrasalConstraint(force_tokens), DisjunctiveConstraint(disjunctive_tokens)]
        self.constraints = constraints
        # cannot be randomly generated
        self.eos_token_id = vocab_size + 1

    def prepare_constrained_beam_scorer(self, **kwargs):
        return ConstrainedBeamSearchScorer(
            constraints=kwargs.get("constraints", self.constraints),
            batch_size=kwargs.get("batch_size", self.batch_size),
            num_beams=kwargs.get("num_beams", self.num_beams),
            device=torch_device,
            length_penalty=kwargs.get("length_penalty", self.length_penalty),
            do_early_stopping=kwargs.get("do_early_stopping", self.do_early_stopping),
            num_beam_hyps_to_keep=kwargs.get("num_beam_hyps_to_keep", self.num_beam_hyps_to_keep),
        )

    def prepare_inputs(self):
        input_ids = ids_tensor((self.batch_size * self.num_beams, self.sequence_length), self.vocab_size)
        next_tokens = ids_tensor((self.batch_size, 2 * self.num_beams), self.vocab_size).to(torch_device)
        next_indices = ids_tensor((self.batch_size, 2 * self.num_beams), self.num_beams).to(torch_device)
        next_scores, _ = (-floats_tensor((self.batch_size, 2 * self.num_beams)).to(torch_device)).sort(
            descending=True
        )
        scores_for_all_vocab, _ = (
            -floats_tensor((self.batch_size * self.num_beams, self.vocab_size)).to(torch_device)
        ).sort(descending=True)
        return (input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab)

    def check_beam_hypotheses(self, input_ids, *args):
        # check that correct number of beam hypotheses is set in beam scorer
        constrained_beam_scorer = self.prepare_constrained_beam_scorer(do_early_stopping=True)
        beam_hyp = constrained_beam_scorer._beam_hyps[0]

        self.parent.assertEqual(len(constrained_beam_scorer._beam_hyps), self.batch_size)

        # check correct type
        self.parent.assertTrue(isinstance(beam_hyp, BeamHypotheses))

        # check that num_beams is correctly set
        self.parent.assertEqual(beam_hyp.num_beams, self.num_beams)

        # check for early stopping deactivated
        for beam_idx in range(self.num_beams):
            beam_hyp.add(input_ids[beam_idx], -10.0)

        # if early stopping True -> score does not matter
        self.parent.assertTrue(beam_hyp.is_done(-10.0, 5))

        # re-init
        constrained_beam_scorer = self.prepare_constrained_beam_scorer(do_early_stopping=False)
        beam_hyp = constrained_beam_scorer._beam_hyps[0]

        # add `num_beams + 1` beams to change `worst_score`
        for beam_idx in range(self.num_beams + 1):
            beam_hyp.add(input_ids[beam_idx], -10.0 + float(beam_idx))

        # -10.0 is removed => -9.0 is worst score
        self.parent.assertAlmostEqual(beam_hyp.worst_score, -9.0 / (self.sequence_length**beam_hyp.length_penalty))

        # -5.0 is better than worst score => should not be finished
        self.parent.assertFalse(beam_hyp.is_done(-5.0, self.sequence_length))

        # -20.0 is worse than worst score => should be finished
        self.parent.assertTrue(beam_hyp.is_done(-20.0, self.sequence_length))

    def check_constrained_beam_scorer_update(
        self, input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab
    ):
        # check too many eos tokens
        constrained_beam_scorer = self.prepare_constrained_beam_scorer()
        stacked_token_ids = []
        for constraint in self.constraints:
            token_ids = constraint.token_ids
            token_ids = token_ids[0] if isinstance(token_ids[0], list) else token_ids
            stacked_token_ids = stacked_token_ids + token_ids

        fulfilling_sequence = torch.LongTensor(stacked_token_ids)
        fulfill_len = fulfilling_sequence.size(0)
        input_ids[:, :fulfill_len] = fulfilling_sequence

        tokens = next_tokens.clone()
        tokens[0, :] = self.eos_token_id

        with self.parent.assertRaises(ValueError):
            constrained_beam_scorer.process(
                input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id
            )

        # check all batches are done
        constrained_beam_scorer = self.prepare_constrained_beam_scorer()

        tokens = next_tokens.clone()
        tokens[:, : self.num_beams] = self.eos_token_id
        constrained_beam_scorer.process(
            input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id
        )
        # beam scorer should be done
        self.parent.assertTrue(constrained_beam_scorer.is_done)

        # check
        constrained_beam_scorer = self.prepare_constrained_beam_scorer()

        tokens = next_tokens.clone()
        tokens[:, 1] = self.eos_token_id
        beam_outputs = constrained_beam_scorer.process(
            input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id
        )
        output_scores = beam_outputs["next_beam_scores"]
        output_tokens = beam_outputs["next_beam_tokens"]
        output_indices = beam_outputs["next_beam_indices"]

        def cut_expected_tensor(tensor):
            return torch.cat([tensor[:, :1], tensor[:, 2 : self.num_beams + 1]], dim=1).flatten()

        # check all outputs
        # cut out id of eos token and take best `num_beams` outputs
        expected_output_tokens = cut_expected_tensor(tokens)
        expected_output_scores = cut_expected_tensor(next_scores)

        # add num_beams * batch_idx
        offset = torch.div(
            torch.arange(self.num_beams * self.batch_size, device=torch_device), self.num_beams, rounding_mode="floor"
        )
        expected_output_indices = cut_expected_tensor(next_indices) + offset * self.num_beams

        self.parent.assertListEqual(expected_output_tokens.tolist(), output_tokens.tolist())
        self.parent.assertListEqual(expected_output_indices.tolist(), output_indices.tolist())
        self.parent.assertTrue(torch.allclose(expected_output_scores, output_scores, atol=1e-3))

        # make sure ids of eos token are correctly saved in beam_hyps of beam scorer
        for batch_idx in range(self.batch_size):
            correct_idx = batch_idx * self.num_beams + next_indices[batch_idx, 1]
            self.parent.assertListEqual(
                input_ids[correct_idx].tolist(), constrained_beam_scorer._beam_hyps[batch_idx].beams[0][1].tolist()
            )

    def check_constrained_beam_scorer_finalize(
        self, input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab
    ):
        # max_length should be only one more than current input_ids to check that eos is correctly appended
        max_length = self.sequence_length + 1

        # for testing finalize, we do want to have fulfilled constraints
        stacked_token_ids = []
        for constraint in self.constraints:
            token_ids = constraint.token_ids
            token_ids = token_ids[0] if isinstance(token_ids[0], list) else token_ids
            stacked_token_ids = stacked_token_ids + token_ids

        fulfilling_sequence = torch.LongTensor(stacked_token_ids)

        fulfill_len = fulfilling_sequence.size(0)
        input_ids[:, :fulfill_len] = fulfilling_sequence

        constrained_beam_scorer = self.prepare_constrained_beam_scorer(
            num_beam_hyps_to_keep=1, length_penalty=1.0, do_early_stopping=False
        )
        constraints = constrained_beam_scorer.constraints

        # update beams and append to input_ids
        tokens = next_tokens.clone()
        # first batch, first output has to finish with eos token id since scores are correctly sorted
        tokens[0, 0] = self.eos_token_id
        # make sure corresponding score is as good as possible to surely be picked first
        next_scores[0, 0] = 0.0

        beam_outputs = constrained_beam_scorer.process(
            input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id
        )
        output_scores = beam_outputs["next_beam_scores"]
        output_tokens = beam_outputs["next_beam_tokens"]
        output_indices = beam_outputs["next_beam_indices"]
        input_ids = torch.cat([input_ids[output_indices, :], output_tokens.unsqueeze(-1)], dim=-1)

        # finalize
        sequence_output = constrained_beam_scorer.finalize(
            input_ids,
            output_scores,
            output_tokens,
            output_indices,
            pad_token_id=self.pad_token_id,
            eos_token_id=self.eos_token_id,
            max_length=max_length,
        )

        sequences = sequence_output["sequences"]
        sequence_scores = sequence_output["sequence_scores"]

        # since `num_beam_hyps_to_keep` = 1 => only return `batch_size` x `max_length`
        self.parent.assertListEqual(list(sequences.shape), [self.batch_size, max_length])
        self.parent.assertListEqual(list(sequence_scores.shape), [self.batch_size])

        # check sequence_scores
        self.parent.assertFalse((sequence_scores > 0).any().item())

        # first batch has to finish with eos_token
        self.parent.assertEqual(sequences[0, -1].item(), self.eos_token_id)

        # other batches cannot finish with eos token
        self.parent.assertNotEqual(sequences[1, -1].item(), self.eos_token_id)
        self.parent.assertNotEqual(sequences[2, -1].item(), self.eos_token_id)

        # test that the constraint is indeed fulfilled
        for output, constraint in [(s, c) for s in sequences for c in constraints]:
            forced_token_ids = constraint.token_ids
            if isinstance(forced_token_ids[0], list):
                # disjunctive case
                flag = False
                for token_ids in forced_token_ids:
                    if self._check_sequence_inside_sequence(output, token_ids):
                        flag = True
                        break
                self.parent.assertEqual(flag, True)
            else:
                self.parent.assertEqual(self._check_sequence_inside_sequence(output, forced_token_ids), True)

        # now test that if `num_beam_hyps_to_keep` is 3 => all beams are returned
        # constrained_beam_scorer.num_beam_hyps_to_keep = self.num_beams
        constrained_beam_scorer = self.prepare_constrained_beam_scorer(
            num_beam_hyps_to_keep=self.num_beams, length_penalty=1.0, do_early_stopping=False
        )

        sequence_output = constrained_beam_scorer.finalize(
            input_ids,
            output_scores,
            output_tokens,
            output_indices,
            pad_token_id=self.pad_token_id,
            eos_token_id=self.eos_token_id,
            max_length=max_length,
        )
        sequences = sequence_output["sequences"]
        sequence_scores = sequence_output["sequence_scores"]

        self.parent.assertListEqual(list(sequences.shape), [self.num_beams * self.batch_size, max_length])
        self.parent.assertListEqual(list(sequence_scores.shape), [self.num_beams * self.batch_size])

    def _check_sequence_inside_sequence(self, tensor_1, tensor_2):
        # check if tensor_1 inside tensor_2 or tensor_2 inside tensor_1.
        # set to same device. we don't care what device.

        if not isinstance(tensor_1, list):
            tensor_1 = tensor_1.cpu().tolist()
        if not isinstance(tensor_2, list):
            tensor_2 = tensor_2.cpu().tolist()

        in_order = len(tensor_1) <= len(tensor_2)
        longer = tensor_2 if in_order else tensor_1
        shorter = tensor_1 if in_order else tensor_2

        flag = False
        chunk_size = len(shorter)
        for chunk_idx in range(len(longer) - chunk_size + 1):
            subseq = longer[chunk_idx : chunk_idx + chunk_size]
            if subseq == shorter:
                flag = True
                break

        return flag


@require_torch
class BeamSearchTest(unittest.TestCase):
    def setUp(self):
        self.beam_search_tester = BeamSearchTester(self)

    def test_beam_hypotheses(self):
        inputs = self.beam_search_tester.prepare_inputs()
        self.beam_search_tester.check_beam_hypotheses(*inputs)

    def test_beam_scorer_update(self):
        inputs = self.beam_search_tester.prepare_inputs()
        self.beam_search_tester.check_beam_scorer_update(*inputs)

    def test_beam_scorer_finalize(self):
        inputs = self.beam_search_tester.prepare_inputs()
        self.beam_search_tester.check_beam_scores_finalize(*inputs)


@require_torch
class ConstrainedBeamSearchTest(unittest.TestCase):
    def setUp(self):
        self.constrained_beam_search_tester = ConstrainedBeamSearchTester(self)

    def test_constrained_beam_hypotheses(self):
        inputs = self.constrained_beam_search_tester.prepare_inputs()
        self.constrained_beam_search_tester.check_beam_hypotheses(*inputs)

    def test_constrained_beam_scorer_update(self):
        inputs = self.constrained_beam_search_tester.prepare_inputs()
        self.constrained_beam_search_tester.check_constrained_beam_scorer_update(*inputs)

    def test_constrained_beam_scorer_finalize(self):
        inputs = self.constrained_beam_search_tester.prepare_inputs()
        self.constrained_beam_search_tester.check_constrained_beam_scorer_finalize(*inputs)
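
# Hedged sketch (illustrative, not part of the test suite): how the scorer API validated above
# is driven in a decoding loop -- `process` picks the surviving beams at each step and the
# caller reorders and extends `input_ids` before the next step. The candidate tensors come
# from whatever model produced the step's log-probabilities.
def _example_beam_scorer_step(beam_scorer, input_ids, next_scores, next_tokens, next_indices):
    import torch

    beam_outputs = beam_scorer.process(input_ids, next_scores, next_tokens, next_indices)
    # Reorder the running hypotheses to match the surviving beams, then append the chosen tokens
    input_ids = torch.cat(
        [
            input_ids[beam_outputs["next_beam_indices"], :],
            beam_outputs["next_beam_tokens"].unsqueeze(-1),
        ],
        dim=-1,
    )
    return input_ids, beam_outputs["next_beam_scores"]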
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_beam_constraints.py
# coding=utf-8
# Copyright 2020 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
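
# Hedged sketch (illustrative, not part of the test suite): the constraint objects tested above
# are consumed by `generate(constraints=...)`, where constrained beam search guarantees that the
# phrasal constraint and one branch of each disjunctive constraint appear in every returned
# sequence. Model, tokenizer and the example words are placeholders.
def _example_constrained_generation(model, tokenizer, prompt: str):
    from transformers.generation import DisjunctiveConstraint, PhrasalConstraint

    constraints = [
        PhrasalConstraint(tokenizer("rainy", add_special_tokens=False).input_ids),
        DisjunctiveConstraint(
            [
                tokenizer("sunny", add_special_tokens=False).input_ids,
                tokenizer("cloudy", add_special_tokens=False).input_ids,
            ]
        ),
    ]
    inputs = tokenizer(prompt, return_tensors="pt")
    # Constrained decoding requires beam search (num_beams > 1) and no sampling
    return model.generate(**inputs, constraints=constraints, num_beams=4, max_new_tokens=20)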
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_configuration_utils.py
# coding=utf-8
# Copyright 2022 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import os
import tempfile
import unittest
import warnings

from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError

from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test


class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        """Tests that we can overwrite attributes at `from_pretrained` time."""
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value

    def test_validate(self):
        """
        Tests that the `validate` method is working as expected. Note that `validate` is called at initialization time
        """
        # Case 1: A correct configuration will not throw any warning
        with warnings.catch_warnings(record=True) as captured_warnings:
            GenerationConfig()
        self.assertEqual(len(captured_warnings), 0)

        # Case 2: Inconsequential but technically wrong configuration will throw a warning (e.g. setting sampling
        # parameters with `do_sample=False`). May be escalated to an error in the future.
        with warnings.catch_warnings(record=True) as captured_warnings:
            GenerationConfig(temperature=0.5)
        self.assertEqual(len(captured_warnings), 1)

        # Case 3: Impossible sets of constraints/parameters will raise an exception
        with self.assertRaises(ValueError):
            GenerationConfig(num_return_sequences=2)

        # Case 4: Passing `generate()`-only flags to `validate` will raise an exception
        with self.assertRaises(ValueError):
            GenerationConfig(logits_processor="foo")

        # Case 5: Model-specific parameters will NOT raise an exception or a warning
        with warnings.catch_warnings(record=True) as captured_warnings:
            GenerationConfig(foo="bar")
        self.assertEqual(len(captured_warnings), 0)

    def test_refuse_to_save(self):
        """Tests that we refuse to save a generation config that fails validation."""

        # setting the temperature alone is invalid, as we also need to set do_sample to True -> throws a warning
        # that is caught, and the config is not saved
        config = GenerationConfig()
        config.temperature = 0.5
        with tempfile.TemporaryDirectory() as tmp_dir:
            with warnings.catch_warnings(record=True) as captured_warnings:
                config.save_pretrained(tmp_dir)
            self.assertEqual(len(captured_warnings), 1)
            self.assertTrue("Fix these issues to save the configuration." in str(captured_warnings[0].message))
            self.assertTrue(len(os.listdir(tmp_dir)) == 0)

        # greedy decoding throws an exception if we try to return multiple sequences -> the exception is caught,
        # the config is not saved, and a warning is raised
        config = GenerationConfig()
        config.num_return_sequences = 2
        with tempfile.TemporaryDirectory() as tmp_dir:
            with warnings.catch_warnings(record=True) as captured_warnings:
                config.save_pretrained(tmp_dir)
            self.assertEqual(len(captured_warnings), 1)
            self.assertTrue("Fix these issues to save the configuration." in str(captured_warnings[0].message))
            self.assertTrue(len(os.listdir(tmp_dir)) == 0)

        # final check: no warnings thrown if it is correct, and file is saved
        config = GenerationConfig()
        with tempfile.TemporaryDirectory() as tmp_dir:
            with warnings.catch_warnings(record=True) as captured_warnings:
                config.save_pretrained(tmp_dir)
            self.assertEqual(len(captured_warnings), 0)
            self.assertTrue(len(os.listdir(tmp_dir)) == 1)


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-generation-config", push_to_hub=True, token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
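
# Hedged sketch (illustrative, not part of the test suite): besides the save/load round-trips
# validated above, a `GenerationConfig` can be passed directly to `generate`, which is the main
# consumer of the attributes being tested. Model, tokenizer and prompt are placeholders.
def _example_generate_with_config(model, tokenizer, prompt: str):
    from transformers import GenerationConfig

    generation_config = GenerationConfig(do_sample=True, temperature=0.7, max_new_tokens=20)
    inputs = tokenizer(prompt, return_tensors="pt")
    # Values in `generation_config` take precedence over the model's default generation config
    return model.generate(**inputs, generation_config=generation_config)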
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_tf_utils.py
# coding=utf-8 # Copyright 2022 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeq2SeqLM, TFAutoModelForSpeechSeq2Seq, TFAutoModelForVision2Seq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class UtilsFunctionsTest(unittest.TestCase): # tests whether the top_k_top_p_filtering function behaves as expected def test_top_k_top_p_filtering(self): logits = tf.convert_to_tensor( [ [ 8.2220991, # 3rd highest value; idx. 0 -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953, 7.35341276, # 5th highest value; idx. 9 8.43207833, # 2nd highest value; idx. 10 -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637, 7.44020759, # 4th highest value; idx. 25 9.38450987, # 1st highest value; idx. 26 2.12662941, -9.32562038, 2.35652522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922, # 4th highest value; idx. 13 2.28520723, 4.82767506, 4.30421368, 8.8275313, # 2nd highest value; idx. 17 5.44029958, # 5th highest value; idx. 18 -4.4735794, 7.38579536, # 3rd highest value; idx. 20 -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918, 9.67702323, # 1st highest value; idx. 
27 -5.89478553, 1.85370467, ], # cummulative prob of 5 highest values <= 0.6 ], dtype=tf.float32, ) non_inf_expected_idx = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]], dtype=tf.int32, ) # expected non filtered idx as noted above non_inf_expected_output = tf.convert_to_tensor( [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023], dtype=tf.float32, ) # expected non filtered values as noted above output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4) non_inf_output = output[output != -float("inf")] non_inf_idx = tf.cast( tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))), dtype=tf.int32, ) tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12) tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx) @require_tf class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_tf_available(): framework_dependent_parameters = { "AutoModelForCausalLM": TFAutoModelForCausalLM, "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq, "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM, "AutoModelForVision2Seq": TFAutoModelForVision2Seq, "LogitsProcessorList": TFLogitsProcessorList, "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor, "create_tensor_fn": tf.convert_to_tensor, "floats_tensor": floats_tensor, "return_tensors": "tf", } @slow def test_generate_tf_function_export_fixed_input_length(self): # TF-only test: tf.saved_model export test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") input_length = 2 max_new_tokens = 2 class DummyModel(tf.Module): def __init__(self, model): super(DummyModel, self).__init__() self.model = model @tf.function( input_signature=( tf.TensorSpec((None, input_length), tf.int32, name="input_ids"), tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"), ), jit_compile=True, ) def serving(self, input_ids, attention_mask): outputs = self.model.generate( input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, return_dict_in_generate=True, ) return {"sequences": outputs["sequences"]} dummy_input_ids = [[2, 0], [102, 103]] dummy_attention_masks = [[1, 0], [1, 1]] dummy_model = DummyModel(model=test_model) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving}) serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"] for batch_size in range(1, len(dummy_input_ids) + 1): inputs = { "input_ids": tf.constant(dummy_input_ids[:batch_size]), "attention_mask": tf.constant(dummy_attention_masks[:batch_size]), } tf_func_outputs = serving_func(**inputs)["sequences"] tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens) tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs) @slow def test_generate_tf_function_export_fixed_batch_size(self): # TF-only test: tf.saved_model export test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") batch_size = 1 max_new_tokens = 2 class DummyModel(tf.Module): def __init__(self, model): super(DummyModel, self).__init__() self.model = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"), tf.TensorSpec((batch_size, None), tf.int32, 
name="attention_mask"), ), jit_compile=True, ) def serving(self, input_ids, attention_mask): outputs = self.model.generate( input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, return_dict_in_generate=True, ) return {"sequences": outputs["sequences"]} dummy_input_ids = [[2], [102, 103]] dummy_attention_masks = [[1], [1, 1]] dummy_model = DummyModel(model=test_model) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving}) serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"] for input_row in range(len(dummy_input_ids)): inputs = { "input_ids": tf.constant([dummy_input_ids[input_row]]), "attention_mask": tf.constant([dummy_attention_masks[input_row]]), } tf_func_outputs = serving_func(**inputs)["sequences"] tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens) tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs) @slow @require_tensorflow_text def test_generate_tf_function_export_with_tf_tokenizer(self): # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir) class CompleteSentenceTransformer(tf.keras.layers.Layer): def __init__(self): super().__init__() self.tokenizer = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read() ) self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5") def call(self, inputs, *args, **kwargs): tokens = self.tokenizer.tokenize(inputs) input_ids, attention_mask = text.pad_model_inputs( tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id ) outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask) return self.tokenizer.detokenize(outputs) complete_model = CompleteSentenceTransformer() inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs") outputs = complete_model(inputs) keras_model = tf.keras.Model(inputs, outputs) keras_model.save(tmp_dir) def test_eos_token_id_int_and_list_top_k_top_sampling(self): # Has PT equivalent: this test relies on random sampling generation_kwargs = { "do_sample": True, "num_beams": 1, "top_p": 0.7, "top_k": 10, "temperature": 0.7, } expectation = 14 tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = """Hello, my dog is cute and""" tokens = tokenizer(text, return_tensors="tf") model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") eos_token_id = 638 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(":/CPU:0"): tf.random.set_seed(0) generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) eos_token_id = [638, 198] with tf.device(":/CPU:0"): tf.random.set_seed(0) generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) def test_model_kwarg_encoder_signature_filtering(self): # Has PT equivalent: ample use of framework-specific code bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") article = """Hugging Face is a technology company based in New York and Paris.""" input_ids = bart_tokenizer(article, return_tensors="tf").input_ids bart_model = 
TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart") output = bart_model.generate(input_ids).numpy() # Let's create a fake model that has a different signature. In particular, this fake model accepts "foo" as an # argument. Because "foo" is not in the encoder signature and doesn't start with "decoder_", it will be part of # the encoder kwargs prior to signature filtering, which would lead to an exception. But filtering kicks in and # saves the day. class FakeBart(TFBartForConditionalGeneration): def call(self, input_ids, foo=None, **kwargs): return super().call(input_ids, **kwargs) bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart") fake_output = bart_model.generate(input_ids, foo="bar").numpy() self.assertTrue(np.array_equal(output, fake_output)) # Encoder signature filtering only kicks in if it doesn't accept wildcard kwargs. The following test will fail # because it doesn't do signature filtering. class FakeEncoder(bart_model.model.encoder.__class__): def call(self, input_ids, **kwargs): return super().call(input_ids, **kwargs) fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared) bart_model.model.encoder = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) fake_output = bart_model.generate(input_ids).numpy() with self.assertRaises(ValueError): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(input_ids, foo="bar")
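# --- Added illustrative sketch (hypothetical, not part of the original test file) ---
# The behavior exercised in `test_model_kwarg_encoder_signature_filtering` can be
# reproduced in plain Python: a keyword argument is dropped before the encoder call when
# the encoder's signature neither names it nor accepts **kwargs. The helper below is a
# minimal sketch of that idea under those assumptions; `filter_kwargs_for` is an invented
# name, not the actual mechanism inside `generate()`.
import inspect


def filter_kwargs_for(fn, kwargs):
    params = inspect.signature(fn).parameters
    # With a **kwargs parameter every key is nominally accepted, so no filtering happens,
    # which is why the FakeEncoder variant above ends up raising on the unexpected "foo".
    if any(p.kind == inspect.Parameter.VAR_KEYWORD for p in params.values()):
        return dict(kwargs)
    return {k: v for k, v in kwargs.items() if k in params}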
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_tf_logits_process.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest import numpy as np from parameterized import parameterized from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers.generation import ( TFForcedBOSTokenLogitsProcessor, TFForcedEOSTokenLogitsProcessor, TFForceTokensLogitsProcessor, TFLogitsProcessorList, TFMinLengthLogitsProcessor, TFNoBadWordsLogitsProcessor, TFNoRepeatNGramLogitsProcessor, TFRepetitionPenaltyLogitsProcessor, TFSuppressTokensAtBeginLogitsProcessor, TFSuppressTokensLogitsProcessor, TFTemperatureLogitsWarper, TFTopKLogitsWarper, TFTopPLogitsWarper, ) from ..test_modeling_tf_common import ids_tensor @require_tf class TFLogitsProcessorTest(unittest.TestCase): def _get_uniform_logits(self, batch_size: int, length: int): scores = tf.ones((batch_size, length), dtype=tf.float32) / length return scores @parameterized.expand([(False,), (True,)]) def test_min_length_dist_processor(self, use_xla): vocab_size = 20 batch_size = 4 eos_token_id = 0 min_dist_processor = TFMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) if use_xla: min_dist_processor = tf.function(min_dist_processor, jit_compile=True) # check that min length is applied at length 5 cur_len = 5 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = min_dist_processor(input_ids, scores, cur_len) self.assertListEqual(scores_before_min_length[:, eos_token_id].numpy().tolist(), 4 * [-float("inf")]) # check that min length is not applied anymore at length 15 cur_len = 15 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = min_dist_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf(scores_before_min_length)).numpy()) @parameterized.expand([(False,), (True,)]) def test_temperature_dist_warper(self, use_xla): input_ids = None cur_len = None length = 20 scores = self._get_uniform_logits(batch_size=2, length=length) # tweak scores to not be uniform anymore scores = scores.numpy() scores[1, 5] = (1 / length) + 0.1 # peak, 2nd batch scores[1, 10] = (1 / length) - 0.4 # valley, 2nd batch scores = tf.convert_to_tensor(scores) # compute softmax probs = tf.nn.softmax(scores, axis=-1) temp_dist_warper_sharper = TFTemperatureLogitsWarper(temperature=0.5) temp_dist_warper_smoother = TFTemperatureLogitsWarper(temperature=1.3) if use_xla: temp_dist_warper_sharper = tf.function(temp_dist_warper_sharper, jit_compile=True) temp_dist_warper_smoother = tf.function(temp_dist_warper_smoother, jit_compile=True) warped_prob_sharp = tf.nn.softmax(temp_dist_warper_sharper(input_ids, tf.identity(scores), cur_len), axis=-1) warped_prob_smooth = tf.nn.softmax(temp_dist_warper_smoother(input_ids, tf.identity(scores), cur_len), axis=-1) # 
uniform distribution stays uniform tf.debugging.assert_near(probs[0, :], warped_prob_sharp[0, :], atol=1e-3) tf.debugging.assert_near(probs[0, :], warped_prob_smooth[0, :], atol=1e-3) # sharp peaks get higher, valleys get lower self.assertLess(tf.math.reduce_max(probs[1, :]), tf.math.reduce_max(warped_prob_sharp[1, :])) self.assertGreater(tf.math.reduce_min(probs[1, :]), tf.math.reduce_min(warped_prob_sharp[1, :])) # smooth peaks get lower, valleys get higher self.assertGreater(tf.math.reduce_max(probs[1, :]), tf.math.reduce_max(warped_prob_smooth[1, :])) self.assertLess(tf.math.reduce_min(probs[1, :]), tf.math.reduce_min(warped_prob_smooth[1, :])) @parameterized.expand([(False,), (True,)]) def test_repetition_penalty_dist_process(self, use_xla): vocab_size = 10 cur_len = 2 input_ids = tf.constant([[0, 1], [5, 0]], dtype=tf.int32) self.assertEqual(cur_len, input_ids.shape[1]) scores = self._get_uniform_logits(batch_size=2, length=vocab_size) mask = tf.cast(tf.constant([[1] + 9 * [0], 10 * [0]]), tf.bool) scores = tf.where(mask, -1 / vocab_size, scores) mask = tf.cast(tf.constant([10 * [0], 5 * [0] + [1] + 4 * [0]]), tf.bool) scores = tf.where(mask, 4 / vocab_size, scores) rep_penalty_proc = TFRepetitionPenaltyLogitsProcessor(penalty=2.0) if use_xla: rep_penalty_proc = tf.function(rep_penalty_proc, jit_compile=True) scores = rep_penalty_proc(input_ids, tf.identity(scores), cur_len) # check that values were correctly changed (used tokens are penalized: negative scores are doubled, # positive scores are halved) self.assertAlmostEqual(scores[0, 0].numpy(), -(1 / vocab_size) * 2) self.assertAlmostEqual(scores[0, 1].numpy(), (1 / vocab_size) / 2) self.assertAlmostEqual(scores[0, 2].numpy(), (1 / vocab_size)) # unused tokens should see no change self.assertAlmostEqual(scores[1, 0].numpy(), (1 / vocab_size) / 2) self.assertAlmostEqual(scores[1, 5].numpy(), (4 / vocab_size) / 2) self.assertAlmostEqual(scores[1, 2].numpy(), (1 / vocab_size)) # unused tokens should see no change @parameterized.expand([(False,), (True,)]) def test_top_k_dist_warper(self, use_xla): input_ids = None cur_len = None vocab_size = 10 batch_size = 2 # create ramp distribution ramp_logits = np.broadcast_to(np.arange(vocab_size, dtype=np.float32), (batch_size, vocab_size)).copy() ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size top_k_warp = TFTopKLogitsWarper(3) if use_xla: top_k_warp = tf.function(top_k_warp, jit_compile=True) scores = top_k_warp(input_ids, ramp_logits, cur_len) # check that correct tokens are filtered self.assertListEqual(tf.math.is_inf(scores[0]).numpy().tolist(), 7 * [True] + 3 * [False]) self.assertListEqual(tf.math.is_inf(scores[1]).numpy().tolist(), 2 * [True] + 3 * [False] + 5 * [True]) # check special cases length = 5 logits = self._get_uniform_logits(batch_size=batch_size, length=length) top_k_warp_safety_check = TFTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3) if use_xla: top_k_warp_safety_check = tf.function(top_k_warp_safety_check, jit_compile=True) scores = top_k_warp_safety_check(input_ids, logits, cur_len) # uniform dist is not changed self.assertListEqual(tf.math.reduce_sum(tf.where(scores == 0.0, 1, 0), axis=-1).numpy().tolist(), [0, 0]) ramp_logits = np.broadcast_to(np.arange(length, dtype=np.float32), (batch_size, length)).copy() scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual(tf.math.reduce_sum(tf.where(scores == 0.0, 1, 0), 
axis=-1).numpy().tolist(), [2, 2]) @parameterized.expand([(False,), (True,)]) def test_top_p_dist_warper(self, use_xla): input_ids = None cur_len = None vocab_size = 10 batch_size = 2 # create distribution and take log (inverse to Softmax as taken in TFTopPLogitsWarper) dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]], dtype=np.float32)) # top_p should have been 0.8 to test the edge case of top_p being exactly equal to sum of some token prob # However, due to the numerical instability of softmax in TF we choose this as the edge case # top_p as 0.8 passes when use_xla is True and fails when False. Refer PR #18984. top_p_warp = TFTopPLogitsWarper(0.79999995) if use_xla: top_p_warp = tf.function(top_p_warp, jit_compile=True) filtered_dist = tf.exp(top_p_warp(input_ids, dist, cur_len)) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 EXPECTED_FILTERED_DIST = tf.constant([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]], dtype=tf.float32) tf.debugging.assert_near(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3) # check edge cases with negative and extreme logits ramp_logits = np.broadcast_to( np.arange(vocab_size, dtype=np.float32)[None, :], (batch_size, vocab_size) ).copy() - (vocab_size // 2) # make ramp_logits more extreme ramp_logits[1] = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept top_p_warp = TFTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0) if use_xla: top_p_warp = tf.function(top_p_warp, jit_compile=True) filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps # 2. self.assertListEqual( tf.math.reduce_sum(tf.where(filtered_dist != 0.0, 1, 0), axis=-1).numpy().tolist(), [3, 2] ) def test_no_repeat_ngram_dist_processor(self): vocab_size = 3 batch_size = 2 cur_len = 4 input_ids = tf.constant([[1, 1, 2, 1], [0, 1, 0, 1]], dtype=tf.int32) self.assertEqual(cur_len, input_ids.shape[1]) scores = self._get_uniform_logits(batch_size, vocab_size) no_repeat_proc_2_gram = TFNoRepeatNGramLogitsProcessor(2) no_repeat_proc_3_gram = TFNoRepeatNGramLogitsProcessor(3) filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, tf.identity(scores), cur_len) filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, tf.identity(scores), cur_len) # 2-gram would forbid 2nd and 3rd token (1,2) at 1st batch and 1st token (0) at 2nd batch self.assertListEqual( tf.math.is_inf(filtered_scores_2_gram).numpy().tolist(), [[False, True, True], [True, False, False]] ) # 3-gram would forbid no token at 1st batch and 1st token (0) at 2nd batch self.assertListEqual( tf.math.is_inf(filtered_scores_3_gram).numpy().tolist(), [[False, False, False], [True, False, False]] ) @parameterized.expand([(False,), (True,)]) def test_no_bad_words_dist_processor(self, use_xla): vocab_size = 5 batch_size = 2 eos_token_id = 4 cur_len = 4 input_ids = tf.constant([[0, 1, 3, 1], [0, 1, 0, 1]], dtype=tf.int32) self.assertEqual(cur_len, input_ids.shape[1]) bad_word_tokens = [[1], [4], [1, 0], [0, 1, 2], [1, 3, 1, 3]] scores = self._get_uniform_logits(batch_size, vocab_size) no_bad_words_dist_proc = TFNoBadWordsLogitsProcessor(bad_words_ids=bad_word_tokens, eos_token_id=eos_token_id) if use_xla: no_bad_words_dist_proc = tf.function(no_bad_words_dist_proc, jit_compile=True) filtered_scores = no_bad_words_dist_proc(input_ids, tf.identity(scores), cur_len) # batch 1: 1st, 2nd, and 4th (0, 1, 3) token are forbidden # batch 2: 1st, 2nd, and 3rd (0, 
1, 2) token are forbidden self.assertListEqual( tf.math.is_inf(filtered_scores).numpy().tolist(), [[True, True, False, True, True], [True, True, True, False, True]], ) @parameterized.expand([(False,), (True,)]) def test_forced_bos_token_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 bos_token_id = 0 logits_processor = TFForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) # check that all scores are -inf except the bos_token_id score cur_len = 1 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertTrue( tf.math.reduce_all(tf.math.is_inf(scores[:, bos_token_id + 1 :]) & (scores[:, bos_token_id + 1 :] < 0)) ) self.assertListEqual(scores[:, bos_token_id].numpy().tolist(), 4 * [0]) # score for bos_token_id should be zero # check that bos_token_id is not forced if current length is greater than 1 cur_len = 4 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) @parameterized.expand([(False,), (True,)]) def test_forced_eos_token_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 eos_token_id = 0 max_length = 5 logits_processor = TFForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) # check that all scores are -inf except the eos_token_id when max_length-1 is reached cur_len = 4 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertTrue( tf.math.reduce_all(tf.math.is_inf(scores[:, eos_token_id + 1 :]) & (scores[:, eos_token_id + 1 :] < 0)) ) self.assertListEqual( scores[:, eos_token_id].numpy().tolist(), 4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length-1 is not reached cur_len = 3 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) @parameterized.expand([(False,), (True,)]) def test_suppress_tokens_at_begin_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 begin_suppress_tokens = [1, 2, 3] begin_index = 5 logits_processor = TFSuppressTokensAtBeginLogitsProcessor( begin_suppress_tokens=begin_suppress_tokens, begin_index=begin_index ) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) # Check that no scores are suppressed if begin_index is not reached cur_len = 4 input_ids = tf.convert_to_tensor([[11, 17, 15, 8], [14, 0, 19, 5], [13, 11, 18, 19], [11, 12, 16, 15]]) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) # Check that scores are suppressed if begin_index is reached cur_len = 5 input_ids = tf.convert_to_tensor([[5, 5, 5, 0, 17], [18, 1, 9, 14, 17], [18, 6, 8, 15, 19], [8, 12, 17, 1, 2]]) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) 
self.assertTrue(tf.math.reduce_all(tf.math.is_inf(tf.gather(scores, begin_suppress_tokens, axis=1)))) @parameterized.expand([(False,), (True,)]) def test_suppress_tokens_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 suppress_tokens = [1, 3, 5] keep_tokens = [i for i in range(vocab_size) if i not in suppress_tokens] logits_processor = TFSuppressTokensLogitsProcessor(suppress_tokens=suppress_tokens) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) # Check that suppress_tokens are suppressed and others are not cur_len = 5 input_ids = tf.convert_to_tensor([[0, 10, 19, 6, 3], [17, 4, 8, 17, 2], [7, 1, 11, 6, 15], [5, 8, 13, 16, 0]]) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertTrue(tf.math.reduce_all(tf.math.is_inf(tf.gather(scores, suppress_tokens, axis=1)))) self.assertFalse(tf.math.reduce_any(tf.math.is_inf(tf.gather(scores, keep_tokens, axis=1)))) @parameterized.expand([(False,), (True,)]) def test_force_tokens_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 force_token_map = {1: 2, 3: 2} logits_processor = TFForceTokensLogitsProcessor(force_token_map=force_token_map) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) # check that if the cur_len is contained in the force_token_map, the logits are the same # for all tokens except the one the force_token_map points to cur_len = 1 input_ids = tf.convert_to_tensor([[11], [7], [5], [15]]) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) tf.debugging.assert_near(tf.gather(scores, [force_token_map[cur_len]], axis=1), 0.0) non_forced_inds = [i for i in range(vocab_size) if i != force_token_map[cur_len]] self.assertTrue( tf.math.reduce_all(tf.math.is_inf(tf.gather(scores, [non_forced_inds], axis=1))), ) # check that if the cur_len is not contained in the force_token_map, the logits are not modified cur_len = 2 input_ids = tf.convert_to_tensor([[2, 19], [19, 15], [4, 9], [7, 6]]) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) @parameterized.expand([(False,), (True,)]) def test_processor_list(self, use_xla): # TODO (Joao): reintroduce TFNoRepeatNGramLogitsProcessor when it gets compatible with XLA batch_size = 4 cur_len = 10 vocab_size = 15 eos_token_id = 0 # dummy input_ids and scores input_ids = ids_tensor((batch_size, cur_len), vocab_size) input_ids_comp = tf.identity(input_ids) scores = self._get_uniform_logits(batch_size, vocab_size) scores_comp = tf.identity(scores) # instantiate all dist processors min_dist_proc = TFMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) temp_dist_warp = TFTemperatureLogitsWarper(temperature=0.5) rep_penalty_proc = TFRepetitionPenaltyLogitsProcessor(penalty=2.0) top_k_warp = TFTopKLogitsWarper(3) top_p_warp = TFTopPLogitsWarper(0.8) # no_repeat_proc = TFNoRepeatNGramLogitsProcessor(2) no_bad_words_dist_proc = TFNoBadWordsLogitsProcessor(bad_words_ids=[[1]], eos_token_id=eos_token_id) if use_xla: min_dist_proc = tf.function(min_dist_proc, jit_compile=True) temp_dist_warp = tf.function(temp_dist_warp, jit_compile=True) rep_penalty_proc = tf.function(rep_penalty_proc, jit_compile=True) top_k_warp = tf.function(top_k_warp, jit_compile=True) top_p_warp = tf.function(top_p_warp, jit_compile=True) # 
no_repeat_proc = tf.function(no_repeat_proc, jit_compile=True) no_bad_words_dist_proc = tf.function(no_bad_words_dist_proc, jit_compile=True) # no processor list scores = min_dist_proc(input_ids, scores, cur_len) scores = temp_dist_warp(input_ids, scores, cur_len) scores = rep_penalty_proc(input_ids, scores, cur_len) scores = top_k_warp(input_ids, scores, cur_len) scores = top_p_warp(input_ids, scores, cur_len) # scores = no_repeat_proc(input_ids, scores, cur_len) scores = no_bad_words_dist_proc(input_ids, scores, cur_len) # with processor list processor = TFLogitsProcessorList( [ min_dist_proc, temp_dist_warp, rep_penalty_proc, top_k_warp, top_p_warp, # no_repeat_proc, no_bad_words_dist_proc, ] ) scores_comp = processor(input_ids, scores_comp, cur_len) # remove inf scores = tf.where(tf.math.is_inf(scores), -1e9, scores) scores_comp = tf.where(tf.math.is_inf(scores_comp), -1e9, scores_comp) # scores should be equal tf.debugging.assert_near(scores, scores_comp, atol=1e-3) # input_ids should never be changed self.assertListEqual(input_ids.numpy().tolist(), input_ids_comp.numpy().tolist())
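# --- Added illustrative sketch (hypothetical, not part of the original test file) ---
# Every processor tested above follows the same `(input_ids, scores, cur_len)` calling
# convention, which is what allows each one to be wrapped in
# `tf.function(..., jit_compile=True)` and chained through `TFLogitsProcessorList`.
# A minimal custom processor written in that style might look like this (toy example,
# not a class shipped by transformers):
import tensorflow as tf


class TFSuppressOddTokensProcessor:
    """Toy processor: sets every odd vocab index to -inf."""

    def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
        vocab_size = tf.shape(scores)[-1]
        odd_mask = tf.math.equal(tf.range(vocab_size) % 2, 1)  # shape (vocab_size,)
        neg_inf = tf.fill(tf.shape(scores), -float("inf"))
        # Broadcast the 1D mask over the batch dimension.
        return tf.where(odd_mask[None, :], neg_inf, scores)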
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_flax_logits_process.py
# coding=utf-8 # Copyright 2021 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class LogitsProcessorTest(unittest.TestCase): def _get_uniform_logits(self, batch_size: int, length: int): scores = jnp.ones((batch_size, length)) / length return scores def test_temperature_dist_warper(self): input_ids = None length = 20 scores = self._get_uniform_logits(batch_size=2, length=length) # tweak scores to not be uniform anymore scores = scores.at[1, 5].set((1 / length) + 0.1) # peak, 2nd batch scores = scores.at[1, 10].set((1 / length) - 0.4) # valley, 2nd batch # compute softmax probs = jax.nn.softmax(scores, axis=-1) temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5) temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3) warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1) warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3)) self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3)) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max()) self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min()) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max()) self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min()) def test_top_k_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 # create ramp distribution ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size top_k_warp = FlaxTopKLogitsWarper(3) scores = top_k_warp(input_ids, ramp_logits, cur_len=None) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False]) self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True]) # check special case length = 5 top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3) ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy() scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 
0.0).sum(axis=-1).tolist(), [2, 2]) def test_top_p_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]])) top_p_warp = FlaxTopPLogitsWarper(0.8) filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None)) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]]) self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3)) # check edge cases with negative and extreme logits ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme ramp_logits[1] = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0) filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2]) def test_min_length_dist_processor(self): vocab_size = 20 batch_size = 4 eos_token_id = 0 min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) # check that min length is applied at length 5 input_ids = ids_tensor((batch_size, 20), vocab_size=20) cur_len = 5 scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")]) # check that min length is not applied anymore at length 15 scores = self._get_uniform_logits(batch_size, vocab_size) cur_len = 15 scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len) self.assertFalse(jnp.isinf(scores_before_min_length).any()) def test_forced_bos_token_logits_processor(self): vocab_size = 20 batch_size = 4 bos_token_id = 0 logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id) # check that all scores are -inf except the bos_token_id score input_ids = ids_tensor((batch_size, 1), vocab_size=20) cur_len = 1 scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len=cur_len) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all()) self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0]) # score for bos_token_id should be zero # check that bos_token_id is not forced if current length is greater than 1 cur_len = 3 scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len=cur_len) self.assertFalse(jnp.isinf(scores).any()) def test_forced_eos_token_logits_processor(self): vocab_size = 20 batch_size = 4 eos_token_id = 0 max_length = 5 logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id) # check that all scores are -inf except the eos_token_id when max_length is reached input_ids = ids_tensor((batch_size, 4), vocab_size=20) cur_len = 4 scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len=cur_len) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all()) self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0]) # score for eos_token_id should be zero 
# check that eos_token_id is not forced if max_length is not reached cur_len = 3 scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len=cur_len) self.assertFalse(jnp.isinf(scores).any()) def test_processor_list(self): batch_size = 4 sequence_length = 10 vocab_size = 15 eos_token_id = 2 bos_token_id = 1 max_length = 15 # dummy input_ids and scores input_ids = ids_tensor((batch_size, sequence_length), vocab_size) input_ids_comp = input_ids.copy() scores = self._get_uniform_logits(batch_size, vocab_size) scores_comp = scores.copy() # instantiate all dist processors temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5) top_k_warp = FlaxTopKLogitsWarper(3) top_p_warp = FlaxTopPLogitsWarper(0.8) # instantiate all logits processors min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id) eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id) cur_len = 10 # no processor list scores = temp_dist_warp(input_ids, scores, cur_len=cur_len) scores = top_k_warp(input_ids, scores, cur_len=cur_len) scores = top_p_warp(input_ids, scores, cur_len=cur_len) scores = min_dist_proc(input_ids, scores, cur_len=cur_len) scores = bos_dist_proc(input_ids, scores, cur_len=cur_len) scores = eos_dist_proc(input_ids, scores, cur_len=cur_len) # with processor list processor = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) scores_comp = processor(input_ids, scores_comp, cur_len=cur_len) # scores should be equal self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3)) # input_ids should never be changed self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist()) def test_processor_list_jitted(self): batch_size = 4 sequence_length = 10 vocab_size = 15 eos_token_id = 2 bos_token_id = 1 max_length = 15 # dummy input_ids and scores input_ids = ids_tensor((batch_size, sequence_length), vocab_size) input_ids_comp = input_ids.copy() scores = self._get_uniform_logits(batch_size, vocab_size) scores_comp = scores.copy() # instantiate all dist processors temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5) top_k_warp = FlaxTopKLogitsWarper(3) top_p_warp = FlaxTopPLogitsWarper(0.8) # instantiate all logits processors min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id) eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id) cur_len = 10 # no processor list def run_no_processor_list(input_ids, scores, cur_len): scores = temp_dist_warp(input_ids, scores, cur_len=cur_len) scores = top_k_warp(input_ids, scores, cur_len=cur_len) scores = top_p_warp(input_ids, scores, cur_len=cur_len) scores = min_dist_proc(input_ids, scores, cur_len=cur_len) scores = bos_dist_proc(input_ids, scores, cur_len=cur_len) scores = eos_dist_proc(input_ids, scores, cur_len=cur_len) return scores # with processor list def run_processor_list(input_ids, scores, cur_len): processor = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) scores = processor(input_ids, scores, cur_len=cur_len) return scores jitted_run_no_processor_list = jax.jit(run_no_processor_list) jitted_run_processor_list = jax.jit(run_processor_list) scores = 
jitted_run_no_processor_list(input_ids, scores, cur_len) scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len) # scores should be equal self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3)) # input_ids should never be changed self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
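# --- Added illustrative sketch (hypothetical, not part of the original test file) ---
# Note on the explicit `cur_len` argument used throughout: under `jax.jit`, array shapes
# must stay static, so Flax generation keeps `input_ids` at a fixed padded length and
# threads the current length through as data. A processor written in that style branches
# with `jnp.where` rather than a Python `if`, so it stays jittable. The function below is
# a minimal sketch of `FlaxMinLengthLogitsProcessor`-like behavior; its name is invented.
import jax.numpy as jnp


def suppress_eos_before_min_length(input_ids, scores, cur_len, min_length=10, eos_token_id=0):
    penalized = scores.at[:, eos_token_id].set(-float("inf"))
    # `cur_len < min_length` may be a traced value, so select with jnp.where.
    return jnp.where(cur_len < min_length, penalized, scores)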
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_stopping_criteria.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class StoppingCriteriaTestCase(unittest.TestCase): def _get_tensors(self, length): batch_size = 3 vocab_size = 250 input_ids = ids_tensor((batch_size, length), vocab_size) scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length return input_ids, scores def test_list_criteria(self): input_ids, scores = self._get_tensors(5) criteria = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10), MaxTimeCriteria(max_time=0.1), ] ) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(9) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(10) self.assertTrue(criteria(input_ids, scores)) def test_max_length_criteria(self): criteria = MaxLengthCriteria(max_length=10) input_ids, scores = self._get_tensors(5) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(9) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(10) self.assertTrue(criteria(input_ids, scores)) def test_max_new_tokens_criteria(self): criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5) input_ids, scores = self._get_tensors(5) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(9) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(10) self.assertTrue(criteria(input_ids, scores)) criteria_list = StoppingCriteriaList([criteria]) self.assertEqual(criteria_list.max_length, 10) def test_max_time_criteria(self): input_ids, scores = self._get_tensors(5) criteria = MaxTimeCriteria(max_time=0.1) self.assertFalse(criteria(input_ids, scores)) criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2) self.assertTrue(criteria(input_ids, scores)) def test_validate_stopping_criteria(self): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10) with self.assertWarns(UserWarning): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11) stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11) self.assertEqual(len(stopping_criteria), 1)
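# --- Added illustrative sketch (hypothetical, not part of the original test file) ---
# A stopping criterion is just a callable taking (input_ids, scores) and returning a
# bool, which is why the criteria above compose freely inside `StoppingCriteriaList`.
# A custom criterion can be written the same way (toy example, not a class shipped by
# transformers):
import torch

from transformers import StoppingCriteria


class StopOnTokenCriteria(StoppingCriteria):
    """Toy criterion: stop once every sequence in the batch ends with `token_id`."""

    def __init__(self, token_id: int):
        self.token_id = token_id

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return bool((input_ids[:, -1] == self.token_id).all())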
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_flax_utils.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" # assumed parallelism: 8 if is_torch_available(): import torch def ids_tensor(shape, vocab_size, rng=None): """Creates a random int32 tensor of the shape within the vocab size.""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) output = np.array(values, dtype=jnp.int32).reshape(shape) return output def random_attention_mask(shape, rng=None): attn_mask = ids_tensor(shape, vocab_size=2, rng=rng) # make sure that at least one token is attended to for each batch attn_mask[:, -1] = 1 return attn_mask @require_flax class FlaxGenerationTesterMixin: model_tester = None all_generative_model_classes = () def _get_input_ids_and_config(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 2 max_batch_size = 2 sequence_length = inputs["input_ids"].shape[-1] // 2 input_ids = inputs["input_ids"][:max_batch_size, :sequence_length] attention_mask = jnp.ones_like(input_ids) attention_mask = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens max_length = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` config.pad_token_id = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def test_greedy_generate_pt_fx(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = False config.max_length = max_length config.decoder_start_token_id = 0 for model_class in self.all_generative_model_classes: flax_model = model_class(config) pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params) flax_generation_outputs = flax_model.generate(input_ids).sequences pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long)) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist()) def test_greedy_generate(self): config, input_ids, _, 
max_length = self._get_input_ids_and_config() config.do_sample = False config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_sample_generate(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = True config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_beam_search_generate(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = False config.max_length = max_length config.num_beams = 2 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_beam_search_generate_num_return_sequences(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = False config.max_length = max_length config.num_beams = 2 config.num_return_sequences = 2 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences) def test_sample_generate_logits_warper(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = True config.max_length = max_length config.temperature = 0.8 config.top_k = 10 config.top_p = 0.3 config.min_length = 1 config.forced_bos_token_id = 8 config.forced_eos_token_id = 9 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_greedy_generate_logits_warper(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.max_length = max_length config.min_length = 1 config.forced_bos_token_id = 8 config.forced_eos_token_id = 9 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_beam_search_generate_logits_warper(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.max_length = max_length config.num_beams = 2 config.min_length = 1 config.forced_bos_token_id = 8 
config.forced_eos_token_id = 9 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_greedy_generate_attn_mask(self): config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # pad attention mask on the left attention_mask = attention_mask.at[(0, 0)].set(0) config.do_sample = False config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_sample_generate_attn_mask(self): config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # pad attention mask on the left attention_mask = attention_mask.at[(0, 0)].set(0) config.do_sample = True config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_beam_search_generate_attn_mask(self): config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # pad attention mask on the left attention_mask = attention_mask.at[(0, 0)].set(0) config.num_beams = 2 config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) @require_flax class FlaxGenerationIntegrationTests(unittest.TestCase): def test_validate_generation_inputs(self): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert") model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only") encoder_input_str = "Hello world" input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(ValueError, "do_samples"): model.generate(input_ids, do_samples=True) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(ValueError, "foo"): fake_model_kwargs = {"foo": "bar"} model.generate(input_ids, **fake_model_kwargs)
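# --- Added illustrative sketch (hypothetical, not part of the original test file) ---
# The `jit_generate = jit(model.generate)` pattern used above works because Flax
# generation is shape-static: `config.max_length` fixes the output length, so XLA can
# compile a single program per input shape, and the jitted call must then match the
# eager one token for token. A minimal usage sketch (the helper name is invented):
from jax import jit


def compile_generate(model):
    """Compile `model.generate` once; the first call per input shape triggers tracing."""
    fast_generate = jit(model.generate)

    def run(input_ids):
        return fast_generate(input_ids).sequences

    return run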
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/generation/test_logits_process.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from typing import List, Union from parameterized import parameterized from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from torch import nn from transformers.generation import ( EncoderNoRepeatNGramLogitsProcessor, EncoderRepetitionPenaltyLogitsProcessor, EpsilonLogitsWarper, EtaLogitsWarper, ExponentialDecayLengthPenalty, ForcedBOSTokenLogitsProcessor, ForcedEOSTokenLogitsProcessor, HammingDiversityLogitsProcessor, InfNanRemoveLogitsProcessor, LogitNormalization, LogitsProcessorList, MinLengthLogitsProcessor, MinNewTokensLengthLogitsProcessor, NoBadWordsLogitsProcessor, NoRepeatNGramLogitsProcessor, PrefixConstrainedLogitsProcessor, RepetitionPenaltyLogitsProcessor, SequenceBiasLogitsProcessor, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, TypicalLogitsWarper, UnbatchedClassifierFreeGuidanceLogitsProcessor, ) from transformers.generation.logits_process import BarkEosPrioritizerLogitsProcessor @require_torch class LogitsProcessorTest(unittest.TestCase): def _get_uniform_logits(self, batch_size: int, length: int): scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length return scores def test_min_length_dist_processor(self): vocab_size = 20 batch_size = 4 eos_token_id = 0 min_dist_processor = MinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) # check that min length is applied at length 5 input_ids = ids_tensor((batch_size, 5), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = min_dist_processor(input_ids, scores) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")]) # check that min length is not applied anymore at length 15 input_ids = ids_tensor((batch_size, 15), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = min_dist_processor(input_ids, scores) self.assertFalse(torch.isinf(scores_before_min_length).any()) @parameterized.expand([(0,), ([0, 18],)]) def test_new_min_length_dist_processor(self, eos_token_id: Union[int, List[int]]): vocab_size = 20 batch_size = 4 # check that first input is skipped (min new length applying) input_ids = ids_tensor((batch_size, 5), vocab_size=20) new_min_dist_processor = MinNewTokensLengthLogitsProcessor( prompt_length_to_skip=input_ids.shape[-1], min_new_tokens=3, eos_token_id=eos_token_id ) expected_eos_scores_before_min_length = batch_size * [-float("inf")] if isinstance(eos_token_id, list): expected_eos_scores_before_min_length *= len(eos_token_id) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertListEqual( scores_before_min_length[:, eos_token_id].flatten().tolist(), expected_eos_scores_before_min_length ) # check 
that the prompt length to skip was recorded as 5, i.e. the first 5 tokens are skipped when counting new tokens self.assertTrue(new_min_dist_processor.prompt_length_to_skip == 5) # check that min length is applied at length 2 input_ids = ids_tensor((batch_size, 2), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertListEqual( scores_before_min_length[:, eos_token_id].flatten().tolist(), expected_eos_scores_before_min_length ) # check that min new length is applied at length 6 (because it has only 1 new token) input_ids = ids_tensor((batch_size, 6), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertListEqual( scores_before_min_length[:, eos_token_id].flatten().tolist(), expected_eos_scores_before_min_length ) # check that min new length is applied at length 7 (because it has only 2 new tokens) input_ids = ids_tensor((batch_size, 7), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertListEqual( scores_before_min_length[:, eos_token_id].flatten().tolist(), expected_eos_scores_before_min_length ) # check that min new length is not applied anymore at length 8 input_ids = ids_tensor((batch_size, 8), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertFalse(torch.isinf(scores_before_min_length).any()) # check that min new length is not applied anymore at length 15 input_ids = ids_tensor((batch_size, 15), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertFalse(torch.isinf(scores_before_min_length).any()) def test_temperature_dist_warper(self): input_ids = None length = 20 scores = self._get_uniform_logits(batch_size=2, length=length) # tweak scores to not be uniform anymore scores[1, 5] = (1 / length) + 0.1 # peak, 2nd batch scores[1, 10] = (1 / length) - 0.4 # valley, 2nd batch # compute softmax probs = nn.functional.softmax(scores, dim=-1) temp_dist_warper_sharper = TemperatureLogitsWarper(temperature=0.5) temp_dist_warper_smoother = TemperatureLogitsWarper(temperature=1.3) warped_prob_sharp = nn.functional.softmax(temp_dist_warper_sharper(input_ids, scores.clone()), dim=-1) warped_prob_smooth = nn.functional.softmax(temp_dist_warper_smoother(input_ids, scores.clone()), dim=-1) # uniform distribution stays uniform self.assertTrue(torch.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3)) self.assertTrue(torch.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3)) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max()) self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min()) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max()) self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min()) def test_repetition_penalty_dist_process(self): input_ids = torch.tensor([[0, 1], [5, 0]], device=torch_device, dtype=torch.long) vocab_size = 10 scores = self._get_uniform_logits(batch_size=2, length=vocab_size) # give values special values scores[0, 0] = -(1 / vocab_size) scores[1, 5] = 4 / vocab_size rep_penalty_proc = RepetitionPenaltyLogitsProcessor(penalty=2.0) scores = 
rep_penalty_proc(input_ids, scores.clone()) # check that values were correctly changed self.assertAlmostEqual(scores[0, 0].item(), -(1 / vocab_size) * 2) self.assertAlmostEqual(scores[0, 1].item(), (1 / vocab_size) / 2) self.assertAlmostEqual(scores[1, 0].item(), (1 / vocab_size) / 2) self.assertAlmostEqual(scores[1, 5].item(), (4 / vocab_size) / 2) def test_encoder_repetition_penalty_dist_process(self): input_ids = torch.tensor([[0, 1], [5, 0]], device=torch_device, dtype=torch.long) vocab_size = 10 scores = self._get_uniform_logits(batch_size=2, length=vocab_size) # give values special values scores[0, 0] = -(1 / vocab_size) scores[1, 5] = 4 / vocab_size rep_penalty_proc = EncoderRepetitionPenaltyLogitsProcessor(penalty=2.0, encoder_input_ids=input_ids) scores = rep_penalty_proc(input_ids, scores.clone()) # check that values were correctly changed self.assertAlmostEqual(scores[0, 0].item(), -(1 / vocab_size) / 2) self.assertAlmostEqual(scores[0, 1].item(), (1 / vocab_size) * 2) self.assertAlmostEqual(scores[1, 0].item(), (1 / vocab_size) * 2) self.assertAlmostEqual(scores[1, 5].item(), (4 / vocab_size) * 2) # check that values not in the encoder ids were NOT changed self.assertAlmostEqual(scores[0, 2].item(), (1 / vocab_size)) self.assertAlmostEqual(scores[1, 2].item(), (1 / vocab_size)) def test_top_k_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 # create ramp distribution ramp_logits = ( torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat(batch_size, 1) ) ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size top_k_warp = TopKLogitsWarper(3) scores = top_k_warp(input_ids, ramp_logits) # check that correct tokens are filtered self.assertListEqual(torch.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False]) self.assertListEqual(torch.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True]) # check special cases length = 5 logits = self._get_uniform_logits(batch_size=batch_size, length=length) top_k_warp_safety_check = TopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3) scores = top_k_warp_safety_check(input_ids, logits) # uniform dist is not changed self.assertListEqual((scores == 0.0).to(torch.long).sum(dim=-1).tolist(), [0, 0]) ramp_logits = torch.arange(length, device=torch_device, dtype=torch.float).unsqueeze(0).repeat(batch_size, 1) scores = top_k_warp_safety_check(input_ids, ramp_logits) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).to(torch.long).sum(dim=-1).tolist(), [2, 2]) def test_top_p_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) dist = torch.log( torch.tensor([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]], device=torch_device, dtype=torch.float) ) top_p_warp = TopPLogitsWarper(0.8) filtered_dist = torch.exp(top_p_warp(input_ids, dist)) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 EXPECTED_FILTERED_DIST = torch.tensor( [[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]], device=torch_device, dtype=torch.float ) self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3)) # check edge cases with negative and extreme logits ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat( batch_size, 1 ) - (vocab_size // 2) # make ramp_logits more extreme ramp_logits[1] = ramp_logits[1] * 100.0 # make sure at least 
2 tokens are kept top_p_warp = TopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0) filtered_dist = top_p_warp(input_ids, ramp_logits) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [3, 2]) def test_typical_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) dist = torch.log( torch.tensor([[0.97, 0.01, 0.01, 0.01], [0.4, 0.2, 0.2, 0.2]], device=torch_device, dtype=torch.float) ) typical_warp = TypicalLogitsWarper(0.5) filtered_dist = torch.exp(typical_warp(input_ids, dist)) # dist should be filtered to keep min num values so that sum is >= 0.7 # exp (-inf) => 0 EXPECTED_FILTERED_DIST = torch.tensor( [[0.97, 0.0, 0.0, 0.0], [0.0, 0.2, 0.2, 0.2]], device=torch_device, dtype=torch.float ) self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3)) # check special cases length = 5 logits = self._get_uniform_logits(batch_size=batch_size, length=length) typical_warp_safety_check = TypicalLogitsWarper(mass=0.5, filter_value=0.0, min_tokens_to_keep=3) scores = typical_warp_safety_check(input_ids, logits) # uniform dist is not changed self.assertListEqual((scores == 0.0).to(torch.long).sum(dim=-1).tolist(), [0, 0]) # check edge cases with negative and extreme logits ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat( batch_size, 1 ) - (vocab_size // 2) # make ramp_logits more extreme ramp_logits[1] = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept typical_warp = TypicalLogitsWarper(0.7, min_tokens_to_keep=2, filter_value=0.0) filtered_dist = typical_warp(input_ids, ramp_logits) # first batch should keep two tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [2, 2]) def test_epsilon_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) dist = torch.log( torch.tensor( [[0.87, 0.099, 0.001, 0.03], [0.4, 0.299, 0.101, 0.2]], device=torch_device, dtype=torch.float ) ) epsilon_warp = EpsilonLogitsWarper(0.1) filtered_dist = torch.exp(epsilon_warp(input_ids, dist)) # dist should be filtered to only keep values with proba >= 0.1 # exp (-inf) => 0 EXPECTED_FILTERED_DIST = torch.tensor( [[0.87, 0, 0, 0], [0.4, 0.299, 0.101, 0.2]], device=torch_device, dtype=torch.float ) self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3)) # check edge cases with negative and extreme logits ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat( batch_size, 1 ) - (vocab_size // 2) # make ramp_logits more extreme ramp_logits[1] = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept epsilon_warp = EpsilonLogitsWarper(5e-2, min_tokens_to_keep=2, filter_value=0.0) filtered_dist = epsilon_warp(input_ids, ramp_logits) # first batch should keep 3 tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. 
self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [3, 2]) def test_eta_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) dist = torch.log( torch.tensor([[0.0, 0.1, 0.8, 0.1], [0.01, 0.04, 0.9, 0.05]], device=torch_device, dtype=torch.float) ) eta_warp = EtaLogitsWarper(0.0625) filtered_dist = torch.exp(eta_warp(input_ids, dist)) # dist should be filtered to only keep values with proba >= min(0.0625, sqrt(0.0625) * e^-H(p)) # min(0.0625, 0.1320) is the cutoff for the first row and min(0.0625, 0.1644) is for the second # where H is the entropy function and p is the probability vector. # exp (-inf) => 0 EXPECTED_FILTERED_DIST = torch.tensor( [[0.0, 0.1, 0.8, 0.1], [0.0, 0.0, 0.9, 0.0]], device=torch_device, dtype=torch.float ) self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3)) # check edge cases with negative and extreme logits ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat( batch_size, 1 ) - (vocab_size // 2) # make ramp_logits more extreme ramp_logits[1] = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept eta_warp = EtaLogitsWarper(0.1, min_tokens_to_keep=2, filter_value=0.0) filtered_dist = eta_warp(input_ids, ramp_logits) # first batch should keep 2 tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [2, 2]) def test_no_repeat_ngram_dist_processor(self): vocab_size = 3 batch_size = 2 input_ids = torch.tensor([[1, 1, 2, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long) scores = self._get_uniform_logits(batch_size, vocab_size) no_repeat_proc_2_gram = NoRepeatNGramLogitsProcessor(2) no_repeat_proc_3_gram = NoRepeatNGramLogitsProcessor(3) filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, scores.clone()) filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, scores.clone()) # 2-gram would forbid 2nd and 3rd token (1,2) at 1st batch and 1st token (0) at 2nd batch self.assertListEqual(torch.isinf(filtered_scores_2_gram).tolist(), [[False, True, True], [True, False, False]]) # 3-gram would forbid no token at 1st batch and 1st token (0) at 2nd batch self.assertListEqual( torch.isinf(filtered_scores_3_gram).tolist(), [[False, False, False], [True, False, False]] ) def test_encoder_no_repeat_ngram_dist_processor(self): vocab_size = 3 num_beams = 2 batch_size = 1 encoder_input_ids = torch.tensor([1, 2, 1, 1], device=torch_device, dtype=torch.long) input_ids = torch.tensor([[1, 2, 1], [8, 0, 2]], device=torch_device, dtype=torch.long) scores = self._get_uniform_logits(batch_size * num_beams, vocab_size) no_repeat_proc_2_gram = EncoderNoRepeatNGramLogitsProcessor(2, encoder_input_ids=encoder_input_ids) no_repeat_proc_3_gram = EncoderNoRepeatNGramLogitsProcessor(3, encoder_input_ids=encoder_input_ids) filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, scores.clone()) filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, scores.clone()) # 2-gram would forbid 1st and 2nd token at 1st beam and 1st token (0) at 2nd beam self.assertListEqual(torch.isinf(filtered_scores_2_gram).tolist(), [[False, True, True], [False, True, False]]) # 3-gram would forbid 1st token at 1st beam and no token at 2nd beam self.assertListEqual( torch.isinf(filtered_scores_3_gram).tolist(), [[False, True, False], [False, False, False]] ) # Batched input vocab_size = 3 
num_beams = 2 batch_size = 2 encoder_input_ids = torch.tensor([[1, 2, 1, 1], [0, 0, 2, 1]], device=torch_device, dtype=torch.long) input_ids = torch.tensor([[1, 2, 1], [1, 0, 2], [0, 0, 0], [0, 2, 2]], device=torch_device, dtype=torch.long) scores = self._get_uniform_logits(batch_size * num_beams, vocab_size) no_repeat_proc_2_gram = EncoderNoRepeatNGramLogitsProcessor(2, encoder_input_ids=encoder_input_ids) no_repeat_proc_3_gram = EncoderNoRepeatNGramLogitsProcessor(3, encoder_input_ids=encoder_input_ids) filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, scores.clone()) filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, scores.clone()) # 2gram # Batch 1 # - Beam 1: tokens (1, 2) forbidden # - Beam 2: tokens (1) forbidden # Batch 2 # - Beam 1: tokens (0, 2) forbidden # - Beam 2: tokens (1) forbidden self.assertListEqual( torch.isinf(filtered_scores_2_gram).tolist(), [[False, True, True], [False, True, False], [True, False, True], [False, True, False]], ) # Batch 1 # - Beam 1: tokens (1) forbidden # - Beam 2: tokens () forbidden # Batch 2 # - Beam 1: tokens (2) forbidden # - Beam 2: tokens () forbidden self.assertListEqual( torch.isinf(filtered_scores_3_gram).tolist(), [[False, True, False], [False, False, False], [False, False, True], [False, False, False]], ) def test_no_bad_words_dist_processor(self): vocab_size = 5 batch_size = 2 eos_token_id = 4 input_ids = torch.tensor([[0, 1, 3, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long) bad_word_tokens = [[1], [4], [1, 0], [0, 1, 2], [1, 3, 1, 3]] scores = self._get_uniform_logits(batch_size, vocab_size) no_bad_words_dist_proc = NoBadWordsLogitsProcessor(bad_words_ids=bad_word_tokens, eos_token_id=eos_token_id) filtered_scores = no_bad_words_dist_proc(input_ids, scores.clone()) # batch 1: 1st, 2nd, and 4th (0, 1, 3) token are forbidden # batch 2: 1st, 2nd, and 3rd (0, 1, 2) token are forbidden # Note that 5th element cannot be forbidden as it is EOS token self.assertListEqual( torch.isinf(filtered_scores).tolist(), [[True, True, False, True, False], [True, True, True, False, False]] ) # check edge case no_bad_words_dist_proc = NoBadWordsLogitsProcessor(bad_words_ids=[[4]], eos_token_id=eos_token_id) filtered_scores = no_bad_words_dist_proc(input_ids, scores.clone()) self.assertTrue(torch.allclose(scores, filtered_scores, atol=1e-3)) def test_bias_dist_processor(self): vocab_size = 5 batch_size = 2 input_ids = torch.tensor([[0, 1, 3, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long) positive_bias = {(1,): 100.0, (4,): 100.0} negative_bias = {(1, 0): -100.0, (0, 1, 2): -100.0, (1, 3, 1, 3): -100.0} # biases the same termination twice, to ensure we can handle overlapping terminations (it won't have an effect # on the test cases, though) negative_bias.update({(1, 3, 1, 3, 1, 3): -100.0}) sequence_bias = {**positive_bias, **negative_bias} # scores = 0 to facilitate checks scores = torch.zeros((batch_size, vocab_size), dtype=torch.float, device=torch_device) bias_dist_proc = SequenceBiasLogitsProcessor(sequence_bias=sequence_bias) filtered_scores = bias_dist_proc(input_ids, scores.clone()) # batch 1: positive bias: tokens (1, 4); negative bias: tokens (0, 3); neutral: tokens (2) # batch 2: positive bias: tokens (1, 4); negative bias: tokens (0, 2); neutral: tokens (3) self.assertListEqual( filtered_scores.tolist(), [[-100.0, 100.0, 0.0, -100.0, 100.0], [-100.0, 100.0, -100.0, 0.0, 100.0]] ) def test_processor_list(self): batch_size = 4 sequence_length = 10 vocab_size = 15 eos_token_id = 0 # dummy input_ids and scores 
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.clone()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.clone()

        # instantiate all dist processors
        min_dist_proc = MinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        temp_dist_warp = TemperatureLogitsWarper(temperature=0.5)
        rep_penalty_proc = RepetitionPenaltyLogitsProcessor(penalty=2.0)
        top_k_warp = TopKLogitsWarper(3)
        top_p_warp = TopPLogitsWarper(0.8)
        no_repeat_proc = NoRepeatNGramLogitsProcessor(2)
        no_bad_words_dist_proc = NoBadWordsLogitsProcessor(bad_words_ids=[[1]], eos_token_id=eos_token_id)

        # no processor list
        scores = min_dist_proc(input_ids, scores)
        scores = temp_dist_warp(input_ids, scores)
        scores = rep_penalty_proc(input_ids, scores)
        scores = top_k_warp(input_ids, scores)
        scores = top_p_warp(input_ids, scores)
        scores = no_repeat_proc(input_ids, scores)
        scores = no_bad_words_dist_proc(input_ids, scores)

        # with processor list
        processor = LogitsProcessorList(
            [
                min_dist_proc,
                temp_dist_warp,
                rep_penalty_proc,
                top_k_warp,
                top_p_warp,
                no_repeat_proc,
                no_bad_words_dist_proc,
            ]
        )
        scores_comp = processor(input_ids, scores_comp)

        # scores should be equal
        self.assertTrue(torch.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_prefix_constrained_logits_processor(self):
        vocab_size = 5
        batch_size = 2

        input_ids = torch.tensor([[0, 1, 3, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long)
        scores = self._get_uniform_logits(batch_size, vocab_size)

        def prefix_allowed_tokens_fn(batch_id, inputs_ids):
            return [[0, 1], [2, 3]][batch_id]

        prefix_constrained_logits_proc = PrefixConstrainedLogitsProcessor(prefix_allowed_tokens_fn, 1)

        filtered_scores = prefix_constrained_logits_proc(input_ids, scores.clone())

        # batch 1: 1st, 2nd (0, 1) tokens are allowed
        # batch 2: 3rd, 4th (2, 3) tokens are allowed
        self.assertListEqual(
            torch.isinf(filtered_scores).tolist(), [[False, False, True, True, True], [True, True, False, False, True]]
        )

        def empty_prefix_allowed_tokens_fn(batch_id, inputs_ids):
            return []

        prefix_constrained_logits_proc = PrefixConstrainedLogitsProcessor(empty_prefix_allowed_tokens_fn, 1)

        self.assertRaises(ValueError, prefix_constrained_logits_proc, input_ids, scores.clone())

    def test_hamming_diversity(self):
        vocab_size = 4
        num_beams = 2
        num_beam_groups = 2

        scores = self._get_uniform_logits(num_beams, vocab_size)
        # batch_idx = 0 -> index batch_idx * num_beam_groups -> idx = 0 * 2 = 0 -> penalises token 0 (current_tokens[0])
        # batch_idx = 1 -> index batch_idx * num_beam_groups -> idx = 1 * 2 = 2 -> penalises token 1 (current_tokens[2])
        current_tokens = torch.tensor([0, 3, 1, 2], device=torch_device, dtype=torch.long)

        diversity_logits_processor = HammingDiversityLogitsProcessor(
            diversity_penalty=1.0, num_beams=num_beams, num_beam_groups=num_beam_groups
        )

        processed_scores = diversity_logits_processor(None, scores, current_tokens, 1)

        self.assertTrue(
            torch.allclose(
                processed_scores[0], torch.tensor([-0.7500, 0.2500, 0.2500, 0.2500], device=torch_device), atol=1e-3
            )
        )
        self.assertTrue(
            torch.allclose(
                processed_scores[1], torch.tensor([0.2500, -0.7500, 0.2500, 0.2500], device=torch_device), atol=1e-3
            )
        )

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = ForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores)
        self.assertTrue(torch.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores)
        self.assertFalse(torch.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = ForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length-1 is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores)
        self.assertTrue(torch.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length-1 is not reached
        input_ids = ids_tensor((batch_size, 3), vocab_size=20)
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores)
        self.assertFalse(torch.isinf(scores).any())

    def test_remove_nan_inf_logits_processor(self):
        scores = torch.tensor(
            [[0.0, 0.7, 0.8, float("nan")], [0.1, float("inf"), 0.3, float("-inf")]], device=torch_device
        )
        input_ids = ids_tensor((2, 4), vocab_size=20)

        logits_processor = InfNanRemoveLogitsProcessor()

        scores = logits_processor(input_ids, scores)

        self.assertTrue(
            torch.allclose(
                scores,
                torch.tensor(
                    [[0.0, 0.7, 0.8, 0.0], [0.1, torch.finfo(scores.dtype).max, 0.3, torch.finfo(scores.dtype).min]],
                    device=torch_device,
                ),
                atol=1e-6,
            )
        )

    def test_exponential_decay_length_penalty(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        penalty_start = 5
        penalty_factor = 1.1

        input_ids = ids_tensor((batch_size, 2), vocab_size=vocab_size)
        input_ids_seq_length = input_ids.shape[-1]

        length_decay_processor = ExponentialDecayLengthPenalty(
            exponential_decay_length_penalty=(penalty_start, penalty_factor),
            eos_token_id=eos_token_id,
            input_ids_seq_length=input_ids_seq_length,
        )

        # check that penalty is not applied before start
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_start = torch.clone(scores)  # clone scores as processor updates them in place
        scores_before_start = length_decay_processor(input_ids, scores_before_start)
        self.assertListEqual(scores_before_start[:, eos_token_id].tolist(), scores[:, eos_token_id].tolist())

        # check that penalty is applied after start
        input_ids = ids_tensor((batch_size, 20), vocab_size=vocab_size)
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_after_start = torch.clone(scores)  # clone scores as processor updates them in place
        scores_after_start = length_decay_processor(input_ids, scores_after_start)
        self.assertTrue(torch.gt(scores_after_start[:, eos_token_id], scores[:, eos_token_id]).all())

        # check the penalty increases negative scores
        input_ids = ids_tensor((batch_size, 20), vocab_size=vocab_size)
        scores = torch.neg(self._get_uniform_logits(batch_size, vocab_size))
        scores_after_start = torch.clone(scores)  # clone scores as processor updates them in place
        scores_after_start = length_decay_processor(input_ids, scores_after_start)
self.assertTrue(torch.gt(scores_after_start[:, eos_token_id], scores[:, eos_token_id]).all()) def test_normalization(self): input_ids = None scores = torch.tensor( [[-23.18, -29.96, -43.54, 47.77], [-33.58, -26.87, -32.96, 22.51]], device=torch_device, dtype=torch.float ) logit_normalization = LogitNormalization() normalized_scores = logit_normalization(input_ids, scores).exp() ones = torch.ones(scores.shape[0], device=torch_device, dtype=torch.float) self.assertTrue(normalized_scores.sum(dim=-1).allclose(ones)) self.assertTrue(normalized_scores.allclose(scores.softmax(dim=-1))) def test_classifier_free_guidance(self): class Namespace(dict): pass logits_uncond = torch.tensor([[[1.0, 0, 1.5]]]) logits_cond = torch.tensor([[[1.0, 1.0, 1.0]]]) def dummy_model(input_ids, attention_mask, use_cache=True, past_key_values=None): out = Namespace() out.logits = logits_uncond out.past_key_values = None return out def lsm(x): return torch.nn.functional.log_softmax(x, dim=-1) # explicit unconditional prompt + attention mask input_ids = torch.LongTensor([[0]]) cfg = UnbatchedClassifierFreeGuidanceLogitsProcessor( 1.5, dummy_model, input_ids, torch.ones_like(input_ids, dtype=torch.long) ) out = cfg(input_ids, logits_cond)[0, -1] res = (lsm(logits_uncond) + 1.5 * (lsm(logits_cond) - lsm(logits_uncond)))[0, -1] self.assertAlmostEqual(out[0].item(), res[0].item()) self.assertAlmostEqual(out[1].item(), res[1].item()) self.assertAlmostEqual(out[2].item(), res[2].item()) # explicit unconditional prompt input_ids = torch.LongTensor([[0]]) cfg = UnbatchedClassifierFreeGuidanceLogitsProcessor(1.5, dummy_model, input_ids) out = cfg(input_ids, logits_cond)[0, -1] res = (lsm(logits_uncond) + 1.5 * (lsm(logits_cond) - lsm(logits_uncond)))[0, -1] self.assertAlmostEqual(out[0].item(), res[0].item()) self.assertAlmostEqual(out[1].item(), res[1].item()) self.assertAlmostEqual(out[2].item(), res[2].item()) # all implicit input_ids = torch.LongTensor([[0]]) cfg = UnbatchedClassifierFreeGuidanceLogitsProcessor(1.5, dummy_model) out = cfg(input_ids, logits_cond)[0, -1] res = (lsm(logits_uncond) + 1.5 * (lsm(logits_cond) - lsm(logits_uncond)))[0, -1] self.assertAlmostEqual(out[0].item(), res[0].item()) self.assertAlmostEqual(out[1].item(), res[1].item()) self.assertAlmostEqual(out[2].item(), res[2].item()) def test_early_stop_processor(self): input_ids = None eos_token_id = 2 min_eos_p = 0.1 ## some small float scores = self._get_uniform_logits(2, 4) scores[0][eos_token_id] = -6 ## less than log(min_eos_p) esp = BarkEosPrioritizerLogitsProcessor(eos_token_id=eos_token_id, min_eos_p=min_eos_p) actual_scores = esp(input_ids, scores) expected_scores_list = [ scores[0].tolist(), [float("-inf"), float("-inf"), scores[0][0], float("-inf")], ] self.assertListEqual(actual_scores.tolist(), expected_scores_list)
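# Hedged illustration (not part of the test suite above; assumes only torch): the
# eta cutoff asserted in test_eta_dist_warper is min(epsilon, sqrt(epsilon) * exp(-H(p))),
# where H(p) is the entropy of the softmax distribution p. Reproducing the second row
# of that test's distribution:
#
#   import torch
#
#   probs = torch.tensor([0.01, 0.04, 0.9, 0.05])
#   entropy = -(probs * probs.log()).sum()  # ~0.4194
#   cutoff = min(0.0625, 0.0625**0.5 * torch.exp(-entropy).item())  # min(0.0625, ~0.1644) = 0.0625
#
# every probability below that cutoff is filtered, which keeps only the 0.9 token,
# matching EXPECTED_FILTERED_DIST in the test.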
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/sagemaker/conftest.py
# we define a fixture function below and it will be "used" by
# referencing its name from tests

import os

import pytest
from attr import dataclass


os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # default region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self) -> list:
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"loss.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
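# Hedged usage sketch (hypothetical test module, not part of this conftest): a test
# class opts into the class-scoped `sm_env` fixture above by name, and the fixture
# then attaches a SageMakerTestEnvironment to the class as `self.env`:
#
#   import unittest
#
#   import pytest
#
#   @pytest.mark.usefixtures("sm_env")
#   class ExampleSageMakerTest(unittest.TestCase):
#       framework = "pytorch"  # read by the fixture via request.cls.framework
#
#       def test_env_is_attached(self):
#           assert self.env.base_job_name == "pytorch-transformers-test"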
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/sagemaker/test_multi_node_model_parallel.py
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    # @parameterized.expand([(2,), (4,),])
    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy":
eval_accuracy, "eval_loss": eval_loss}, outfile)
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/sagemaker/__init__.py
import importlib def is_sagemaker_available(): return importlib.util.find_spec("sagemaker") is not None
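# Hedged usage sketch: the test modules in this package guard their SageMaker-only
# imports behind this helper, so collection still works when the SDK is missing:
#
#   from . import is_sagemaker_available
#
#   if is_sagemaker_available():
#       from sagemaker.huggingface import HuggingFace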
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/sagemaker/test_multi_node_data_parallel.py
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True, reason="Skipping test because should only be run when releasing minor transformers version", ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "pytorch", "script": "run_ddp.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "tensorflow", "script": "run_tf_dist.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7}, }, ] ) class MultiNodeTest(unittest.TestCase): def setUp(self): if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, ) assert hasattr(self, "env") def create_estimator(self, instance_count): job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}" # distributed data settings distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None # creates estimator return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=job_name, instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", ) def save_results_as_csv(self, job_name): TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv") # @parameterized.expand([(2,), (4,),]) @parameterized.expand([(2,)]) def test_script(self, instance_count): # create estimator estimator = self.create_estimator(instance_count) # run training estimator.fit() # result dataframe result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) # get train time from SageMaker job, this includes starting, preprocessing, stopping train_runtime = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) # dump tests result into json file to share in PR with open(f"{estimator.latest_training_job.name}.json", "w") as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/sagemaker/README.md
# Testing new Hugging Face Deep Learning Container

This document explains the testing strategy for releasing the new Hugging Face Deep Learning Container. AWS maintains 14 days of currency with framework releases. Besides framework releases, the AWS release train is bi-weekly, on Mondays. The code cutoff date for any changes is the Wednesday before the release Monday.

## Test Case 1: Releasing a New Version (Minor/Major) of 🤗 Transformers

### Requirements:

Tests should run on the Release Candidate for the new `transformers` release to validate that the new release is compatible with the DLCs. To run these tests you need credentials for the HF SageMaker AWS Account. You can ask @philschmid or @n1t0 to get access.

### Run Tests:

Before we can run the tests we need to adjust the `requirements.txt` for PyTorch under `/tests/sagemaker/scripts/pytorch` and for TensorFlow under `/tests/sagemaker/scripts/tensorflow`. We adjust the branch to the new RC-tag.

```
git+https://github.com/huggingface/transformers.git@v4.5.0.rc0 # install main or adjust it with vX.X.X for installing a specific transformers version
```

After we have adjusted the `requirements.txt` we can run the Amazon SageMaker tests with:

```bash
AWS_PROFILE=<enter-your-profile> make test-sagemaker
```

These tests take around 10-15 minutes to finish. Preferably make a screenshot of the successfully run tests.

### After Transformers Release:

After we have released the Release Candidate we need to create a PR at the [Deep Learning Container Repository](https://github.com/aws/deep-learning-containers).

**Creating the update PR:**

1. Update the two latest `buildspec.yaml` configs for [PyTorch](https://github.com/aws/deep-learning-containers/tree/master/huggingface/pytorch) and [TensorFlow](https://github.com/aws/deep-learning-containers/tree/master/huggingface/tensorflow). The two latest `buildspec.yaml` are the `buildspec.yaml` without a version tag and the one with the highest framework version, e.g. `buildspec-1-7-1.yml` and not `buildspec-1-6.yml`.

To update the `buildspec.yaml` we need to adjust either the `transformers_version` or the `datasets_version` or both. Example for upgrading to `transformers 4.5.0` and `datasets 1.6.0`.

```yaml
account_id: &ACCOUNT_ID <set-$ACCOUNT_ID-in-environment>
region: &REGION <set-$REGION-in-environment>
base_framework: &BASE_FRAMEWORK pytorch
framework: &FRAMEWORK !join [ "huggingface_", *BASE_FRAMEWORK]
version: &VERSION 1.6.0
short_version: &SHORT_VERSION 1.6

repository_info:
  training_repository: &TRAINING_REPOSITORY
    image_type: &TRAINING_IMAGE_TYPE training
    root: !join [ "huggingface/", *BASE_FRAMEWORK, "/", *TRAINING_IMAGE_TYPE ]
    repository_name: &REPOSITORY_NAME !join ["pr", "-", "huggingface", "-", *BASE_FRAMEWORK, "-", *TRAINING_IMAGE_TYPE]
    repository: &REPOSITORY !join [ *ACCOUNT_ID, .dkr.ecr., *REGION, .amazonaws.com/, *REPOSITORY_NAME ]

images:
  BuildHuggingFacePytorchGpuPy37Cu110TrainingDockerImage:
    <<: *TRAINING_REPOSITORY
    build: &HUGGINGFACE_PYTORCH_GPU_TRAINING_PY3 false
    image_size_baseline: &IMAGE_SIZE_BASELINE 15000
    device_type: &DEVICE_TYPE gpu
    python_version: &DOCKER_PYTHON_VERSION py3
    tag_python_version: &TAG_PYTHON_VERSION py36
    cuda_version: &CUDA_VERSION cu110
    os_version: &OS_VERSION ubuntu18.04
    transformers_version: &TRANSFORMERS_VERSION 4.5.0 # this was adjusted from 4.4.2 to 4.5.0
    datasets_version: &DATASETS_VERSION 1.6.0 # this was adjusted from 1.5.0 to 1.6.0
    tag: !join [ *VERSION, '-', 'transformers', *TRANSFORMERS_VERSION, '-', *DEVICE_TYPE, '-', *TAG_PYTHON_VERSION, '-', *CUDA_VERSION, '-', *OS_VERSION ]
    docker_file: !join [ docker/, *SHORT_VERSION, /, *DOCKER_PYTHON_VERSION, /, *CUDA_VERSION, /Dockerfile., *DEVICE_TYPE ]
```

2. In the PR comment describe what tests we ran and with which package versions. Here you can copy the table from [Current Tests](#current-tests). You can take a look at this [PR](https://github.com/aws/deep-learning-containers/pull/1016) to see which information is needed.

## Test Case 2: Releasing a New AWS Framework DLC

### Requirements:

AWS is going to release new DLCs for PyTorch and/or TensorFlow. The tests should run on the new framework versions with the current `transformers` release to validate that the new framework release is compatible with the `transformers` version. To run these tests you need credentials for the HF SageMaker AWS Account. You can ask @philschmid or @n1t0 to get access. AWS will notify us with a new issue in the repository pointing to their framework upgrade PR.

### Run Tests:

Before we can run the tests we need to adjust the `requirements.txt` for PyTorch under `/tests/sagemaker/scripts/pytorch` and for TensorFlow under `/tests/sagemaker/scripts/tensorflow`. We add the new framework version to it.

```
torch==1.8.1 # for pytorch
tensorflow-gpu==2.5.0 # for tensorflow
```

After we have adjusted the `requirements.txt` we can run the Amazon SageMaker tests with:

```bash
AWS_PROFILE=<enter-your-profile> make test-sagemaker
```

These tests take around 10-15 minutes to finish. Preferably make a screenshot of the successfully run tests.

### After successful Tests:

After we have successfully run tests for the new framework version we need to create a PR at the [Deep Learning Container Repository](https://github.com/aws/deep-learning-containers).

**Creating the update PR:**

1. Create a new `buildspec.yaml` config for [PyTorch](https://github.com/aws/deep-learning-containers/tree/master/huggingface/pytorch) and [TensorFlow](https://github.com/aws/deep-learning-containers/tree/master/huggingface/tensorflow) and rename the old `buildspec.yaml` to `buildspec-x-x-x.yml`, where `x-x-x` is the base framework version, e.g. if PyTorch 1.6.0 is the latest version in `buildspec.yaml` the file should be renamed to `buildspec-1-6.yml`.

To create the new `buildspec.yaml` we need to adjust the `version` and the `short_version`. Example for upgrading to `pytorch 1.7.1`.

```yaml
account_id: &ACCOUNT_ID <set-$ACCOUNT_ID-in-environment>
region: &REGION <set-$REGION-in-environment>
base_framework: &BASE_FRAMEWORK pytorch
framework: &FRAMEWORK !join [ "huggingface_", *BASE_FRAMEWORK]
version: &VERSION 1.7.1 # this was adjusted from 1.6.0 to 1.7.1
short_version: &SHORT_VERSION 1.7 # this was adjusted from 1.6 to 1.7

repository_info:
  training_repository: &TRAINING_REPOSITORY
    image_type: &TRAINING_IMAGE_TYPE training
    root: !join [ "huggingface/", *BASE_FRAMEWORK, "/", *TRAINING_IMAGE_TYPE ]
    repository_name: &REPOSITORY_NAME !join ["pr", "-", "huggingface", "-", *BASE_FRAMEWORK, "-", *TRAINING_IMAGE_TYPE]
    repository: &REPOSITORY !join [ *ACCOUNT_ID, .dkr.ecr., *REGION, .amazonaws.com/, *REPOSITORY_NAME ]

images:
  BuildHuggingFacePytorchGpuPy37Cu110TrainingDockerImage:
    <<: *TRAINING_REPOSITORY
    build: &HUGGINGFACE_PYTORCH_GPU_TRAINING_PY3 false
    image_size_baseline: &IMAGE_SIZE_BASELINE 15000
    device_type: &DEVICE_TYPE gpu
    python_version: &DOCKER_PYTHON_VERSION py3
    tag_python_version: &TAG_PYTHON_VERSION py36
    cuda_version: &CUDA_VERSION cu110
    os_version: &OS_VERSION ubuntu18.04
    transformers_version: &TRANSFORMERS_VERSION 4.4.2
    datasets_version: &DATASETS_VERSION 1.5.0
    tag: !join [ *VERSION, '-', 'transformers', *TRANSFORMERS_VERSION, '-', *DEVICE_TYPE, '-', *TAG_PYTHON_VERSION, '-', *CUDA_VERSION, '-', *OS_VERSION ]
    docker_file: !join [ docker/, *SHORT_VERSION, /, *DOCKER_PYTHON_VERSION, /, *CUDA_VERSION, /Dockerfile., *DEVICE_TYPE ]
```

2. In the PR comment describe what tests we ran and with which framework versions. Here you can copy the table from [Current Tests](#current-tests). You can take a look at this [PR](https://github.com/aws/deep-learning-containers/pull/1025) to see which information is needed.

## Current Tests

| ID | Description | Platform | #GPUS | Collected & evaluated metrics |
|-------------------------------------|-------------------------------------------------------------------------|-----------------------------|-------|------------------------------------------|
| pytorch-transformers-test-single | test bert finetuning using BERT from transformers lib + PT | SageMaker createTrainingJob | 1 | train_runtime, eval_accuracy & eval_loss |
| pytorch-transformers-test-2-ddp | test bert finetuning using BERT from transformers lib + PT DDP | SageMaker createTrainingJob | 16 | train_runtime, eval_accuracy & eval_loss |
| pytorch-transformers-test-2-smd | test bert finetuning using BERT from transformers lib + PT SM DDP | SageMaker createTrainingJob | 16 | train_runtime, eval_accuracy & eval_loss |
| pytorch-transformers-test-1-smp | test roberta finetuning using RoBERTa from transformers lib + PT SM MP | SageMaker createTrainingJob | 8 | train_runtime, eval_accuracy & eval_loss |
| tensorflow-transformers-test-single | test bert finetuning using BERT from transformers lib + TF | SageMaker createTrainingJob | 1 | train_runtime, eval_accuracy & eval_loss |
| tensorflow-transformers-test-2-smd | test bert finetuning using BERT from transformers lib + TF SM DDP | SageMaker createTrainingJob | 16 | train_runtime, eval_accuracy & eval_loss |
0
hf_public_repos/transformers/tests
hf_public_repos/transformers/tests/sagemaker/test_single_node_gpu.py
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True, reason="Skipping test because should only be run when releasing minor transformers version", ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9}, }, { "framework": "tensorflow", "script": "run_tf.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9}, }, ] ) class SingleNodeTest(unittest.TestCase): def setUp(self): if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, ) assert hasattr(self, "env") def create_estimator(self, instance_count=1): # creates estimator return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-single", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, py_version="py36", ) def save_results_as_csv(self, job_name): TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv") def test_glue(self): # create estimator estimator = self.create_estimator() # run training estimator.fit() # result dataframe result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) # get train time from SageMaker job, this includes starting, preprocessing, stopping train_runtime = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) # dump tests result into json file to share in PR with open(f"{estimator.latest_training_job.name}.json", "w") as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
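# Hedged local illustration (not executed by the test above): the KPI extraction
# relies on the metric regexes defined in conftest.py; SageMaker applies them to the
# training log stream, and TrainingJobAnalytics returns the captured values. The
# same pattern, applied locally to a hypothetical log line:
#
#   import re
#
#   log_line = "eval_accuracy = 0.7123"  # hypothetical output from run_glue.py
#   match = re.search(r"eval_accuracy.*=\D*(.*?)$", log_line)
#   assert match is not None and float(match.group(1)) == 0.7123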
0
hf_public_repos/transformers/tests/sagemaker/scripts
hf_public_repos/transformers/tests/sagemaker/scripts/tensorflow/requirements.txt
git+https://github.com/huggingface/transformers.git@main # install main or adjust it with vX.X.X for installing a specific transformers version
0
hf_public_repos/transformers/tests/sagemaker/scripts
hf_public_repos/transformers/tests/sagemaker/scripts/tensorflow/run_tf.py
import argparse
import logging
import sys
import time

import tensorflow as tf
from datasets import load_dataset

from transformers import AutoTokenizer, TFAutoModelForSequenceClassification


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # Hyperparameters sent by the client are passed as command-line arguments to the script.
    parser.add_argument("--epochs", type=int, default=1)
    parser.add_argument("--per_device_train_batch_size", type=int, default=16)
    parser.add_argument("--per_device_eval_batch_size", type=int, default=8)
    parser.add_argument("--model_name_or_path", type=str)
    parser.add_argument("--learning_rate", type=float, default=5e-5)
    parser.add_argument("--do_train", type=bool, default=True)
    parser.add_argument("--do_eval", type=bool, default=True)
    parser.add_argument("--output_dir", type=str)

    args, _ = parser.parse_known_args()

    # overwrite batch size until we have tf_glue.py
    args.per_device_train_batch_size = 16
    args.per_device_eval_batch_size = 16

    # Set up logging
    logger = logging.getLogger(__name__)

    logging.basicConfig(
        level=logging.getLevelName("INFO"),
        handlers=[logging.StreamHandler(sys.stdout)],
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    )

    # Load model and tokenizer
    model = TFAutoModelForSequenceClassification.from_pretrained(args.model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)

    # Load dataset
    train_dataset, test_dataset = load_dataset("imdb", split=["train", "test"])
    train_dataset = train_dataset.shuffle().select(range(5000))  # shrink the train dataset to 5k samples
    test_dataset = test_dataset.shuffle().select(range(500))  # shrink the test dataset to 500 samples

    # Preprocess train dataset
    train_dataset = train_dataset.map(
        lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True
    )
    train_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"])

    train_features = {
        x: train_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length])
        for x in ["input_ids", "attention_mask"]
    }
    tf_train_dataset = tf.data.Dataset.from_tensor_slices((train_features, train_dataset["label"])).batch(
        args.per_device_train_batch_size
    )

    # Preprocess test dataset
    test_dataset = test_dataset.map(
        lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True
    )
    test_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"])

    test_features = {
        x: test_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length])
        for x in ["input_ids", "attention_mask"]
    }
    tf_test_dataset = tf.data.Dataset.from_tensor_slices((test_features, test_dataset["label"])).batch(
        args.per_device_eval_batch_size
    )

    # define optimizer and loss
    optimizer = tf.keras.optimizers.Adam(learning_rate=args.learning_rate)
    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    metrics = [tf.keras.metrics.SparseCategoricalAccuracy()]
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

    start_train_time = time.time()
    train_results = model.fit(tf_train_dataset, epochs=args.epochs, batch_size=args.per_device_train_batch_size)
    end_train_time = time.time() - start_train_time

    logger.info("*** Train ***")
    logger.info(f"train_runtime = {end_train_time}")
    for key, value in train_results.history.items():
        logger.info(f" {key} = {value}")
0
hf_public_repos/transformers/tests/sagemaker/scripts
hf_public_repos/transformers/tests/sagemaker/scripts/tensorflow/run_tf_dist.py
import argparse import logging import os import sys import time import tensorflow as tf from datasets import load_dataset from tqdm import tqdm from transformers import AutoTokenizer, TFAutoModelForSequenceClassification from transformers.utils import is_sagemaker_dp_enabled if os.environ.get("SDP_ENABLED") or is_sagemaker_dp_enabled(): SDP_ENABLED = True os.environ["SAGEMAKER_INSTANCE_TYPE"] = "p3dn.24xlarge" import smdistributed.dataparallel.tensorflow as sdp else: SDP_ENABLED = False def fit(model, loss, opt, train_dataset, epochs, train_batch_size, max_steps=None): pbar = tqdm(train_dataset) for i, batch in enumerate(pbar): with tf.GradientTape() as tape: inputs, targets = batch outputs = model(batch) loss_value = loss(targets, outputs.logits) if SDP_ENABLED: tape = sdp.DistributedGradientTape(tape, sparse_as_dense=True) grads = tape.gradient(loss_value, model.trainable_variables) opt.apply_gradients(zip(grads, model.trainable_variables)) pbar.set_description(f"Loss: {loss_value:.4f}") if SDP_ENABLED and i == 0: sdp.broadcast_variables(model.variables, root_rank=0) sdp.broadcast_variables(opt.variables(), root_rank=0) if max_steps and i >= max_steps: break train_results = {"loss": loss_value.numpy()} return train_results def get_datasets(tokenizer, train_batch_size, eval_batch_size): # Load dataset train_dataset, test_dataset = load_dataset("imdb", split=["train", "test"]) # Preprocess train dataset train_dataset = train_dataset.map( lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True ) train_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"]) train_features = { x: train_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length]) for x in ["input_ids", "attention_mask"] } tf_train_dataset = tf.data.Dataset.from_tensor_slices((train_features, train_dataset["label"])) # Preprocess test dataset test_dataset = test_dataset.map( lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True ) test_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"]) test_features = { x: test_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length]) for x in ["input_ids", "attention_mask"] } tf_test_dataset = tf.data.Dataset.from_tensor_slices((test_features, test_dataset["label"])) if SDP_ENABLED: tf_train_dataset = tf_train_dataset.shard(sdp.size(), sdp.rank()) tf_test_dataset = tf_test_dataset.shard(sdp.size(), sdp.rank()) tf_train_dataset = tf_train_dataset.batch(train_batch_size, drop_remainder=True) tf_test_dataset = tf_test_dataset.batch(eval_batch_size, drop_remainder=True) return tf_train_dataset, tf_test_dataset if __name__ == "__main__": parser = argparse.ArgumentParser() # Hyperparameters sent by the client are passed as command-line arguments to the script. 
parser.add_argument("--epochs", type=int, default=3) parser.add_argument("--per_device_train_batch_size", type=int, default=16) parser.add_argument("--per_device_eval_batch_size", type=int, default=8) parser.add_argument("--model_name_or_path", type=str) parser.add_argument("--learning_rate", type=str, default=5e-5) parser.add_argument("--do_train", type=bool, default=True) parser.add_argument("--do_eval", type=bool, default=True) parser.add_argument("--output_dir", type=str) parser.add_argument("--max_steps", type=int, default=None) # Data, model, and output directories parser.add_argument("--output_data_dir", type=str, default=os.environ["SM_OUTPUT_DATA_DIR"]) parser.add_argument("--model_dir", type=str, default=os.environ["SM_MODEL_DIR"]) parser.add_argument("--n_gpus", type=str, default=os.environ["SM_NUM_GPUS"]) args, _ = parser.parse_known_args() # Set up logging logger = logging.getLogger(__name__) logging.basicConfig( level=logging.getLevelName("INFO"), handlers=[logging.StreamHandler(sys.stdout)], format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) if SDP_ENABLED: sdp.init() gpus = tf.config.experimental.list_physical_devices("GPU") for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) if gpus: tf.config.experimental.set_visible_devices(gpus[sdp.local_rank()], "GPU") # Load model and tokenizer model = TFAutoModelForSequenceClassification.from_pretrained(args.model_name_or_path) tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path) # get datasets tf_train_dataset, tf_test_dataset = get_datasets( tokenizer=tokenizer, train_batch_size=args.per_device_train_batch_size, eval_batch_size=args.per_device_eval_batch_size, ) # fine optimizer and loss optimizer = tf.keras.optimizers.Adam(learning_rate=args.learning_rate) loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) metrics = [tf.keras.metrics.SparseCategoricalAccuracy()] model.compile(optimizer=optimizer, loss=loss, metrics=metrics) # Training if args.do_train: # train_results = model.fit(tf_train_dataset, epochs=args.epochs, batch_size=args.train_batch_size) start_train_time = time.time() train_results = fit( model, loss, optimizer, tf_train_dataset, args.epochs, args.per_device_train_batch_size, max_steps=args.max_steps, ) end_train_time = time.time() - start_train_time logger.info("*** Train ***") logger.info(f"train_runtime = {end_train_time}") output_eval_file = os.path.join(args.output_dir, "train_results.txt") if not SDP_ENABLED or sdp.rank() == 0: with open(output_eval_file, "w") as writer: logger.info("***** Train results *****") logger.info(train_results) for key, value in train_results.items(): logger.info(f" {key} = {value}") writer.write(f"{key} = {value}\n") # Evaluation if args.do_eval and (not SDP_ENABLED or sdp.rank() == 0): result = model.evaluate(tf_test_dataset, batch_size=args.per_device_eval_batch_size, return_dict=True) logger.info("*** Evaluate ***") output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") logger.info(result) for key, value in result.items(): logger.info(f" {key} = {value}") writer.write(f"{key} = {value}\n") # Save result if SDP_ENABLED: if sdp.rank() == 0: model.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) else: model.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir)
0
hf_public_repos/transformers/tests/sagemaker/scripts
hf_public_repos/transformers/tests/sagemaker/scripts/pytorch/requirements.txt
git+https://github.com/huggingface/transformers.git@main # install main or adjust it with vX.X.X for installing a specific transformers version
datasets==1.8.0
0
hf_public_repos/transformers/tests/sagemaker/scripts
hf_public_repos/transformers/tests/sagemaker/scripts/pytorch/run_ddp.py
import json import logging import os import subprocess from argparse import ArgumentParser logger = logging.getLogger(__name__) def parse_args(): parser = ArgumentParser() parsed, unknown = parser.parse_known_args() for arg in unknown: if arg.startswith(("-", "--")): parser.add_argument(arg.split("=")[0]) return parser.parse_args() def main(): args = parse_args() port = 8888 num_gpus = int(os.environ["SM_NUM_GPUS"]) hosts = json.loads(os.environ["SM_HOSTS"]) num_nodes = len(hosts) current_host = os.environ["SM_CURRENT_HOST"] rank = hosts.index(current_host) os.environ["NCCL_DEBUG"] = "INFO" if num_nodes > 1: cmd = f"""python -m torch.distributed.launch \ --nnodes={num_nodes} \ --node_rank={rank} \ --nproc_per_node={num_gpus} \ --master_addr={hosts[0]} \ --master_port={port} \ ./run_glue.py \ {"".join([f" --{parameter} {value}" for parameter,value in args.__dict__.items()])}""" else: cmd = f"""python -m torch.distributed.launch \ --nproc_per_node={num_gpus} \ ./run_glue.py \ {"".join([f" --{parameter} {value}" for parameter,value in args.__dict__.items()])}""" try: subprocess.run(cmd, shell=True) except Exception as e: logger.info(e) if __name__ == "__main__": main()
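# Hedged illustration of the dynamic-argument pattern in parse_args() above: every
# unknown "--key value" pair is registered on the fly, so arbitrary hyperparameters
# pass straight through to run_glue.py. For a hypothetical invocation such as
#
#   python run_ddp.py --model_name_or_path distilbert-base-cased --max_steps 500
#
# parse_args() returns a namespace with .model_name_or_path and .max_steps, which the
# f-string in main() re-serialises into
# "--model_name_or_path distilbert-base-cased --max_steps 500" for the launched script.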
0
hf_public_repos/transformers/tests/sagemaker/scripts
hf_public_repos/transformers/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for sequence classification on GLUE.""" # You can also adapt this script on your own text classification task. Pointers for this are left as comments. import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import numpy as np from datasets import load_dataset, load_metric import transformers from transformers import ( # Trainer,; TrainingArguments, AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, PretrainedConfig, default_data_collator, set_seed, ) # Will import SageMaker Model parallelism specific Trainer from transformers.sagemaker import SageMakerTrainer as Trainer from transformers.sagemaker import SageMakerTrainingArguments as TrainingArguments from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.4.2") task_to_keys = { "cola": ("sentence", None), "mnli": ("premise", "hypothesis"), "mrpc": ("sentence1", "sentence2"), "qnli": ("question", "sentence"), "qqp": ("question1", "question2"), "rte": ("sentence1", "sentence2"), "sst2": ("sentence", None), "stsb": ("sentence1", "sentence2"), "wnli": ("sentence1", "sentence2"), } logger = logging.getLogger(__name__) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ task_name: Optional[str] = field( default=None, metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())}, ) max_seq_length: int = field( default=128, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) pad_to_max_length: bool = field( default=True, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_val_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of validation examples to this " "value if set." 
) }, ) max_test_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of test examples to this " "value if set." ) }, ) train_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the training data."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the validation data."} ) test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."}) def __post_init__(self): if self.task_name is not None: self.task_name = self.task_name.lower() if self.task_name not in task_to_keys.keys(): raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys())) elif self.train_file is None or self.validation_file is None: raise ValueError("Need either a GLUE task or a training/validation file.") else: train_extension = self.train_file.split(".")[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." validation_extension = self.validation_file.split(".")[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." 
            )
    elif last_checkpoint is not None:
        logger.info(
            f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
            "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if training_args.should_log else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if training_args.should_log:
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use as labels the column called 'label' and as the pair of sentences the
    # sentences in columns called 'sentence1' and 'sentence2' if such columns exist, or the first two columns not
    # named label if at least two columns are provided.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.task_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset("glue", data_args.task_name)
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"loading a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            datasets = load_dataset("csv", data_files=data_files)
        else:
            # Loading a dataset from local json files
            datasets = load_dataset("json", data_files=data_files)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.
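    # Illustrative note (added; MRPC is just an example task): at this point `datasets` is a
    # `DatasetDict` keyed by split, and one training example looks roughly like:
    #
    #   datasets["train"][0]
    #   # -> {"sentence1": "...", "sentence2": "...", "label": 1, "idx": 0}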
    # Labels
    if data_args.task_name is not None:
        is_regression = data_args.task_name == "stsb"
        if not is_regression:
            label_list = datasets["train"].features["label"].names
            num_labels = len(label_list)
        else:
            num_labels = 1
    else:
        # Trying to have good defaults here, don't hesitate to tweak to your needs.
        is_regression = datasets["train"].features["label"].dtype in ["float32", "float64"]
        if is_regression:
            num_labels = 1
        else:
            # A useful fast method:
            # https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.unique
            label_list = datasets["train"].unique("label")
            label_list.sort()  # Let's sort it for determinism
            num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=True if model_args.use_auth_token else None,
    )

    # Preprocessing the datasets
    if data_args.task_name is not None:
        sentence1_key, sentence2_key = task_to_keys[data_args.task_name]
    else:
        # Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
        non_label_column_names = [name for name in datasets["train"].column_names if name != "label"]
        if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
            sentence1_key, sentence2_key = "sentence1", "sentence2"
        else:
            if len(non_label_column_names) >= 2:
                sentence1_key, sentence2_key = non_label_column_names[:2]
            else:
                sentence1_key, sentence2_key = non_label_column_names[0], None

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    label_to_id = None
    if (
        model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
        and data_args.task_name is not None
        and not is_regression
    ):
        # Some have all caps in their config, some don't.
        label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
        if sorted(label_name_to_id.keys()) == sorted(label_list):
            label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
        else:
            logger.warning(
                "Your model seems to have been trained with labels, but they don't match the dataset: "
                f"model labels: {sorted(label_name_to_id.keys())}, dataset labels: {sorted(label_list)}."
"\nIgnoring the model labels as a result.", ) elif data_args.task_name is None and not is_regression: label_to_id = {v: i for i, v in enumerate(label_list)} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the " f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) def preprocess_function(examples): # Tokenize the texts args = ( (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) ) result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True) # Map labels to IDs (not necessary for GLUE tasks) if label_to_id is not None and "label" in examples: result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]] return result datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache) if training_args.do_train: if "train" not in datasets: raise ValueError("--do_train requires a train dataset") train_dataset = datasets["train"] if data_args.max_train_samples is not None: train_dataset = train_dataset.select(range(data_args.max_train_samples)) if training_args.do_eval: if "validation" not in datasets and "validation_matched" not in datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = datasets["validation_matched" if data_args.task_name == "mnli" else "validation"] if data_args.max_val_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_val_samples)) if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None: if "test" not in datasets and "test_matched" not in datasets: raise ValueError("--do_predict requires a test dataset") test_dataset = datasets["test_matched" if data_args.task_name == "mnli" else "test"] if data_args.max_test_samples is not None: test_dataset = test_dataset.select(range(data_args.max_test_samples)) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # Get the metric function if data_args.task_name is not None: metric = load_metric("glue", data_args.task_name) # TODO: When datasets metrics include regular accuracy, make an else here and remove special branch from # compute_metrics # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(p: EvalPrediction): preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1) if data_args.task_name is not None: result = metric.compute(predictions=preds, references=p.label_ids) if len(result) > 1: result["combined_score"] = np.mean(list(result.values())).item() return result elif is_regression: return {"mse": ((preds - p.label_ids) ** 2).mean().item()} else: return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. 
if data_args.pad_to_max_length: data_collator = default_data_collator elif training_args.fp16: data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) else: data_collator = None # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, ) # Training if training_args.do_train: checkpoint = None if last_checkpoint is not None: checkpoint = last_checkpoint elif os.path.isdir(model_args.model_name_or_path): # Check the config from that potential checkpoint has the right number of labels before using it as a # checkpoint. if AutoConfig.from_pretrained(model_args.model_name_or_path).num_labels == num_labels: checkpoint = model_args.model_name_or_path train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] eval_datasets = [eval_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") eval_datasets.append(datasets["validation_mismatched"]) for eval_dataset, task in zip(eval_datasets, tasks): metrics = trainer.evaluate(eval_dataset=eval_dataset) max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_val_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if training_args.do_predict: logger.info("*** Test ***") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] test_datasets = [test_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") test_datasets.append(datasets["test_mismatched"]) for test_dataset, task in zip(test_datasets, tasks): # Removing the `label` columns because it contains -1 and Trainer won't like that. test_dataset = test_dataset.remove_columns("label") predictions = trainer.predict(test_dataset=test_dataset).predictions predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1) output_test_file = os.path.join(training_args.output_dir, f"test_results_{task}.txt") if trainer.is_world_process_zero(): with open(output_test_file, "w") as writer: logger.info(f"***** Test results {task} *****") writer.write("index\tprediction\n") for index, item in enumerate(predictions): if is_regression: writer.write(f"{index}\t{item:3.3f}\n") else: item = label_list[item] writer.write(f"{index}\t{item}\n") def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
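# Example invocation (illustrative only; the model, task and hyper-parameters below are
# placeholders to adapt to your setup):
#
#   python run_glue.py \
#     --model_name_or_path bert-base-cased \
#     --task_name mrpc \
#     --do_train --do_eval \
#     --max_seq_length 128 \
#     --per_device_train_batch_size 32 \
#     --learning_rate 2e-5 \
#     --num_train_epochs 3 \
#     --output_dir /tmp/mrpc_out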
0
hf_public_repos/transformers
hf_public_repos/transformers/utils/check_docstrings.py
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility that checks all docstrings of public objects have an argument section matching their signature.

Use from the root of the repo with:

```bash
python utils/check_docstrings.py
```

for a check that will error in case of inconsistencies (used by `make repo-consistency`).

To auto-fix issues run:

```bash
python utils/check_docstrings.py --fix_and_overwrite
```

which is used by `make fix-copies` (note that this fills what it can, you might have to manually fill information
like argument descriptions).
"""
import argparse
import ast
import enum
import inspect
import operator as op
import re
from pathlib import Path
from typing import Any, Optional, Tuple, Union

from check_repo import ignore_undocumented
from transformers.utils import direct_transformers_import


PATH_TO_TRANSFORMERS = Path("src").resolve() / "transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

OPTIONAL_KEYWORD = "*optional*"
# Re pattern that catches args blocks in docstrings (with all variation around the name supported).
_re_args = re.compile(r"^\s*(Args?|Arguments?|Attributes?|Params?|Parameters?):\s*$")
# Re pattern that parses the start of an arg block: catches <name> (<description>) in those lines.
_re_parse_arg = re.compile(r"^(\s*)(\S+)\s+\((.+)\)(?:\:|$)")
# Re pattern that parses the end of a description of an arg (catches the default in *optional*, defaults to xxx).
_re_parse_description = re.compile(r"\*optional\*, defaults to (.*)$")


# This is a temporary list of objects to ignore while we progressively fix them. Do not add anything here, fix the
# docstrings instead. If formatting should be ignored for the docstring, you can put a comment # no-format on the
# line before the docstring.
OBJECTS_TO_IGNORE = [ # Deprecated "InputExample", "InputFeatures", # Signature is *args/**kwargs # "PretrainedConfig", #ignored but could be fixed # "GenerationConfig", #ignored but could be fixed "TFSequenceSummary", "TFBertTokenizer", "TFGPT2Tokenizer", # Missing arguments in the docstring "ASTFeatureExtractor", "AlbertModel", "AlbertTokenizerFast", "AlignTextModel", "AlignVisionConfig", "AudioClassificationPipeline", "AutoformerConfig", "AutomaticSpeechRecognitionPipeline", "AzureOpenAiAgent", "BarkCoarseConfig", "BarkConfig", "BarkFineConfig", "BarkSemanticConfig", "BartConfig", "BartTokenizerFast", "BarthezTokenizerFast", "BeitModel", "BertConfig", "BertJapaneseTokenizer", "BertModel", "BertTokenizerFast", "BigBirdConfig", "BigBirdForQuestionAnswering", "BigBirdModel", "BigBirdPegasusConfig", "BigBirdTokenizerFast", "BitImageProcessor", "BlenderbotConfig", "BlenderbotSmallConfig", "BlenderbotSmallTokenizerFast", "BlenderbotTokenizerFast", "Blip2QFormerConfig", "Blip2VisionConfig", "BlipTextConfig", "BlipVisionConfig", "BloomConfig", "BloomTokenizerFast", "BridgeTowerTextConfig", "BridgeTowerVisionConfig", "BrosModel", "CamembertConfig", "CamembertModel", "CamembertTokenizerFast", "CanineModel", "CanineTokenizer", "ChineseCLIPTextModel", "ClapTextConfig", "ConditionalDetrConfig", "ConditionalDetrImageProcessor", "ConvBertConfig", "ConvBertTokenizerFast", "ConvNextConfig", "ConvNextV2Config", "ConversationalPipeline", "CpmAntTokenizer", "CvtConfig", "CvtModel", "DeiTImageProcessor", "DPRReaderTokenizer", "DPRReaderTokenizerFast", "DPTModel", "Data2VecAudioConfig", "Data2VecTextConfig", "Data2VecTextModel", "Data2VecVisionModel", "DataCollatorForLanguageModeling", "DebertaConfig", "DebertaV2Config", "DebertaV2Tokenizer", "DebertaV2TokenizerFast", "DecisionTransformerConfig", "DeformableDetrConfig", "DeformableDetrImageProcessor", "DeiTModel", "DepthEstimationPipeline", "DetaConfig", "DetaImageProcessor", "DetrConfig", "DetrImageProcessor", "DinatModel", "DistilBertConfig", "DistilBertTokenizerFast", "DocumentQuestionAnsweringPipeline", "DonutSwinModel", "EarlyStoppingCallback", "EfficientFormerConfig", "EfficientFormerImageProcessor", "EfficientNetConfig", "ElectraConfig", "ElectraTokenizerFast", "EncoderDecoderModel", "EncoderRepetitionPenaltyLogitsProcessor", "ErnieConfig", "ErnieMConfig", "ErnieMModel", "ErnieModel", "ErnieMTokenizer", "EsmConfig", "EsmModel", "FlaxAlbertForMaskedLM", "FlaxAlbertForMultipleChoice", "FlaxAlbertForPreTraining", "FlaxAlbertForQuestionAnswering", "FlaxAlbertForSequenceClassification", "FlaxAlbertForTokenClassification", "FlaxAlbertModel", "FlaxBartForCausalLM", "FlaxBartForConditionalGeneration", "FlaxBartForQuestionAnswering", "FlaxBartForSequenceClassification", "FlaxBartModel", "FlaxBeitForImageClassification", "FlaxBeitForMaskedImageModeling", "FlaxBeitModel", "FlaxBertForCausalLM", "FlaxBertForMaskedLM", "FlaxBertForMultipleChoice", "FlaxBertForNextSentencePrediction", "FlaxBertForPreTraining", "FlaxBertForQuestionAnswering", "FlaxBertForSequenceClassification", "FlaxBertForTokenClassification", "FlaxBertModel", "FlaxBigBirdForCausalLM", "FlaxBigBirdForMaskedLM", "FlaxBigBirdForMultipleChoice", "FlaxBigBirdForPreTraining", "FlaxBigBirdForQuestionAnswering", "FlaxBigBirdForSequenceClassification", "FlaxBigBirdForTokenClassification", "FlaxBigBirdModel", "FlaxBlenderbotForConditionalGeneration", "FlaxBlenderbotModel", "FlaxBlenderbotSmallForConditionalGeneration", "FlaxBlenderbotSmallModel", "FlaxBloomForCausalLM", "FlaxBloomModel", "FlaxCLIPModel", 
"FlaxDistilBertForMaskedLM", "FlaxDistilBertForMultipleChoice", "FlaxDistilBertForQuestionAnswering", "FlaxDistilBertForSequenceClassification", "FlaxDistilBertForTokenClassification", "FlaxDistilBertModel", "FlaxElectraForCausalLM", "FlaxElectraForMaskedLM", "FlaxElectraForMultipleChoice", "FlaxElectraForPreTraining", "FlaxElectraForQuestionAnswering", "FlaxElectraForSequenceClassification", "FlaxElectraForTokenClassification", "FlaxElectraModel", "FlaxEncoderDecoderModel", "FlaxGPT2LMHeadModel", "FlaxGPT2Model", "FlaxGPTJForCausalLM", "FlaxGPTJModel", "FlaxGPTNeoForCausalLM", "FlaxGPTNeoModel", "FlaxLlamaForCausalLM", "FlaxLlamaModel", "FlaxMBartForConditionalGeneration", "FlaxMBartForQuestionAnswering", "FlaxMBartForSequenceClassification", "FlaxMBartModel", "FlaxMarianMTModel", "FlaxMarianModel", "FlaxOPTForCausalLM", "FlaxPegasusForConditionalGeneration", "FlaxPegasusModel", "FlaxRegNetForImageClassification", "FlaxRegNetModel", "FlaxResNetForImageClassification", "FlaxResNetModel", "FlaxRoFormerForMaskedLM", "FlaxRoFormerForMultipleChoice", "FlaxRoFormerForQuestionAnswering", "FlaxRoFormerForSequenceClassification", "FlaxRoFormerForTokenClassification", "FlaxRoFormerModel", "FlaxRobertaForCausalLM", "FlaxRobertaForMaskedLM", "FlaxRobertaForMultipleChoice", "FlaxRobertaForQuestionAnswering", "FlaxRobertaForSequenceClassification", "FlaxRobertaForTokenClassification", "FlaxRobertaModel", "FlaxRobertaPreLayerNormForCausalLM", "FlaxRobertaPreLayerNormForMaskedLM", "FlaxRobertaPreLayerNormForMultipleChoice", "FlaxRobertaPreLayerNormForQuestionAnswering", "FlaxRobertaPreLayerNormForSequenceClassification", "FlaxRobertaPreLayerNormForTokenClassification", "FlaxRobertaPreLayerNormModel", "FlaxSpeechEncoderDecoderModel", "FlaxViTForImageClassification", "FlaxViTModel", "FlaxVisionEncoderDecoderModel", "FlaxVisionTextDualEncoderModel", "FlaxWav2Vec2ForCTC", "FlaxWav2Vec2ForPreTraining", "FlaxWav2Vec2Model", "FlaxWhisperForAudioClassification", "FlaxWhisperForConditionalGeneration", "FlaxWhisperModel", "FlaxWhisperTimeStampLogitsProcessor", "FlaxXGLMForCausalLM", "FlaxXGLMModel", "FlaxXLMRobertaForCausalLM", "FlaxXLMRobertaForMaskedLM", "FlaxXLMRobertaForMultipleChoice", "FlaxXLMRobertaForQuestionAnswering", "FlaxXLMRobertaForSequenceClassification", "FlaxXLMRobertaForTokenClassification", "FlaxXLMRobertaModel", "FNetConfig", "FNetModel", "FNetTokenizerFast", "FSMTConfig", "FeatureExtractionPipeline", "FillMaskPipeline", "FlaubertConfig", "FlavaConfig", "FlavaForPreTraining", "FlavaImageModel", "FlavaImageProcessor", "FlavaMultimodalModel", "FlavaTextConfig", "FlavaTextModel", "FocalNetModel", "FunnelTokenizerFast", "GPTBigCodeConfig", "GPTJConfig", "GPTNeoXConfig", "GPTNeoXJapaneseConfig", "GPTNeoXTokenizerFast", "GPTSanJapaneseConfig", "GitConfig", "GitVisionConfig", "GraphormerConfig", "GroupViTTextConfig", "GroupViTVisionConfig", "HerbertTokenizerFast", "HubertConfig", "HubertForCTC", "IBertConfig", "IBertModel", "IdeficsConfig", "IdeficsProcessor", "ImageClassificationPipeline", "ImageGPTConfig", "ImageSegmentationPipeline", "ImageToImagePipeline", "ImageToTextPipeline", "InformerConfig", "InstructBlipQFormerConfig", "JukeboxPriorConfig", "JukeboxTokenizer", "LEDConfig", "LEDTokenizerFast", "LayoutLMForQuestionAnswering", "LayoutLMTokenizerFast", "LayoutLMv2Config", "LayoutLMv2ForQuestionAnswering", "LayoutLMv2TokenizerFast", "LayoutLMv3Config", "LayoutLMv3ImageProcessor", "LayoutLMv3TokenizerFast", "LayoutXLMTokenizerFast", "LevitConfig", "LiltConfig", "LiltModel", "LongT5Config", 
"LongformerConfig", "LongformerModel", "LongformerTokenizerFast", "LukeModel", "LukeTokenizer", "LxmertTokenizerFast", "M2M100Config", "M2M100Tokenizer", "MarkupLMProcessor", "MaskGenerationPipeline", "MBart50TokenizerFast", "MBartConfig", "MCTCTFeatureExtractor", "MPNetConfig", "MPNetModel", "MPNetTokenizerFast", "MT5Config", "MT5TokenizerFast", "MarianConfig", "MarianTokenizer", "MarkupLMConfig", "MarkupLMModel", "MarkupLMTokenizer", "MarkupLMTokenizerFast", "Mask2FormerConfig", "MaskFormerConfig", "MaxTimeCriteria", "MegaConfig", "MegaModel", "MegatronBertConfig", "MegatronBertForPreTraining", "MegatronBertModel", "MobileBertConfig", "MobileBertModel", "MobileBertTokenizerFast", "MobileNetV1ImageProcessor", "MobileNetV1Model", "MobileNetV2ImageProcessor", "MobileNetV2Model", "MobileViTModel", "MobileViTV2Model", "MLukeTokenizer", "MraConfig", "MusicgenDecoderConfig", "MusicgenForConditionalGeneration", "MvpConfig", "MvpTokenizerFast", "MT5Tokenizer", "NatModel", "NerPipeline", "NezhaConfig", "NezhaModel", "NllbMoeConfig", "NllbTokenizer", "NllbTokenizerFast", "NystromformerConfig", "OPTConfig", "ObjectDetectionPipeline", "OneFormerProcessor", "OpenAIGPTTokenizerFast", "OpenLlamaConfig", "PLBartConfig", "PegasusConfig", "PegasusTokenizer", "PegasusTokenizerFast", "PegasusXConfig", "PerceiverImageProcessor", "PerceiverModel", "PerceiverTokenizer", "PersimmonConfig", "Pipeline", "Pix2StructConfig", "Pix2StructTextConfig", "PLBartTokenizer", "Pop2PianoConfig", "PreTrainedTokenizer", "PreTrainedTokenizerBase", "PreTrainedTokenizerFast", "PrefixConstrainedLogitsProcessor", "ProphetNetConfig", "QDQBertConfig", "QDQBertModel", "QuestionAnsweringPipeline", "RagConfig", "RagModel", "RagRetriever", "RagSequenceForGeneration", "RagTokenForGeneration", "RealmConfig", "RealmForOpenQA", "RealmScorer", "RealmTokenizerFast", "ReformerConfig", "ReformerTokenizerFast", "RegNetConfig", "RemBertConfig", "RemBertModel", "RemBertTokenizer", "RemBertTokenizerFast", "RepetitionPenaltyLogitsProcessor", "RetriBertConfig", "RetriBertTokenizerFast", "RoCBertConfig", "RoCBertModel", "RoCBertTokenizer", "RoFormerConfig", "RobertaConfig", "RobertaModel", "RobertaPreLayerNormConfig", "RobertaPreLayerNormModel", "RobertaTokenizerFast", "SEWConfig", "SEWDConfig", "SEWDForCTC", "SEWForCTC", "SamConfig", "SamPromptEncoderConfig", "SeamlessM4TConfig", # use of unconventional markdown "SeamlessM4Tv2Config", # use of unconventional markdown "Seq2SeqTrainingArguments", "SpecialTokensMixin", "Speech2Text2Config", "Speech2Text2Tokenizer", "Speech2TextTokenizer", "SpeechEncoderDecoderModel", "SpeechT5Config", "SpeechT5Model", "SplinterConfig", "SplinterTokenizerFast", "SqueezeBertTokenizerFast", "SummarizationPipeline", "Swin2SRImageProcessor", "Swinv2Model", "SwitchTransformersConfig", "T5Config", "T5Tokenizer", "T5TokenizerFast", "TableQuestionAnsweringPipeline", "TableTransformerConfig", "TapasConfig", "TapasModel", "TapasTokenizer", "Text2TextGenerationPipeline", "TextClassificationPipeline", "TextGenerationPipeline", "TFAlbertForMaskedLM", "TFAlbertForMultipleChoice", "TFAlbertForPreTraining", "TFAlbertForQuestionAnswering", "TFAlbertForSequenceClassification", "TFAlbertForTokenClassification", "TFAlbertModel", "TFBartForConditionalGeneration", "TFBartForSequenceClassification", "TFBartModel", "TFBertForMaskedLM", "TFBertForMultipleChoice", "TFBertForNextSentencePrediction", "TFBertForPreTraining", "TFBertForQuestionAnswering", "TFBertForSequenceClassification", "TFBertForTokenClassification", "TFBertModel", 
"TFBlenderbotForConditionalGeneration", "TFBlenderbotModel", "TFBlenderbotSmallForConditionalGeneration", "TFBlenderbotSmallModel", "TFBlipForConditionalGeneration", "TFBlipForImageTextRetrieval", "TFBlipForQuestionAnswering", "TFCLIPModel", "TFCTRLForSequenceClassification", "TFCTRLLMHeadModel", "TFCTRLModel", "TFCamembertForCausalLM", "TFCamembertForMaskedLM", "TFCamembertForMultipleChoice", "TFCamembertForQuestionAnswering", "TFCamembertForSequenceClassification", "TFCamembertForTokenClassification", "TFCamembertModel", "TFConvBertForMaskedLM", "TFConvBertForMultipleChoice", "TFConvBertForQuestionAnswering", "TFConvBertForSequenceClassification", "TFConvBertForTokenClassification", "TFConvBertModel", "TFConvNextForImageClassification", "TFConvNextModel", "TFConvNextV2Model", # Parsing issue. Equivalent to PT ConvNextV2Model, see PR #25558 "TFConvNextV2ForImageClassification", "TFCvtForImageClassification", "TFCvtModel", "TFDPRReader", "TFData2VecVisionForImageClassification", "TFData2VecVisionForSemanticSegmentation", "TFData2VecVisionModel", "TFDebertaForMaskedLM", "TFDebertaForQuestionAnswering", "TFDebertaForSequenceClassification", "TFDebertaForTokenClassification", "TFDebertaModel", "TFDebertaV2ForMaskedLM", "TFDebertaV2ForMultipleChoice", "TFDebertaV2ForQuestionAnswering", "TFDebertaV2ForSequenceClassification", "TFDebertaV2ForTokenClassification", "TFDebertaV2Model", "TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher", "TFDeiTForMaskedImageModeling", "TFDeiTModel", "TFDistilBertForMaskedLM", "TFDistilBertForMultipleChoice", "TFDistilBertForQuestionAnswering", "TFDistilBertForSequenceClassification", "TFDistilBertForTokenClassification", "TFDistilBertModel", "TFEfficientFormerForImageClassification", "TFEfficientFormerForImageClassificationWithTeacher", "TFEfficientFormerModel", "TFElectraForMaskedLM", "TFElectraForMultipleChoice", "TFElectraForPreTraining", "TFElectraForQuestionAnswering", "TFElectraForSequenceClassification", "TFElectraForTokenClassification", "TFElectraModel", "TFEncoderDecoderModel", "TFEsmForMaskedLM", "TFEsmForSequenceClassification", "TFEsmForTokenClassification", "TFEsmModel", "TFFlaubertForMultipleChoice", "TFFlaubertForQuestionAnsweringSimple", "TFFlaubertForSequenceClassification", "TFFlaubertForTokenClassification", "TFFlaubertModel", "TFFlaubertWithLMHeadModel", "TFFunnelBaseModel", "TFFunnelForMaskedLM", "TFFunnelForMultipleChoice", "TFFunnelForPreTraining", "TFFunnelForQuestionAnswering", "TFFunnelForSequenceClassification", "TFFunnelForTokenClassification", "TFFunnelModel", "TFGPT2DoubleHeadsModel", "TFGPT2ForSequenceClassification", "TFGPT2LMHeadModel", "TFGPT2Model", "TFGPTJForCausalLM", "TFGPTJForQuestionAnswering", "TFGPTJForSequenceClassification", "TFGPTJModel", "TFGroupViTModel", "TFHubertForCTC", "TFHubertModel", "TFLEDForConditionalGeneration", "TFLEDModel", "TFLayoutLMForMaskedLM", "TFLayoutLMForQuestionAnswering", "TFLayoutLMForSequenceClassification", "TFLayoutLMForTokenClassification", "TFLayoutLMModel", "TFLayoutLMv3ForQuestionAnswering", "TFLayoutLMv3ForSequenceClassification", "TFLayoutLMv3ForTokenClassification", "TFLayoutLMv3Model", "TFLongformerForMaskedLM", "TFLongformerForMultipleChoice", "TFLongformerForQuestionAnswering", "TFLongformerForSequenceClassification", "TFLongformerForTokenClassification", "TFLongformerModel", "TFLxmertForPreTraining", "TFLxmertModel", "TFMBartForConditionalGeneration", "TFMBartModel", "TFMPNetForMaskedLM", "TFMPNetForMultipleChoice", "TFMPNetForQuestionAnswering", 
"TFMPNetForSequenceClassification", "TFMPNetForTokenClassification", "TFMPNetModel", "TFMarianMTModel", "TFMarianModel", "TFMobileBertForMaskedLM", "TFMobileBertForMultipleChoice", "TFMobileBertForNextSentencePrediction", "TFMobileBertForPreTraining", "TFMobileBertForQuestionAnswering", "TFMobileBertForSequenceClassification", "TFMobileBertForTokenClassification", "TFMobileBertModel", "TFMobileViTForImageClassification", "TFMobileViTForSemanticSegmentation", "TFMobileViTModel", "TFOPTForCausalLM", "TFOPTModel", "TFOpenAIGPTDoubleHeadsModel", "TFOpenAIGPTForSequenceClassification", "TFOpenAIGPTLMHeadModel", "TFOpenAIGPTModel", "TFPegasusForConditionalGeneration", "TFPegasusModel", "TFRagModel", "TFRagSequenceForGeneration", "TFRagTokenForGeneration", "TFRemBertForCausalLM", "TFRemBertForMaskedLM", "TFRemBertForMultipleChoice", "TFRemBertForQuestionAnswering", "TFRemBertForSequenceClassification", "TFRemBertForTokenClassification", "TFRemBertModel", "TFRepetitionPenaltyLogitsProcessor", "TFResNetForImageClassification", "TFResNetModel", "TFRoFormerForCausalLM", "TFRoFormerForMaskedLM", "TFRoFormerForMultipleChoice", "TFRoFormerForQuestionAnswering", "TFRoFormerForSequenceClassification", "TFRoFormerForTokenClassification", "TFRoFormerModel", "TFRobertaForMaskedLM", "TFRobertaForMultipleChoice", "TFRobertaForQuestionAnswering", "TFRobertaForSequenceClassification", "TFRobertaForTokenClassification", "TFRobertaModel", "TFRobertaPreLayerNormForMaskedLM", "TFRobertaPreLayerNormForMultipleChoice", "TFRobertaPreLayerNormForQuestionAnswering", "TFRobertaPreLayerNormForSequenceClassification", "TFRobertaPreLayerNormForTokenClassification", "TFRobertaPreLayerNormModel", "TFSamModel", "TFSegformerForImageClassification", "TFSegformerForSemanticSegmentation", "TFSegformerModel", "TFSpeech2TextForConditionalGeneration", "TFSpeech2TextModel", "TFSwinForImageClassification", "TFSwinForMaskedImageModeling", "TFSwinModel", "TFT5EncoderModel", "TFT5ForConditionalGeneration", "TFT5Model", "TFTapasForMaskedLM", "TFTapasForQuestionAnswering", "TFTapasForSequenceClassification", "TFTapasModel", "TFTransfoXLForSequenceClassification", "TFTransfoXLLMHeadModel", "TFTransfoXLModel", "TFViTForImageClassification", "TFViTMAEForPreTraining", "TFViTMAEModel", "TFViTModel", "TFVisionEncoderDecoderModel", "TFVisionTextDualEncoderModel", "TFWav2Vec2ForCTC", "TFWav2Vec2Model", "TFWhisperForConditionalGeneration", "TFWhisperModel", "TFXGLMForCausalLM", "TFXGLMModel", "TFXLMForMultipleChoice", "TFXLMForQuestionAnsweringSimple", "TFXLMForSequenceClassification", "TFXLMForTokenClassification", "TFXLMModel", "TFXLMRobertaForCausalLM", "TFXLMRobertaForMaskedLM", "TFXLMRobertaForMultipleChoice", "TFXLMRobertaForQuestionAnswering", "TFXLMRobertaForSequenceClassification", "TFXLMRobertaForTokenClassification", "TFXLMRobertaModel", "TFXLMWithLMHeadModel", "TFXLNetForMultipleChoice", "TFXLNetForQuestionAnsweringSimple", "TFXLNetForSequenceClassification", "TFXLNetForTokenClassification", "TFXLNetLMHeadModel", "TFXLNetModel", "TimeSeriesTransformerConfig", "TokenClassificationPipeline", "TrOCRConfig", "TrainerState", "TrainingArguments", "TrajectoryTransformerConfig", "TranslationPipeline", "TvltImageProcessor", "UMT5Config", "UperNetConfig", "UperNetForSemanticSegmentation", "ViTHybridImageProcessor", "ViTHybridModel", "ViTMSNModel", "ViTModel", "VideoClassificationPipeline", "ViltConfig", "ViltForImagesAndTextClassification", "ViltModel", "VisionEncoderDecoderModel", "VisionTextDualEncoderModel", "VisualBertConfig", 
"VisualBertModel", "VisualQuestionAnsweringPipeline", "VitMatteForImageMatting", "VitsTokenizer", "VivitModel", "Wav2Vec2CTCTokenizer", "Wav2Vec2Config", "Wav2Vec2ConformerConfig", "Wav2Vec2ConformerForCTC", "Wav2Vec2FeatureExtractor", "Wav2Vec2PhonemeCTCTokenizer", "WavLMConfig", "WavLMForCTC", "WhisperConfig", "WhisperFeatureExtractor", "WhisperForAudioClassification", "XCLIPTextConfig", "XCLIPVisionConfig", "XGLMConfig", "XGLMModel", "XGLMTokenizerFast", "XLMConfig", "XLMProphetNetConfig", "XLMRobertaConfig", "XLMRobertaModel", "XLMRobertaTokenizerFast", "XLMRobertaXLConfig", "XLMRobertaXLModel", "XLNetConfig", "XLNetTokenizerFast", "XmodConfig", "XmodModel", "YolosImageProcessor", "YolosModel", "YosoConfig", "ZeroShotAudioClassificationPipeline", "ZeroShotClassificationPipeline", "ZeroShotImageClassificationPipeline", "ZeroShotObjectDetectionPipeline", ] # Supported math operations when interpreting the value of defaults. MATH_OPERATORS = { ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul, ast.Div: op.truediv, ast.Pow: op.pow, ast.BitXor: op.xor, ast.USub: op.neg, } def find_indent(line: str) -> int: """ Returns the number of spaces that start a line indent. """ search = re.search(r"^(\s*)(?:\S|$)", line) if search is None: return 0 return len(search.groups()[0]) def stringify_default(default: Any) -> str: """ Returns the string representation of a default value, as used in docstring: numbers are left as is, all other objects are in backtiks. Args: default (`Any`): The default value to process Returns: `str`: The string representation of that default. """ if isinstance(default, bool): # We need to test for bool first as a bool passes isinstance(xxx, (int, float)) return f"`{default}`" elif isinstance(default, enum.Enum): # We need to test for enum first as an enum with int values will pass isinstance(xxx, (int, float)) return f"`{str(default)}`" elif isinstance(default, int): return str(default) elif isinstance(default, float): result = str(default) return str(round(default, 2)) if len(result) > 6 else result elif isinstance(default, str): return str(default) if default.isnumeric() else f'`"{default}"`' elif isinstance(default, type): return f"`{default.__name__}`" else: return f"`{default}`" def eval_math_expression(expression: str) -> Optional[Union[float, int]]: # Mainly taken from the excellent https://stackoverflow.com/a/9558001 """ Evaluate (safely) a mathematial expression and returns its value. Args: expression (`str`): The expression to evaluate. Returns: `Optional[Union[float, int]]`: Returns `None` if the evaluation fails in any way and the value computed otherwise. Example: ```py >>> eval_expr('2^6') 4 >>> eval_expr('2**6') 64 >>> eval_expr('1 + 2*3**(4^5) / (6 + -7)') -5.0 ``` """ try: return eval_node(ast.parse(expression, mode="eval").body) except TypeError: return def eval_node(node): if isinstance(node, ast.Num): # <number> return node.n elif isinstance(node, ast.BinOp): # <left> <operator> <right> return MATH_OPERATORS[type(node.op)](eval_node(node.left), eval_node(node.right)) elif isinstance(node, ast.UnaryOp): # <operator> <operand> e.g., -1 return MATH_OPERATORS[type(node.op)](eval_node(node.operand)) else: raise TypeError(node) def replace_default_in_arg_description(description: str, default: Any) -> str: """ Catches the default value in the description of an argument inside a docstring and replaces it by the value passed. Args: description (`str`): The description of an argument in a docstring to process. 
        default (`Any`):
            The default value that would be in the docstring of that argument.

    Returns:
        `str`: The description updated with the new default value.
    """
    # Lots of docstrings have `optional` or **optional** instead of *optional* so we do this fix here.
    description = description.replace("`optional`", OPTIONAL_KEYWORD)
    description = description.replace("**optional**", OPTIONAL_KEYWORD)
    if default is inspect._empty:
        # No default, make sure the description doesn't have any either
        idx = description.find(OPTIONAL_KEYWORD)
        if idx != -1:
            description = description[:idx].rstrip()
            if description.endswith(","):
                description = description[:-1].rstrip()
    elif default is None:
        # Default None are not written, we just set `*optional*`. If there is default that is not None specified in
        # the description, we do not erase it (as sometimes we set the default to `None` because the default is a
        # mutable object).
        idx = description.find(OPTIONAL_KEYWORD)
        if idx == -1:
            description = f"{description}, {OPTIONAL_KEYWORD}"
        elif re.search(r"defaults to `?None`?", description) is not None:
            len_optional = len(OPTIONAL_KEYWORD)
            description = description[: idx + len_optional]
    else:
        str_default = None
        # For numbers we may have a default that is given by a math operation (1/255 is really popular). We don't
        # want to replace those by their actual values.
        if isinstance(default, (int, float)) and re.search("defaults to `?(.*?)(?:`|$)", description) is not None:
            # Grab the default and evaluate it.
            current_default = re.search("defaults to `?(.*?)(?:`|$)", description).groups()[0]
            if default == eval_math_expression(current_default):
                try:
                    # If it can be directly converted to the type of the default, it's a simple value
                    str_default = str(type(default)(current_default))
                except Exception:
                    # Otherwise there is a math operator so we add a code block.
                    str_default = f"`{current_default}`"

        if str_default is None:
            str_default = stringify_default(default)
        # Make sure the default matches
        if OPTIONAL_KEYWORD not in description:
            description = f"{description}, {OPTIONAL_KEYWORD}, defaults to {str_default}"
        elif _re_parse_description.search(description) is None:
            idx = description.find(OPTIONAL_KEYWORD)
            len_optional = len(OPTIONAL_KEYWORD)
            description = f"{description[:idx + len_optional]}, defaults to {str_default}"
        else:
            description = _re_parse_description.sub(rf"*optional*, defaults to {str_default}", description)

    return description


def get_default_description(arg: inspect.Parameter) -> str:
    """
    Builds a default description for a parameter that was not documented.

    Args:
        arg (`inspect.Parameter`): The argument in the signature to generate a description for.

    Returns:
        `str`: The description.
    """
    if arg.annotation is inspect._empty:
        arg_type = "<fill_type>"
    elif hasattr(arg.annotation, "__name__"):
        arg_type = arg.annotation.__name__
    else:
        arg_type = str(arg.annotation)

    if arg.default is inspect._empty:
        return f"`{arg_type}`"
    elif arg.default is None:
        return f"`{arg_type}`, {OPTIONAL_KEYWORD}"
    else:
        str_default = stringify_default(arg.default)
        return f"`{arg_type}`, {OPTIONAL_KEYWORD}, defaults to {str_default}"


def find_source_file(obj: Any) -> Path:
    """
    Finds the source file of an object.

    Args:
        obj (`Any`): The object whose source file we are looking for.

    Returns:
        `Path`: The source file.
""" module = obj.__module__ obj_file = PATH_TO_TRANSFORMERS for part in module.split(".")[1:]: obj_file = obj_file / part return obj_file.with_suffix(".py") def match_docstring_with_signature(obj: Any) -> Optional[Tuple[str, str]]: """ Matches the docstring of an object with its signature. Args: obj (`Any`): The object to process. Returns: `Optional[Tuple[str, str]]`: Returns `None` if there is no docstring or no parameters documented in the docstring, otherwise returns a tuple of two strings: the current documentation of the arguments in the docstring and the one matched with the signature. """ if len(getattr(obj, "__doc__", "")) == 0: # Nothing to do, there is no docstring. return # Read the docstring in the source code to see if there is a special command to ignore this object. try: source, _ = inspect.getsourcelines(obj) except OSError: source = [] idx = 0 while idx < len(source) and '"""' not in source[idx]: idx += 1 ignore_order = False if idx < len(source): line_before_docstring = source[idx - 1] if re.search(r"^\s*#\s*no-format\s*$", line_before_docstring): # This object is ignored return elif re.search(r"^\s*#\s*ignore-order\s*$", line_before_docstring): ignore_order = True # Read the signature signature = inspect.signature(obj).parameters obj_doc_lines = obj.__doc__.split("\n") # Get to the line where we start documenting arguments idx = 0 while idx < len(obj_doc_lines) and _re_args.search(obj_doc_lines[idx]) is None: idx += 1 if idx == len(obj_doc_lines): # Nothing to do, no parameters are documented. return indent = find_indent(obj_doc_lines[idx]) arguments = {} current_arg = None idx += 1 start_idx = idx # Keep going until the arg section is finished (nonempty line at the same indent level) or the end of the docstring. while idx < len(obj_doc_lines) and ( len(obj_doc_lines[idx].strip()) == 0 or find_indent(obj_doc_lines[idx]) > indent ): if find_indent(obj_doc_lines[idx]) == indent + 4: # New argument -> let's generate the proper doc for it re_search_arg = _re_parse_arg.search(obj_doc_lines[idx]) if re_search_arg is not None: _, name, description = re_search_arg.groups() current_arg = name if name in signature: default = signature[name].default if signature[name].kind is inspect._ParameterKind.VAR_KEYWORD: default = None new_description = replace_default_in_arg_description(description, default) else: new_description = description init_doc = _re_parse_arg.sub(rf"\1\2 ({new_description}):", obj_doc_lines[idx]) arguments[current_arg] = [init_doc] elif current_arg is not None: arguments[current_arg].append(obj_doc_lines[idx]) idx += 1 # We went too far by one (perhaps more if there are a lot of new lines) idx -= 1 while len(obj_doc_lines[idx].strip()) == 0: arguments[current_arg] = arguments[current_arg][:-1] idx -= 1 # And we went too far by one again. idx += 1 old_doc_arg = "\n".join(obj_doc_lines[start_idx:idx]) old_arguments = list(arguments.keys()) arguments = {name: "\n".join(doc) for name, doc in arguments.items()} # Add missing arguments with a template for name in set(signature.keys()) - set(arguments.keys()): arg = signature[name] # We ignore private arguments or *args/**kwargs (unless they are documented by the user) if name.startswith("_") or arg.kind in [ inspect._ParameterKind.VAR_KEYWORD, inspect._ParameterKind.VAR_POSITIONAL, ]: arguments[name] = "" else: arg_desc = get_default_description(arg) arguments[name] = " " * (indent + 4) + f"{name} ({arg_desc}): <fill_docstring>" # Arguments are sorted by the order in the signature unless a special comment is put. 
    if ignore_order:
        new_param_docs = [arguments[name] for name in old_arguments if name in signature]
        missing = set(signature.keys()) - set(old_arguments)
        new_param_docs.extend([arguments[name] for name in missing if len(arguments[name]) > 0])
    else:
        new_param_docs = [arguments[name] for name in signature.keys() if len(arguments[name]) > 0]
    new_doc_arg = "\n".join(new_param_docs)

    return old_doc_arg, new_doc_arg


def fix_docstring(obj: Any, old_doc_args: str, new_doc_args: str):
    """
    Fixes the docstring of an object by replacing its arguments documentation by the one matched with the signature.

    Args:
        obj (`Any`):
            The object whose docstring we are fixing.
        old_doc_args (`str`):
            The current documentation of the parameters of `obj` in the docstring (as returned by
            `match_docstring_with_signature`).
        new_doc_args (`str`):
            The documentation of the parameters of `obj` matched with its signature (as returned by
            `match_docstring_with_signature`).
    """
    # Read the docstring in the source code and make sure we have the right part of the docstring
    source, line_number = inspect.getsourcelines(obj)

    # Get to the line where we start documenting arguments
    idx = 0
    while idx < len(source) and _re_args.search(source[idx]) is None:
        idx += 1

    if idx == len(source):
        # Args are not defined in the docstring of this object
        return

    # Get to the line where we stop documenting arguments
    indent = find_indent(source[idx])
    idx += 1
    start_idx = idx
    while idx < len(source) and (len(source[idx].strip()) == 0 or find_indent(source[idx]) > indent):
        idx += 1

    idx -= 1
    while len(source[idx].strip()) == 0:
        idx -= 1
    idx += 1

    if "".join(source[start_idx:idx])[:-1] != old_doc_args:
        # Args are not fully defined in the docstring of this object
        return

    obj_file = find_source_file(obj)
    with open(obj_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Replace content
    lines = content.split("\n")
    lines = lines[: line_number + start_idx - 1] + [new_doc_args] + lines[line_number + idx - 1 :]

    print(f"Fixing the docstring of {obj.__name__} in {obj_file}.")
    with open(obj_file, "w", encoding="utf-8") as f:
        f.write("\n".join(lines))


def check_docstrings(overwrite: bool = False):
    """
    Check docstrings of all public objects that are callables and are documented.

    Args:
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether to fix inconsistencies or not.
    """
    failures = []
    hard_failures = []
    to_clean = []
    for name in dir(transformers):
        # Skip objects that are private or not documented.
        if name.startswith("_") or ignore_undocumented(name) or name in OBJECTS_TO_IGNORE:
            continue

        obj = getattr(transformers, name)
        if not callable(obj) or not isinstance(obj, type) or getattr(obj, "__doc__", None) is None:
            continue

        # Check docstring
        try:
            result = match_docstring_with_signature(obj)
            if result is not None:
                old_doc, new_doc = result
            else:
                old_doc, new_doc = None, None
        except Exception as e:
            print(e)
            hard_failures.append(name)
            continue
        if old_doc != new_doc:
            if overwrite:
                fix_docstring(obj, old_doc, new_doc)
            else:
                failures.append(name)
        elif not overwrite and new_doc is not None and ("<fill_type>" in new_doc or "<fill_docstring>" in new_doc):
            to_clean.append(name)

    # Deal with errors
    error_message = ""
    if len(hard_failures) > 0:
        error_message += (
            "The argument part of the docstrings of the following objects could not be processed, check they are "
            "properly formatted."
        )
        error_message += "\n" + "\n".join([f"- {name}" for name in hard_failures])
    if len(failures) > 0:
        error_message += (
            "The docstrings of the following objects do not match their signatures. Run `make fix-copies` to fix "
            "this."
        )
        error_message += "\n" + "\n".join([f"- {name}" for name in failures])
    if len(to_clean) > 0:
        error_message += (
            "The docstrings of the following objects contain templates you need to fix: search for `<fill_type>` "
            "or `<fill_docstring>`."
        )
        error_message += "\n" + "\n".join([f"- {name}" for name in to_clean])

    if len(error_message) > 0:
        error_message = "There was at least one problem when checking docstrings of public objects.\n" + error_message
        raise ValueError(error_message)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_docstrings(overwrite=args.fix_and_overwrite)
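# Illustrative note (added): a minimal sketch of what the parsing regexes above see. Given a
# docstring line such as
#
#     input_ids (`torch.LongTensor`, *optional*, defaults to `None`):
#
# `_re_args` first matches the "Args:" header above it, `_re_parse_arg` then captures the indent,
# the name "input_ids" and the description between the parentheses, and `_re_parse_description`
# isolates "defaults to `None`" so it can be compared against the signature's actual default.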
0
hf_public_repos/transformers
hf_public_repos/transformers/utils/get_test_info.py
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import importlib
import os
import sys


# This is required to make the module import work (when the python process is running from the root of the repo)
sys.path.append(".")


r"""
The argument `test_file` in this file refers to a model test file. This should be a string of the form
`tests/models/*/test_modeling_*.py`.
"""


def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    """Get the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)

    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all [test] classes in a model test file with attribute `all_model_classes` that are non-empty.

    These are usually the (model) test classes containing the (non-slow) tests to run and are subclasses of one of the
    classes `ModelTesterMixin`, `TFModelTesterMixin` or `FlaxModelTesterMixin`, as well as a subclass of
    `unittest.TestCase`. Exceptions include `RagTestMixin` (and its subclasses).
    """
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
model_classes = getattr(attr_value, "all_model_classes", []) if len(model_classes) > 0: test_classes.append(attr_value) # sort with class names return sorted(test_classes, key=lambda x: x.__name__) def get_model_classes(test_file): """Get all model classes that appear in `all_model_classes` attributes in a model test file.""" test_classes = get_test_classes(test_file) model_classes = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes) # sort with class names return sorted(model_classes, key=lambda x: x.__name__) def get_model_tester_from_test_class(test_class): """Get the model tester class of a model test class.""" test = test_class() if hasattr(test, "setUp"): test.setUp() model_tester = None if hasattr(test, "model_tester"): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. if test.model_tester is not None: model_tester = test.model_tester.__class__ return model_tester def get_test_classes_for_model(test_file, model_class): """Get all [test] classes in `test_file` that have `model_class` in their `all_model_classes`.""" test_classes = get_test_classes(test_file) target_test_classes = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(test_class) # sort with class names return sorted(target_test_classes, key=lambda x: x.__name__) def get_tester_classes_for_model(test_file, model_class): """Get all model tester classes in `test_file` that are associated to `model_class`.""" test_classes = get_test_classes_for_model(test_file, model_class) tester_classes = [] for test_class in test_classes: tester_class = get_model_tester_from_test_class(test_class) if tester_class is not None: tester_classes.append(tester_class) # sort with class names return sorted(tester_classes, key=lambda x: x.__name__) def get_test_to_tester_mapping(test_file): """Get a mapping from [test] classes to model tester classes in `test_file`. This uses `get_test_classes` which may return classes that are NOT subclasses of `unittest.TestCase`. """ test_classes = get_test_classes(test_file) test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes} return test_tester_mapping def get_model_to_test_mapping(test_file): """Get a mapping from model classes to test classes in `test_file`.""" model_classes = get_model_classes(test_file) model_test_mapping = { model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes } return model_test_mapping def get_model_to_tester_mapping(test_file): """Get a mapping from model classes to model tester classes in `test_file`.""" model_classes = get_model_classes(test_file) model_to_tester_mapping = { model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes } return model_to_tester_mapping def to_json(o): """Make the information succinct and easy to read. Avoid the full class representation like `<class 'transformers.models.bert.modeling_bert.BertForMaskedLM'>` when displaying the results. Instead, we use class name (`BertForMaskedLM`) for the readability. """ if isinstance(o, str): return o elif isinstance(o, type): return o.__name__ elif isinstance(o, (list, tuple)): return [to_json(x) for x in o] elif isinstance(o, dict): return {to_json(k): to_json(v) for k, v in o.items()} else: return o
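# Illustrative usage sketch (added; the test file path is only an example and, like the helpers
# above, this assumes the process runs from the root of the repo):
#
#   from get_test_info import get_model_to_test_mapping, to_json
#
#   mapping = get_model_to_test_mapping("tests/models/bert/test_modeling_bert.py")
#   print(to_json(mapping))
#   # -> {"BertModel": ["BertModelTest"], "BertForMaskedLM": ["BertModelTest"], ...}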
0
hf_public_repos/transformers
hf_public_repos/transformers/utils/update_metadata.py
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility that updates the metadata of the Transformers library in the repository `huggingface/transformers-metadata`.

Usage for an update (as used by the GitHub action `update_metadata`):

```bash
python utils/update_metadata.py --token <token> --commit_sha <commit_sha>
```

Usage to check all pipelines are properly defined in the constant `PIPELINE_TAGS_AND_AUTO_MODELS` of this script, so
that new pipelines are properly added as metadata (as used in `make repo-consistency`):

```bash
python utils/update_metadata.py --check-only
```
"""
import argparse
import collections
import os
import re
import tempfile
from typing import Dict, List, Tuple

import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
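# (Added for clarity) e.g. "TFBertModel" matches _re_tf_models with group "Bert", but it would also
# match the generic pattern below with group "TFBert", so only names that failed the TF/Flax checks
# may fall through to it.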
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")


# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
    ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
    ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
    ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
    ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
    ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
    ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
    ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
    ("image-to-image", "MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES", "AutoModelForImageToImage"),
    ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
    ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
    (
        "zero-shot-object-detection",
        "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
        "AutoModelForZeroShotObjectDetection",
    ),
    ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
    ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
    ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
    ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
    (
        "table-question-answering",
        "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForTableQuestionAnswering",
    ),
    ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
    ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
    (
        "next-sentence-prediction",
        "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
        "AutoModelForNextSentencePrediction",
    ),
    (
        "audio-frame-classification",
        "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForAudioFrameClassification",
    ),
    ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
    (
        "document-question-answering",
        "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForDocumentQuestionAnswering",
    ),
    (
        "visual-question-answering",
        "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForVisualQuestionAnswering",
    ),
    ("image-to-text", "MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
    (
        "zero-shot-image-classification",
        "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForZeroShotImageClassification",
    ),
    ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
    ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
    ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
    ("text-to-audio", "MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES", "AutoModelForTextToSpectrogram"),
    ("text-to-audio", "MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES", "AutoModelForTextToWaveform"),
]


def camel_case_split(identifier: str) -> List[str]:
    """
    Split a camel-cased name into words.

    Args:
        identifier (`str`): The camel-cased name to parse.

    Returns:
        `List[str]`: The list of words in the identifier (as separated by capital letters).
    Example:

    ```py
    >>> camel_case_split("CamelCasedClass")
    ["Camel", "Cased", "Class"]
    ```
    """
    # Regex thanks to https://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def get_frameworks_table() -> pd.DataFrame:
    """
    Generates a dataframe containing the supported auto classes for each model type, using the content of the auto
    modules.
    """
    # Dictionary mapping model types to their config class names.
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_mapping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's loop through all transformers objects (once) and find out if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's find the right processing class for each model. In order, we check if there is a Processor, then a
    # Tokenizer, then an ImageProcessor, then a FeatureExtractor.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.image_processing_auto.IMAGE_PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoImageProcessor"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)


def update_pipeline_and_auto_class_table(table: Dict[str, Tuple[str, str]]) -> Dict[str, Tuple[str, str]]:
    """
    Update the table mapping models to pipelines and auto classes, without removing old keys even if the
    corresponding models no longer exist.

    Args:
        table (`Dict[str, Tuple[str, str]]`):
            The existing table mapping model names to a tuple containing the pipeline tag and the auto-class name with
            which they should be used.

    Returns:
        `Dict[str, Tuple[str, str]]`: The updated table in the same format.
""" auto_modules = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"] auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"] # Loop through all three frameworks for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings): # The type of pipeline may not exist in this framework if not hasattr(module, mapping): continue # First extract all model_names model_names = [] for name in getattr(module, mapping).values(): if isinstance(name, str): model_names.append(name) else: model_names.extend(list(name)) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names}) return table def update_metadata(token: str, commit_sha: str): """ Update the metadata for the Transformers repo in `huggingface/transformers-metadata`. Args: token (`str`): A valid token giving write access to `huggingface/transformers-metadata`. commit_sha (`str`): The commit SHA on Transformers corresponding to this update. """ frameworks_table = get_frameworks_table() frameworks_dataset = Dataset.from_pandas(frameworks_table) resolved_tags_file = hf_hub_download( "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token ) tags_dataset = Dataset.from_json(resolved_tags_file) table = { tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"]) for i in range(len(tags_dataset)) } table = update_pipeline_and_auto_class_table(table) # Sort the model classes to avoid some nondeterministic updates to create false update commits. model_classes = sorted(table.keys()) tags_table = pd.DataFrame( { "model_class": model_classes, "pipeline_tag": [table[m][0] for m in model_classes], "auto_class": [table[m][1] for m in model_classes], } ) tags_dataset = Dataset.from_pandas(tags_table) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json")) tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json")) if commit_sha is not None: commit_message = ( f"Update with commit {commit_sha}\n\nSee: " f"https://github.com/huggingface/transformers/commit/{commit_sha}" ) else: commit_message = "Update" upload_folder( repo_id="huggingface/transformers-metadata", folder_path=tmp_dir, repo_type="dataset", token=token, commit_message=commit_message, ) def check_pipeline_tags(): """ Check all pipeline tags are properly defined in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant of this script. """ in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS missing = [] for key in pipeline_tasks: if key not in in_table: model = pipeline_tasks[key]["pt"] if isinstance(model, (list, tuple)): model = model[0] model = model.__name__ if model not in in_table.values(): missing.append(key) if len(missing) > 0: msg = ", ".join(missing) raise ValueError( "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside " f"`utils/update_metadata.py`: {msg}. Please add them!" 
) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.") parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.") parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.") args = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
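# Illustrative note (not part of the script itself): given the columns built above, each row of
# `frameworks.json` has the shape
#   {"model_type": "bert", "pytorch": true, "tensorflow": true, "flax": true, "processor": "AutoTokenizer"}
# and each row of `pipeline_tags.json` has the shape
#   {"model_class": "BertForMaskedLM", "pipeline_tag": "fill-mask", "auto_class": "AutoModelForMaskedLM"}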
0
hf_public_repos/transformers
hf_public_repos/transformers/utils/check_self_hosted_runner.py
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    """Query the GitHub API for the repo's self-hosted runners and fail if any targeted runner is offline."""
    offline_runners = []

    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    # Collect the targeted runners that report an "offline" status.
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )

    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
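# Example invocation (hypothetical runner names):
#   python utils/check_self_hosted_runner.py --target_runners ci-runner-1,ci-runner-2 --token <token>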
0
hf_public_repos/transformers
hf_public_repos/transformers/utils/notification_service_doc_tests.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict, List import requests from slack_sdk import WebClient client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"]) def handle_test_results(test_results): expressions = test_results.split(" ") failed = 0 success = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1] for i, expression in enumerate(expressions): if "failed" in expression: failed += int(expressions[i - 1]) if "passed" in expression: success += int(expressions[i - 1]) return failed, success, time_spent def extract_first_line_failure(failures_short_lines): failures = {} file = None in_error = False for line in failures_short_lines.split("\n"): if re.search(r"_ \[doctest\]", line): in_error = True file = line.split(" ")[2] elif in_error and not line.split(" ")[0].isdigit(): failures[file] = line in_error = False return failures class Message: def __init__(self, title: str, doc_test_results: Dict): self.title = title self._time_spent = doc_test_results["time_spent"].split(",")[0] self.n_success = doc_test_results["success"] self.n_failures = doc_test_results["failures"] self.n_tests = self.n_success + self.n_failures # Failures and success of the modeling tests self.doc_test_results = doc_test_results @property def time(self) -> str: time_spent = [self._time_spent] total_secs = 0 for time in time_spent: time_parts = time.split(":") # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(time_parts) == 1: time_parts = [0, 0, time_parts[0]] hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2]) total_secs += hours * 3600 + minutes * 60 + seconds hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60 return f"{int(hours)}h{int(minutes)}m{int(seconds)}s" @property def header(self) -> Dict: return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def no_failures(self) -> Dict: return { "type": "section", "text": { "type": "plain_text", "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.", "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def failures(self) -> Dict: return { "type": "section", "text": { "type": "plain_text", "text": ( f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in" f" {self.time}." 
), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def category_failures(self) -> List[Dict]: failure_blocks = [] MAX_ERROR_TEXT = 3000 - len("The following examples had failures:\n\n\n\n") - len("[Truncated]\n") line_length = 40 category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)} def single_category_failures(category, failures): text = "" if len(failures) == 0: return "" text += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n" for idx, failure in enumerate(failures): new_text = text + f"`{failure}`\n" if len(new_text) > MAX_ERROR_TEXT: text = text + "[Truncated]\n" break text = new_text return text for category, failures in category_failures.items(): report = single_category_failures(category, failures) if len(report) == 0: continue block = { "type": "section", "text": { "type": "mrkdwn", "text": f"The following examples had failures:\n\n\n{report}\n", }, } failure_blocks.append(block) return failure_blocks @property def payload(self) -> str: blocks = [self.header] if self.n_failures > 0: blocks.append(self.failures) if self.n_failures > 0: blocks.extend(self.category_failures) if self.n_failures == 0: blocks.append(self.no_failures) return json.dumps(blocks) @staticmethod def error_out(): payload = [ { "type": "section", "text": { "type": "plain_text", "text": "There was an issue running the tests.", }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } ] print("Sending the following payload") print(json.dumps({"blocks": json.loads(payload)})) client.chat_postMessage( channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text="There was an issue running the tests.", blocks=payload, ) def post(self): print("Sending the following payload") print(json.dumps({"blocks": json.loads(self.payload)})) text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed." 
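        # Keep the API response: `post_reply` threads its follow-up messages under this message via its `ts` field.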
self.thread_ts = client.chat_postMessage( channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], blocks=self.payload, text=text, ) def get_reply_blocks(self, job_name, job_link, failures, text): # `text` must be less than 3001 characters in Slack SDK # keep some room for adding "[Truncated]" when necessary MAX_ERROR_TEXT = 3000 - len("[Truncated]") failure_text = "" for key, value in failures.items(): new_text = failure_text + f"*{key}*\n_{value}_\n\n" if len(new_text) > MAX_ERROR_TEXT: # `failure_text` here has length <= 3000 failure_text = failure_text + "[Truncated]" break # `failure_text` here has length <= MAX_ERROR_TEXT failure_text = new_text title = job_name content = {"type": "section", "text": {"type": "mrkdwn", "text": text}} if job_link is not None: content["accessory"] = { "type": "button", "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True}, "url": job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failure_text}}, ] def post_reply(self): if self.thread_ts is None: raise ValueError("Can only post reply if a post has been made.") job_link = self.doc_test_results.pop("job_link") self.doc_test_results.pop("failures") self.doc_test_results.pop("success") self.doc_test_results.pop("time_spent") sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0]) for job, job_result in sorted_dict: if len(job_result["failures"]): text = f"*Num failures* :{len(job_result['failed'])} \n" failures = job_result["failures"] blocks = self.get_reply_blocks(job, job_link, failures, text=text) print("Sending the following reply") print(json.dumps({"blocks": blocks})) client.chat_postMessage( channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts["ts"], ) time.sleep(1) def get_job_links(): run_id = os.environ["GITHUB_RUN_ID"] url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100" result = requests.get(url).json() jobs = {} try: jobs.update({job["name"]: job["html_url"] for job in result["jobs"]}) pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100) for i in range(pages_to_iterate_over): result = requests.get(url + f"&page={i + 2}").json() jobs.update({job["name"]: job["html_url"] for job in result["jobs"]}) return jobs except Exception as e: print("Unknown error, could not fetch links.", e) return {} def retrieve_artifact(name: str): _artifact = {} if os.path.exists(name): files = os.listdir(name) for file in files: try: with open(os.path.join(name, file), encoding="utf-8") as f: _artifact[file.split(".")[0]] = f.read() except UnicodeDecodeError as e: raise ValueError(f"Could not open {os.path.join(name, file)}.") from e return _artifact def retrieve_available_artifacts(): class Artifact: def __init__(self, name: str): self.name = name self.paths = [] def __str__(self): return self.name def add_path(self, path: str): self.paths.append({"name": self.name, "path": path}) _available_artifacts: Dict[str, Artifact] = {} directories = filter(os.path.isdir, os.listdir()) for directory in directories: artifact_name = directory if artifact_name not in _available_artifacts: _available_artifacts[artifact_name] = Artifact(artifact_name) _available_artifacts[artifact_name].add_path(directory) return _available_artifacts if __name__ == "__main__": github_actions_job_links = get_job_links() available_artifacts = 
retrieve_available_artifacts() docs = collections.OrderedDict( [ ("*.py", "API Examples"), ("*.md", "MD Examples"), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' doc_test_results = { v: { "failed": [], "failures": {}, } for v in docs.values() } # Link to the GitHub Action job doc_test_results["job_link"] = github_actions_job_links.get("run_doctests") artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0] artifact = retrieve_artifact(artifact_path["name"]) if "stats" in artifact: failed, success, time_spent = handle_test_results(artifact["stats"]) doc_test_results["failures"] = failed doc_test_results["success"] = success doc_test_results["time_spent"] = time_spent[1:-1] + ", " all_failures = extract_first_line_failure(artifact["failures_short"]) for line in artifact["summary_short"].split("\n"): if re.search("FAILED", line): line = line.replace("FAILED ", "") line = line.split()[0].replace("\n", "") if "::" in line: file_path, test = line.split("::") else: file_path, test = line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): category = docs[file_regex] doc_test_results[category]["failed"].append(test) failure = all_failures[test] if test in all_failures else "N/A" doc_test_results[category]["failures"][test] = failure break message = Message("🤗 Results of the doc tests.", doc_test_results) message.post() message.post_reply()
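# Illustrative note: `handle_test_results` above parses a pytest summary string such as
#   "== 3 failed, 97 passed in 4:32:15 =="
# into failed=3, success=97 and time_spent="4:32:15", which is what populates `doc_test_results`.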
0
hf_public_repos/transformers
hf_public_repos/transformers/utils/release.py
# coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utility that prepares the repository for releases (or patches) by updating all versions in the relevant places. It also performs some post-release cleanup, by updating the links in the main README to respective model doc pages (from main to stable). To prepare for a release, use from the root of the repo on the release branch with: ```bash python release.py ``` or use `make pre-release`. To prepare for a patch release, use from the root of the repo on the release branch with: ```bash python release.py --patch ``` or use `make pre-patch`. To do the post-release cleanup, use from the root of the repo on the main branch with: ```bash python release.py --post_release ``` or use `make post-release`. """ import argparse import os import re import packaging.version # All paths are defined with the intent that this script should be run from the root of the repo. PATH_TO_EXAMPLES = "examples/" # This maps a type of file to the pattern to look for when searching where the version is defined, as well as the # template to follow when replacing it with the new version. REPLACE_PATTERNS = { "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'), } # This maps a type of file to its path in Transformers REPLACE_FILES = { "init": "src/transformers/__init__.py", "setup": "setup.py", } README_FILE = "README.md" def update_version_in_file(fname: str, version: str, file_type: str): """ Update the version of Transformers in one file. Args: fname (`str`): The path to the file where we want to update the version. version (`str`): The new version to set in the file. file_type (`str`): The type of the file (should be a key in `REPLACE_PATTERNS`). """ with open(fname, "r", encoding="utf-8", newline="\n") as f: code = f.read() re_pattern, replace = REPLACE_PATTERNS[file_type] replace = replace.replace("VERSION", version) code = re_pattern.sub(replace, code) with open(fname, "w", encoding="utf-8", newline="\n") as f: f.write(code) def update_version_in_examples(version: str): """ Update the version in all examples files. Args: version (`str`): The new version to set in the examples. """ for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("research_projects") if "legacy" in directories: directories.remove("legacy") for fname in fnames: if fname.endswith(".py"): update_version_in_file(os.path.join(folder, fname), version, file_type="examples") def global_version_update(version: str, patch: bool = False): """ Update the version in all needed files. Args: version (`str`): The new version to set everywhere. 
        patch (`bool`, *optional*, defaults to `False`): Whether or not this is a patch release.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        # We don't update the version in the examples for patch releases.
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """
    Replace the links from main doc to stable doc in the model list of the README.
    """
    # If the introduction or the conclusion of the list change, the prompts may need to be updated.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """
    Reads the current version in the main __init__.
    """
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch: bool = False):
    """
    Do all the necessary pre-release steps:
    - figure out the next minor release version and ask confirmation
    - update the version everywhere
    - clean up the model list in the main README

    Args:
        patch (`bool`, *optional*, defaults to `False`): Whether or not this is a patch release.
    """
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if we have found the right version.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")

    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """
    Do all the necessary post-release steps:
    - figure out the next dev version and ask confirmation
    - update the version everywhere
    - clean up the model list in the main README
    """
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user that we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")

    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
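# Illustrative walk-through: with `__version__ = "4.36.0.dev0"` in the main __init__,
# `python utils/release.py` proposes "4.36.0" as the release version, and after the release
# `python utils/release.py --post_release` proposes "4.37.0.dev0" as the next dev version.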
0
hf_public_repos/transformers
hf_public_repos/transformers/utils/check_config_attributes.py
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import os
import re

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
    # used to compute the property `self.chunk_length`
    "EncodecConfig": ["overlap"],
    # used as `self.bert_model = BertModel(config, ...)`
    "DPRConfig": True,
    "FuyuConfig": True,
    # not used in modeling files, but it's important information
    "FSMTConfig": ["langs"],
    # used internally in the configuration class file
    "GPTNeoConfig": ["attention_types"],
    # used internally in the configuration class file
    "EsmConfig": ["is_folding_model"],
    # used during training (although we don't have a training script for these models yet)
    "Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (although we don't have a training script for these models yet)
    # `norm` used in the conversion script (although not used in the modeling file)
    "OneFormerConfig": ["ignore_value", "norm"],
    # used during preprocessing and collation, see `collating_graphormer.py`
    "GraphormerConfig": ["spatial_pos_max"],
    # used internally in the configuration class file
    "T5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    # `tokenizer_class` gets the default value `T5Tokenizer` intentionally
    "MT5Config": ["feed_forward_proj", "tokenizer_class"],
    "UMT5Config": ["feed_forward_proj", "tokenizer_class"],
    # used internally in the configuration class file
    "LongT5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    "Pop2PianoConfig": ["feed_forward_proj"],
    # used internally in the configuration class file
    "SwitchTransformersConfig": ["feed_forward_proj"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "BioGptConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "GLPNConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "SegformerConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "CvtConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "PerceiverConfig": ["layer_norm_eps"],
    # used internally to calculate the feature size
    "InformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate `mlp_dim` "SamVisionConfig": ["mlp_ratio"], # For (head) training, but so far not implemented "ClapAudioConfig": ["num_classes"], # Not used, but providing useful information to users "SpeechT5HifiGanConfig": ["sampling_rate"], # Actually used in the config or generation config, in that case necessary for the sub-components generation "SeamlessM4TConfig": [ "max_new_tokens", "t2u_max_new_tokens", "t2u_decoder_attention_heads", "t2u_decoder_ffn_dim", "t2u_decoder_layers", "t2u_encoder_attention_heads", "t2u_encoder_ffn_dim", "t2u_encoder_layers", "t2u_max_position_embeddings", ], # Actually used in the config or generation config, in that case necessary for the sub-components generation "SeamlessM4Tv2Config": [ "max_new_tokens", "t2u_decoder_attention_heads", "t2u_decoder_ffn_dim", "t2u_decoder_layers", "t2u_encoder_attention_heads", "t2u_encoder_ffn_dim", "t2u_encoder_layers", "t2u_max_position_embeddings", "t2u_variance_pred_dropout", "t2u_variance_predictor_embed_dim", "t2u_variance_predictor_hidden_dim", "t2u_variance_predictor_kernel_size", ], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure SPECIAL_CASES_TO_ALLOW.update( { "CLIPSegConfig": True, "DeformableDetrConfig": True, "DetaConfig": True, "DinatConfig": True, "DonutSwinConfig": True, "EfficientFormerConfig": True, "FSMTConfig": True, "JukeboxConfig": True, "LayoutLMv2Config": True, "MaskFormerSwinConfig": True, "MT5Config": True, # For backward compatibility with trust remote code models "MptConfig": True, "MptAttentionConfig": True, "NatConfig": True, "OneFormerConfig": True, "PerceiverConfig": True, "RagConfig": True, "SpeechT5Config": True, "SwinConfig": True, "Swin2SRConfig": True, "Swinv2Config": True, "SwitchTransformersConfig": True, "TableTransformerConfig": True, "TapasConfig": True, "UniSpeechConfig": True, "UniSpeechSatConfig": True, "WavLMConfig": True, "WhisperConfig": True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) "JukeboxPriorConfig": True, # TODO: @Younes (for `is_decoder`) "Pix2StructTextConfig": True, "IdeficsConfig": True, "IdeficsVisionConfig": True, "IdeficsPerceiverConfig": True, } ) def check_attribute_being_used(config_class, attributes, default_value, source_strings): """Check if any name in `attributes` is used in one of the strings in `source_strings` Args: config_class (`type`): The configuration class for which the arguments in its `__init__` will be checked. attributes (`List[str]`): The name of an argument (or attribute) and its variant names if any. default_value (`Any`): A default value for the attribute in `attributes` assigned in the `__init__` of `config_class`. source_strings (`List[str]`): The python source code strings in the same modeling directory where `config_class` is defined. The file containing the definition of `config_class` should be excluded. 
""" attribute_used = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( f"config.{attribute}" in modeling_source or f'getattr(config, "{attribute}"' in modeling_source or f'getattr(self.config, "{attribute}"' in modeling_source ): attribute_used = True # Deal with multi-line cases elif ( re.search( rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"', modeling_source, ) is not None ): attribute_used = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: attribute_used = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files attributes_to_allow = [ "bos_index", "eos_index", "pad_index", "unk_index", "mask_index", "image_size", "use_cache", "out_features", "out_indices", "sampling_rate", ] attributes_used_in_generation = ["encoder_no_repeat_ngram_size"] # Special cases to be allowed case_allowed = True if not attribute_used: case_allowed = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: case_allowed = True elif attribute in ["tie_word_embeddings"] and default_value is False: case_allowed = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: case_allowed = True elif attribute.endswith("_token_id"): case_allowed = True # configuration class specific cases if not case_allowed: allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, []) case_allowed = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def check_config_attributes_being_used(config_class): """Check the arguments in `__init__` of `config_class` are used in the modeling files in the same directory Args: config_class (`type`): The configuration class for which the arguments in its `__init__` will be checked. """ # Get the parameters in `__init__` of the configuration class, and the default values if any signature = dict(inspect.signature(config_class.__init__).parameters) parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]] parameter_defaults = [signature[param].default for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test should pass reversed_attribute_map = {} if len(config_class.attribute_map) > 0: reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling source files config_source_file = inspect.getsourcefile(config_class) model_dir = os.path.dirname(config_source_file) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. 
modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")] # Get the source code strings modeling_sources = [] for path in modeling_paths: if os.path.isfile(path): with open(path, encoding="utf8") as fp: modeling_sources.append(fp.read()) unused_attributes = [] for config_param, default_value in zip(parameter_names, parameter_defaults): # `attributes` here is all the variant names for `config_param` attributes = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param]) if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources): unused_attributes.append(attributes[0]) return sorted(unused_attributes) def check_config_attributes(): """Check the arguments in `__init__` of all configuration classes are used in python files""" configs_with_unused_attributes = {} for _config_class in list(CONFIG_MAPPING.values()): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) config_classes_in_module = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class), lambda x: inspect.isclass(x) and issubclass(x, PretrainedConfig) and inspect.getmodule(x) == inspect.getmodule(_config_class), ) ] for config_class in config_classes_in_module: unused_attributes = check_config_attributes_being_used(config_class) if len(unused_attributes) > 0: configs_with_unused_attributes[config_class.__name__] = unused_attributes if len(configs_with_unused_attributes) > 0: error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n" for name, attributes in configs_with_unused_attributes.items(): error += f"{name}: {attributes}\n" raise ValueError(error) if __name__ == "__main__": check_config_attributes()
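# Illustrative note: an attribute such as `hidden_size` passes the check if any `modeling_*.py` file next
# to the config contains `config.hidden_size` or `getattr(config, "hidden_size", ...)`; otherwise it is
# reported as unused, unless allowed via SPECIAL_CASES_TO_ALLOW or the always-allowed attribute lists above.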
0
hf_public_repos/transformers
hf_public_repos/transformers/utils/print_env.py
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" print("Python version:", sys.version) print("transformers version:", transformers.__version__) try: import torch print("Torch version:", torch.__version__) print("Cuda available:", torch.cuda.is_available()) print("Cuda version:", torch.version.cuda) print("CuDNN version:", torch.backends.cudnn.version()) print("Number of GPUs available:", torch.cuda.device_count()) print("NCCL version:", torch.cuda.nccl.version()) except ImportError: print("Torch version:", None) try: import deepspeed print("DeepSpeed version:", deepspeed.__version__) except ImportError: print("DeepSpeed version:", None) try: import tensorflow as tf print("TensorFlow version:", tf.__version__) print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU"))) print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU"))) except ImportError: print("TensorFlow version:", None)
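# Typically run in CI as `python utils/print_env.py` so the environment is recorded in the job logs.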
0
hf_public_repos/transformers
hf_public_repos/transformers/utils/create_dummy_models.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import collections.abc import copy import inspect import json import multiprocessing import os import shutil import tempfile import traceback from pathlib import Path from check_config_docstrings import get_checkpoint_from_config_class from datasets import load_dataset from get_test_info import get_model_to_tester_mapping, get_tester_classes_for_model from huggingface_hub import Repository, create_repo, hf_api, upload_folder from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, IMAGE_PROCESSOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoTokenizer, LayoutLMv3TokenizerFast, PreTrainedTokenizer, PreTrainedTokenizerFast, logging, ) from transformers.feature_extraction_utils import FeatureExtractionMixin from transformers.file_utils import is_tf_available, is_torch_available from transformers.image_processing_utils import BaseImageProcessor from transformers.models.auto.configuration_auto import AutoConfig, model_type_to_module_name from transformers.models.fsmt import configuration_fsmt from transformers.processing_utils import ProcessorMixin, transformers_module from transformers.tokenization_utils_base import PreTrainedTokenizerBase # make sure tokenizer plays nice with multiprocessing os.environ["TOKENIZERS_PARALLELISM"] = "false" logging.set_verbosity_error() logging.disable_progress_bar() logger = logging.get_logger(__name__) os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" if not is_torch_available(): raise ValueError("Please install PyTorch.") if not is_tf_available(): raise ValueError("Please install TensorFlow.") FRAMEWORKS = ["pytorch", "tensorflow"] INVALID_ARCH = [] TARGET_VOCAB_SIZE = 1024 data = {"training_ds": None, "testing_ds": None} COMPOSITE_MODELS = { "EncoderDecoderModel": "EncoderDecoderModel-bert-bert", "SpeechEncoderDecoderModel": "SpeechEncoderDecoderModel-wav2vec2-bert", "VisionEncoderDecoderModel": "VisionEncoderDecoderModel-vit-gpt2", "VisionTextDualEncoderModel": "VisionTextDualEncoderModel-vit-bert", } # This list contains the model architectures for which a tiny version could not be created. # Avoid to add new architectures here - unless we have verified carefully that it's (almost) impossible to create them. # One such case is: no model tester class is implemented for a model type (like `MT5`) because its architecture is # identical to another one (`MT5` is based on `T5`), but trained on different datasets or with different techniques. 
UNCONVERTIBLE_MODEL_ARCHITECTURES = { "BertGenerationEncoder", "BertGenerationDecoder", "CamembertForSequenceClassification", "CamembertForMultipleChoice", "CamembertForMaskedLM", "CamembertForCausalLM", "CamembertForTokenClassification", "CamembertForQuestionAnswering", "CamembertModel", "TFCamembertForMultipleChoice", "TFCamembertForTokenClassification", "TFCamembertForQuestionAnswering", "TFCamembertForSequenceClassification", "TFCamembertForMaskedLM", "TFCamembertModel", "TFCamembertForCausalLM", "DecisionTransformerModel", "GraphormerModel", "InformerModel", "JukeboxModel", "MarianForCausalLM", "MaskFormerSwinModel", "MaskFormerSwinBackbone", "MT5Model", "MT5ForConditionalGeneration", "UMT5ForConditionalGeneration", "TFMT5ForConditionalGeneration", "TFMT5Model", "QDQBertForSequenceClassification", "QDQBertForMaskedLM", "QDQBertModel", "QDQBertForTokenClassification", "QDQBertLMHeadModel", "QDQBertForMultipleChoice", "QDQBertForQuestionAnswering", "QDQBertForNextSentencePrediction", "ReformerModelWithLMHead", "RetriBertModel", "Speech2Text2ForCausalLM", "TimeSeriesTransformerModel", "TrajectoryTransformerModel", "TrOCRForCausalLM", "XLMProphetNetForConditionalGeneration", "XLMProphetNetForCausalLM", "XLMProphetNetModel", "XLMRobertaModel", "XLMRobertaForTokenClassification", "XLMRobertaForMultipleChoice", "XLMRobertaForMaskedLM", "XLMRobertaForCausalLM", "XLMRobertaForSequenceClassification", "XLMRobertaForQuestionAnswering", "TFXLMRobertaForSequenceClassification", "TFXLMRobertaForMaskedLM", "TFXLMRobertaForCausalLM", "TFXLMRobertaForQuestionAnswering", "TFXLMRobertaModel", "TFXLMRobertaForMultipleChoice", "TFXLMRobertaForTokenClassification", } def get_processor_types_from_config_class(config_class, allowed_mappings=None): """Return a tuple of processors for `config_class`. We use `tuple` here to include (potentially) both slow & fast tokenizers. """ # To make a uniform return type def _to_tuple(x): if not isinstance(x, collections.abc.Sequence): x = (x,) else: x = tuple(x) return x if allowed_mappings is None: allowed_mappings = ["processor", "tokenizer", "image_processor", "feature_extractor"] processor_types = () # Check first if a model has `ProcessorMixin`. Otherwise, check if it has tokenizers, and/or an image processor or # a feature extractor if config_class in PROCESSOR_MAPPING and "processor" in allowed_mappings: processor_types = _to_tuple(PROCESSOR_MAPPING[config_class]) else: if config_class in TOKENIZER_MAPPING and "tokenizer" in allowed_mappings: processor_types = TOKENIZER_MAPPING[config_class] if config_class in IMAGE_PROCESSOR_MAPPING and "image_processor" in allowed_mappings: processor_types += _to_tuple(IMAGE_PROCESSOR_MAPPING[config_class]) elif config_class in FEATURE_EXTRACTOR_MAPPING and "feature_extractor" in allowed_mappings: processor_types += _to_tuple(FEATURE_EXTRACTOR_MAPPING[config_class]) # Remark: some configurations have no processor at all. For example, generic composite models like # `EncoderDecoderModel` is used for any (compatible) text models. Also, `DecisionTransformer` doesn't # require any processor. # We might get `None` for some tokenizers - remove them here. processor_types = tuple(p for p in processor_types if p is not None) return processor_types def get_architectures_from_config_class(config_class, arch_mappings, models_to_skip=None): """Return a tuple of all possible architectures attributed to a configuration class `config_class`. For example, BertConfig -> [BertModel, BertForMaskedLM, ..., BertForQuestionAnswering]. 
""" # A model architecture could appear in several mappings. For example, `BartForConditionalGeneration` is in # - MODEL_FOR_PRETRAINING_MAPPING_NAMES # - MODEL_WITH_LM_HEAD_MAPPING_NAMES # - MODEL_FOR_MASKED_LM_MAPPING_NAMES # - MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES # We avoid the duplication. architectures = set() if models_to_skip is None: models_to_skip = [] models_to_skip = UNCONVERTIBLE_MODEL_ARCHITECTURES.union(models_to_skip) for mapping in arch_mappings: if config_class in mapping: models = mapping[config_class] models = tuple(models) if isinstance(models, collections.abc.Sequence) else (models,) for model in models: if model.__name__ not in models_to_skip: architectures.add(model) architectures = tuple(architectures) return architectures def get_config_class_from_processor_class(processor_class): """Get the config class from a processor class. Some config/model classes use tokenizers/feature_extractors from other models. For example, `GPT-J` uses `GPT2Tokenizer`. If no checkpoint is found for a config class, or a checkpoint is found without necessary file(s) to create the processor for `processor_class`, we get the config class that corresponds to `processor_class` and use it to find a checkpoint in order to create the processor. """ processor_prefix = processor_class.__name__ for postfix in ["TokenizerFast", "Tokenizer", "ImageProcessor", "FeatureExtractor", "Processor"]: processor_prefix = processor_prefix.replace(postfix, "") # `Wav2Vec2CTCTokenizer` -> `Wav2Vec2Config` if processor_prefix == "Wav2Vec2CTC": processor_prefix = "Wav2Vec2" # Find the new configuration class new_config_name = f"{processor_prefix}Config" new_config_class = getattr(transformers_module, new_config_name) return new_config_class def build_processor(config_class, processor_class, allow_no_checkpoint=False): """Create a processor for `processor_class`. If a processor is not able to be built with the original arguments, this method tries to change the arguments and call itself recursively, by inferring a new `config_class` or a new `processor_class` from another one, in order to find a checkpoint containing the necessary files to build a processor. The processor is not saved here. Instead, it will be saved in `convert_processors` after further changes in `convert_processors`. For each model architecture`, a copy will be created and saved along the built model. """ # Currently, this solely uses the docstring in the source file of `config_class` to find a checkpoint. checkpoint = get_checkpoint_from_config_class(config_class) if checkpoint is None: # try to get the checkpoint from the config class for `processor_class`. # This helps cases like `XCLIPConfig` and `VideoMAEFeatureExtractor` to find a checkpoint from `VideoMAEConfig`. config_class_from_processor_class = get_config_class_from_processor_class(processor_class) checkpoint = get_checkpoint_from_config_class(config_class_from_processor_class) processor = None try: processor = processor_class.from_pretrained(checkpoint) except Exception as e: logger.error(f"{e.__class__.__name__}: {e}") # Try to get a new processor class from checkpoint. This is helpful for a checkpoint without necessary file to load # processor while `processor_class` is an Auto class. For example, `sew` has `Wav2Vec2Processor` in # `PROCESSOR_MAPPING_NAMES`, its `tokenizer_class` is `AutoTokenizer`, and the checkpoint # `https://huggingface.co/asapp/sew-tiny-100k` has no tokenizer file, but we can get # `tokenizer_class: Wav2Vec2CTCTokenizer` from the config file. 
(The new processor class won't be able to load from # `checkpoint`, but it helps this recursive method to find a way to build a processor). if ( processor is None and checkpoint is not None and issubclass(processor_class, (PreTrainedTokenizerBase, AutoTokenizer)) ): try: config = AutoConfig.from_pretrained(checkpoint) except Exception as e: logger.error(f"{e.__class__.__name__}: {e}") config = None if config is not None: if not isinstance(config, config_class): raise ValueError( f"`config` (which is of type {config.__class__.__name__}) should be an instance of `config_class`" f" ({config_class.__name__})!" ) tokenizer_class = config.tokenizer_class new_processor_class = None if tokenizer_class is not None: new_processor_class = getattr(transformers_module, tokenizer_class) if new_processor_class != processor_class: processor = build_processor(config_class, new_processor_class) # If `tokenizer_class` is not specified in `config`, let's use `config` to get the process class via auto # mappings, but only allow the tokenizer mapping being used. This is to make `Wav2Vec2Conformer` build if processor is None: new_processor_classes = get_processor_types_from_config_class( config.__class__, allowed_mappings=["tokenizer"] ) # Used to avoid infinite recursion between a pair of fast/slow tokenizer types names = [ x.__name__.replace("Fast", "") for x in [processor_class, new_processor_class] if x is not None ] new_processor_classes = [ x for x in new_processor_classes if x is not None and x.__name__.replace("Fast", "") not in names ] if len(new_processor_classes) > 0: new_processor_class = new_processor_classes[0] # Let's use fast tokenizer if there is any for x in new_processor_classes: if x.__name__.endswith("Fast"): new_processor_class = x break processor = build_processor(config_class, new_processor_class) if processor is None: # Try to build each component (tokenizer & feature extractor) of a `ProcessorMixin`. if issubclass(processor_class, ProcessorMixin): attrs = {} for attr_name in processor_class.attributes: attrs[attr_name] = [] # This could be a tuple (for tokenizers). For example, `CLIPProcessor` has # - feature_extractor_class = "CLIPFeatureExtractor" # - tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast") attr_class_names = getattr(processor_class, f"{attr_name}_class") if not isinstance(attr_class_names, tuple): attr_class_names = (attr_class_names,) for name in attr_class_names: attr_class = getattr(transformers_module, name) attr = build_processor(config_class, attr_class) if attr is not None: attrs[attr_name].append(attr) # try to build a `ProcessorMixin`, so we can return a single value if all(len(v) > 0 for v in attrs.values()): try: processor = processor_class(**{k: v[0] for k, v in attrs.items()}) except Exception as e: logger.error(f"{e.__class__.__name__}: {e}") else: # `checkpoint` might lack some file(s) to load a processor. For example, `facebook/hubert-base-ls960` # has no tokenizer file to load `Wav2Vec2CTCTokenizer`. In this case, we try to build a processor # with the configuration class (for example, `Wav2Vec2Config`) corresponding to `processor_class`. 
config_class_from_processor_class = get_config_class_from_processor_class(processor_class) if config_class_from_processor_class != config_class: processor = build_processor(config_class_from_processor_class, processor_class) # Try to create an image processor or a feature extractor without any checkpoint if ( processor is None and allow_no_checkpoint and (issubclass(processor_class, BaseImageProcessor) or issubclass(processor_class, FeatureExtractionMixin)) ): try: processor = processor_class() except Exception as e: logger.error(f"{e.__class__.__name__}: {e}") # validation if processor is not None: if not (isinstance(processor, processor_class) or processor_class.__name__.startswith("Auto")): raise ValueError( f"`processor` (which is of type {processor.__class__.__name__}) should be an instance of" f" {processor_class.__name__} or an Auto class!" ) return processor def get_tiny_config(config_class, model_class=None, **model_tester_kwargs): """Retrieve a tiny configuration from `config_class` using each model's `ModelTester`. Args: config_class: Subclass of `PreTrainedConfig`. Returns: An instance of `config_class` with tiny hyperparameters """ model_type = config_class.model_type # For model type like `data2vec-vision` and `donut-swin`, we can't get the config/model file name directly via # `model_type` as it would be sth. like `configuration_data2vec_vision.py`. # A simple way is to use `inspect.getsourcefile(config_class)`. config_source_file = inspect.getsourcefile(config_class) # The modeling file name without prefix (`modeling_`) and postfix (`.py`) modeling_name = config_source_file.split(os.path.sep)[-1].replace("configuration_", "").replace(".py", "") try: print("Importing", model_type_to_module_name(model_type)) module_name = model_type_to_module_name(model_type) if not modeling_name.startswith(module_name): raise ValueError(f"{modeling_name} doesn't start with {module_name}!") test_file = os.path.join("tests", "models", module_name, f"test_modeling_{modeling_name}.py") models_to_model_testers = get_model_to_tester_mapping(test_file) # Find the model tester class model_tester_class = None tester_classes = [] if model_class is not None: tester_classes = get_tester_classes_for_model(test_file, model_class) else: for _tester_classes in models_to_model_testers.values(): tester_classes.extend(_tester_classes) if len(tester_classes) > 0: # sort with the length of the class names first, then the alphabetical order # This is to avoid `T5EncoderOnlyModelTest` is used instead of `T5ModelTest`, which has # `is_encoder_decoder=False` and causes some pipeline tests failing (also failures in `Optimum` CI). # TODO: More fine grained control of the desired tester class. model_tester_class = sorted(tester_classes, key=lambda x: (len(x.__name__), x.__name__))[0] except ModuleNotFoundError: error = f"Tiny config not created for {model_type} - cannot find the testing module from the model name." raise ValueError(error) if model_tester_class is None: error = f"Tiny config not created for {model_type} - no model tester is found in the testing module." raise ValueError(error) # CLIP-like models have `text_model_tester` and `vision_model_tester`, and we need to pass `vocab_size` to # `text_model_tester` via `text_kwargs`. The same trick is also necessary for `Flava`. 
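    # (Illustratively: a call like `get_tiny_config(CLIPConfig, vocab_size=1024)` is rewritten below into
    # `text_kwargs={"vocab_size": 1024}` before instantiating the model tester.)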
if "vocab_size" in model_tester_kwargs: if "text_kwargs" in inspect.signature(model_tester_class.__init__).parameters.keys(): vocab_size = model_tester_kwargs.pop("vocab_size") model_tester_kwargs["text_kwargs"] = {"vocab_size": vocab_size} # `parent` is an instance of `unittest.TestCase`, but we don't need it here. model_tester = model_tester_class(parent=None, **model_tester_kwargs) if hasattr(model_tester, "get_pipeline_config"): config = model_tester.get_pipeline_config() elif hasattr(model_tester, "prepare_config_and_inputs"): # `PoolFormer` has no `get_config` defined. Furthermore, it's better to use `prepare_config_and_inputs` even if # `get_config` is defined, since there might be some extra changes in `prepare_config_and_inputs`. config = model_tester.prepare_config_and_inputs()[0] elif hasattr(model_tester, "get_config"): config = model_tester.get_config() else: error = ( f"Tiny config not created for {model_type} - the model tester {model_tester_class.__name__} lacks" " necessary method to create config." ) raise ValueError(error) # make sure this is long enough (some model tester has `20` for this attr.) to pass `text-generation` # pipeline tests. max_positions = [] for key in ["max_position_embeddings", "max_source_positions", "max_target_positions"]: if getattr(config, key, 0) > 0: max_positions.append(getattr(config, key)) if getattr(config, "text_config", None) is not None: if getattr(config.text_config, key, None) is not None: max_positions.append(getattr(config.text_config, key)) if len(max_positions) > 0: max_position = max(200, min(max_positions)) for key in ["max_position_embeddings", "max_source_positions", "max_target_positions"]: if getattr(config, key, 0) > 0: setattr(config, key, max_position) if getattr(config, "text_config", None) is not None: if getattr(config.text_config, key, None) is not None: setattr(config.text_config, key, max_position) return config def convert_tokenizer(tokenizer_fast: PreTrainedTokenizerFast): new_tokenizer = tokenizer_fast.train_new_from_iterator( data["training_ds"]["text"], TARGET_VOCAB_SIZE, show_progress=False ) # Make sure it at least runs if not isinstance(new_tokenizer, LayoutLMv3TokenizerFast): new_tokenizer(data["testing_ds"]["text"]) return new_tokenizer def convert_feature_extractor(feature_extractor, tiny_config): to_convert = False kwargs = {} if hasattr(tiny_config, "image_size"): kwargs["size"] = tiny_config.image_size kwargs["crop_size"] = tiny_config.image_size to_convert = True elif ( hasattr(tiny_config, "vision_config") and tiny_config.vision_config is not None and hasattr(tiny_config.vision_config, "image_size") ): kwargs["size"] = tiny_config.vision_config.image_size kwargs["crop_size"] = tiny_config.vision_config.image_size to_convert = True # Speech2TextModel specific. if hasattr(tiny_config, "input_feat_per_channel"): kwargs["feature_size"] = tiny_config.input_feat_per_channel kwargs["num_mel_bins"] = tiny_config.input_feat_per_channel to_convert = True if to_convert: feature_extractor = feature_extractor.__class__(**kwargs) return feature_extractor def convert_processors(processors, tiny_config, output_folder, result): """Change a processor to work with smaller inputs. For tokenizers, we try to reduce their vocabulary size. For feature extractor, we use smaller image size or change other attributes using the values from `tiny_config`. See `convert_feature_extractor`. This method should not fail: we catch the errors and put them in `result["warnings"]` with descriptive messages. 
""" def _sanity_check(fast_tokenizer, slow_tokenizer, keep_fast_tokenizer=False): """Set tokenizer(s) to `None` if the fast/slow tokenizers have different values for `vocab_size` or `length`. If `keep_fast_tokenizer=True`, the fast tokenizer will be kept. """ # sanity check 1: fast and slow tokenizers should be compatible (vocab_size) if fast_tokenizer is not None and slow_tokenizer is not None: if fast_tokenizer.vocab_size != slow_tokenizer.vocab_size: warning_messagae = ( "The fast/slow tokenizers " f"({fast_tokenizer.__class__.__name__}/{slow_tokenizer.__class__.__name__}) have different " "vocabulary size: " f"fast_tokenizer.vocab_size = {fast_tokenizer.vocab_size} and " f"slow_tokenizer.vocab_size = {slow_tokenizer.vocab_size}." ) result["warnings"].append(warning_messagae) if not keep_fast_tokenizer: fast_tokenizer = None slow_tokenizer = None # sanity check 2: fast and slow tokenizers should be compatible (length) if fast_tokenizer is not None and slow_tokenizer is not None: if len(fast_tokenizer) != len(slow_tokenizer): warning_messagae = ( f"The fast/slow tokenizers () have different length: " f"len(fast_tokenizer) = {len(fast_tokenizer)} and " f"len(slow_tokenizer) = {len(slow_tokenizer)}." ) result["warnings"].append(warning_messagae) if not keep_fast_tokenizer: fast_tokenizer = None slow_tokenizer = None return fast_tokenizer, slow_tokenizer tokenizers = [] feature_extractors = [] for processor in processors: if isinstance(processor, PreTrainedTokenizerBase): if processor.__class__.__name__ not in {x.__class__.__name__ for x in tokenizers}: tokenizers.append(processor) elif isinstance(processor, BaseImageProcessor): if processor.__class__.__name__ not in {x.__class__.__name__ for x in feature_extractors}: feature_extractors.append(processor) elif isinstance(processor, FeatureExtractionMixin): if processor.__class__.__name__ not in {x.__class__.__name__ for x in feature_extractors}: feature_extractors.append(processor) elif isinstance(processor, ProcessorMixin): if hasattr(processor, "tokenizer"): if processor.tokenizer.__class__.__name__ not in {x.__class__.__name__ for x in tokenizers}: tokenizers.append(processor.tokenizer) # Currently, we only have these 2 possibilities if hasattr(processor, "image_processor"): if processor.image_processor.__class__.__name__ not in { x.__class__.__name__ for x in feature_extractors }: feature_extractors.append(processor.image_processor) elif hasattr(processor, "feature_extractor"): if processor.feature_extractor.__class__.__name__ not in { x.__class__.__name__ for x in feature_extractors }: feature_extractors.append(processor.feature_extractor) # check the built processors have the unique type num_types = len({x.__class__.__name__ for x in feature_extractors}) if num_types >= 2: raise ValueError(f"`feature_extractors` should contain at most 1 type, but it contains {num_types} types!") num_types = len({x.__class__.__name__.replace("Fast", "") for x in tokenizers}) if num_types >= 2: raise ValueError(f"`tokenizers` should contain at most 1 tokenizer type, but it contains {num_types} types!") fast_tokenizer = None slow_tokenizer = None for tokenizer in tokenizers: if isinstance(tokenizer, PreTrainedTokenizerFast): fast_tokenizer = tokenizer else: slow_tokenizer = tokenizer # If the (original) fast/slow tokenizers don't correspond, keep only the fast tokenizer. # This doesn't necessarily imply the fast/slow tokenizers in a single Hub repo. has issues. 
    # It's more of an issue in `build_processor` which tries to get a checkpoint with as much effort as possible.
    # For `YosoModel` (which uses `AlbertTokenizer(Fast)`), its real (Hub) checkpoint doesn't contain valid files to
    # load the slow tokenizer (`AlbertTokenizer`), and it ends up finding the (canonical) checkpoint of `AlbertModel`,
    # which has different vocabulary.
    # TODO: Try to improve `build_processor`'s definition and/or usage to avoid the above situation in the first place.
    fast_tokenizer, slow_tokenizer = _sanity_check(fast_tokenizer, slow_tokenizer, keep_fast_tokenizer=True)
    original_fast_tokenizer, original_slow_tokenizer = fast_tokenizer, slow_tokenizer

    if fast_tokenizer:
        try:
            # Wav2Vec2ForCTC, ByT5Tokenizer etc. are all already small enough and have no fast version that can
            # be retrained
            if fast_tokenizer.vocab_size > TARGET_VOCAB_SIZE:
                fast_tokenizer = convert_tokenizer(fast_tokenizer)
        except Exception:
            result["warnings"].append(
                (
                    f"Failed to convert the fast tokenizer for {fast_tokenizer.__class__.__name__}.",
                    traceback.format_exc(),
                )
            )

    # If `fast_tokenizer` exists, `slow_tokenizer` should correspond to it.
    if fast_tokenizer:
        # Make sure the fast tokenizer can be saved
        try:
            # We don't save it to `output_folder` at this moment - only at the end of this function.
            with tempfile.TemporaryDirectory() as tmpdir:
                fast_tokenizer.save_pretrained(tmpdir)
                try:
                    slow_tokenizer = AutoTokenizer.from_pretrained(tmpdir, use_fast=False)
                except Exception:
                    result["warnings"].append(
                        (
                            f"Failed to load the slow tokenizer saved from {fast_tokenizer.__class__.__name__}.",
                            traceback.format_exc(),
                        )
                    )
                    # Let's just keep the fast version
                    slow_tokenizer = None
        except Exception:
            result["warnings"].append(
                (
                    f"Failed to save the fast tokenizer for {fast_tokenizer.__class__.__name__}.",
                    traceback.format_exc(),
                )
            )
            fast_tokenizer = None

    # If the (possibly converted) fast/slow tokenizers don't correspond, set them to `None`, and use the original
    # tokenizers.
    fast_tokenizer, slow_tokenizer = _sanity_check(fast_tokenizer, slow_tokenizer, keep_fast_tokenizer=False)

    # If any conversion failed, we keep the original tokenizers.
    if (original_fast_tokenizer is not None and fast_tokenizer is None) or (
        original_slow_tokenizer is not None and slow_tokenizer is None
    ):
        warning_message = (
            "There are some issues when converting the fast/slow tokenizers. The original tokenizers from the Hub "
            "will be used instead."
        )
        result["warnings"].append(warning_message)
        # Let's use the original version at the end (`original_fast_tokenizer` and `original_slow_tokenizer`)
        fast_tokenizer = original_fast_tokenizer
        slow_tokenizer = original_slow_tokenizer

    # Make sure the fast tokenizer can be saved
    if fast_tokenizer:
        # We don't save it to `output_folder` at this moment - only at the end of this function.
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                fast_tokenizer.save_pretrained(tmpdir)
            except Exception:
                result["warnings"].append(
                    (
                        f"Failed to save the fast tokenizer for {fast_tokenizer.__class__.__name__}.",
                        traceback.format_exc(),
                    )
                )
                fast_tokenizer = None

    # Make sure the slow tokenizer can be saved
    if slow_tokenizer:
        # We don't save it to `output_folder` at this moment - only at the end of this function.
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                slow_tokenizer.save_pretrained(tmpdir)
            except Exception:
                result["warnings"].append(
                    (
                        f"Failed to save the slow tokenizer for {slow_tokenizer.__class__.__name__}.",
                        traceback.format_exc(),
                    )
                )
                slow_tokenizer = None

    # update feature extractors using the tiny config
    try:
        feature_extractors = [convert_feature_extractor(p, tiny_config) for p in feature_extractors]
    except Exception:
        result["warnings"].append(
            (
                "Failed to convert feature extractors.",
                traceback.format_exc(),
            )
        )
        feature_extractors = []

    if hasattr(tiny_config, "max_position_embeddings") and tiny_config.max_position_embeddings > 0:
        if fast_tokenizer is not None:
            if fast_tokenizer.__class__.__name__ in [
                "RobertaTokenizerFast",
                "XLMRobertaTokenizerFast",
                "LongformerTokenizerFast",
                "MPNetTokenizerFast",
            ]:
                fast_tokenizer.model_max_length = tiny_config.max_position_embeddings - 2
            else:
                fast_tokenizer.model_max_length = tiny_config.max_position_embeddings
        if slow_tokenizer is not None:
            if slow_tokenizer.__class__.__name__ in [
                "RobertaTokenizer",
                "XLMRobertaTokenizer",
                "LongformerTokenizer",
                "MPNetTokenizer",
            ]:
                slow_tokenizer.model_max_length = tiny_config.max_position_embeddings - 2
            else:
                slow_tokenizer.model_max_length = tiny_config.max_position_embeddings

    processors = [fast_tokenizer, slow_tokenizer] + feature_extractors
    processors = [p for p in processors if p is not None]
    for p in processors:
        p.save_pretrained(output_folder)

    return processors


def get_checkpoint_dir(output_dir, model_arch):
    """Get framework-agnostic architecture name. Used to save all PT/TF/Flax models into the same directory."""
    arch_name = model_arch.__name__
    if arch_name.startswith("TF"):
        arch_name = arch_name[2:]
    elif arch_name.startswith("Flax"):
        arch_name = arch_name[4:]

    return os.path.join(output_dir, arch_name)


def build_model(model_arch, tiny_config, output_dir):
    """Create and save a model for `model_arch`.

    Also copy the set of processors to each model (under the same model type) output folder.
    """
    checkpoint_dir = get_checkpoint_dir(output_dir, model_arch)

    processor_output_dir = os.path.join(output_dir, "processors")
    # copy the (same set of) processors (for a model type) to the model arch. specific folder
    if os.path.isdir(processor_output_dir):
        shutil.copytree(processor_output_dir, checkpoint_dir, dirs_exist_ok=True)

    tiny_config = copy.deepcopy(tiny_config)

    if any(model_arch.__name__.endswith(x) for x in ["ForCausalLM", "LMHeadModel"]):
        tiny_config.is_encoder_decoder = False
        tiny_config.is_decoder = True

    model = model_arch(config=tiny_config)
    model.save_pretrained(checkpoint_dir)
    model.from_pretrained(checkpoint_dir)

    return model


def fill_result_with_error(result, error, trace, models_to_create):
    """Fill `result` with errors for all target model archs if we can't build the processor."""
    error = (error, trace)
    result["error"] = error
    for framework in FRAMEWORKS:
        if framework in models_to_create:
            result[framework] = {}
            for model_arch in models_to_create[framework]:
                result[framework][model_arch.__name__] = {"model": None, "checkpoint": None, "error": error}

    result["processor"] = {p.__class__.__name__: p.__class__.__name__ for p in result["processor"].values()}


def upload_model(model_dir, organization, token):
    """Upload the tiny models"""
    arch_name = model_dir.split(os.path.sep)[-1]
    repo_name = f"tiny-random-{arch_name}"
    repo_id = f"{organization}/{repo_name}"

    repo_exist = False
    error = None
    try:
        create_repo(repo_id=repo_id, exist_ok=False, repo_type="model", token=token)
    except Exception as e:
        error = e
        if "You already created" in str(e):
            error = None
            logger.warning("Remote repository exists and will be cloned.")
            repo_exist = True
            try:
                create_repo(repo_id=repo_id, exist_ok=True, repo_type="model", token=token)
            except Exception as e:
                error = e
    if error is not None:
        raise error

    with tempfile.TemporaryDirectory() as tmpdir:
        repo = Repository(local_dir=tmpdir, clone_from=repo_id, token=token)
        repo.git_pull()
        shutil.copytree(model_dir, tmpdir, dirs_exist_ok=True)

        if repo_exist:
            # Open a PR on the existing Hub repo.
            hub_pr_url = upload_folder(
                folder_path=model_dir,
                repo_id=repo_id,
                repo_type="model",
                commit_message=f"Update tiny models for {arch_name}",
                commit_description=f"Upload tiny models for {arch_name}",
                create_pr=True,
                token=token,
            )
            logger.warning(f"PR open in {hub_pr_url}.")
            # TODO: Do we need this information?
        else:
            # Push to Hub repo directly
            repo.git_add(auto_lfs_track=True)
            repo.git_commit(f"Upload tiny models for {arch_name}")
            repo.git_push(blocking=True)  # this prints a progress bar with the upload
            logger.warning(f"Tiny models {arch_name} pushed to {repo_id}.")


def build_composite_models(config_class, output_dir):
    import tempfile

    from transformers import (
        BertConfig,
        BertLMHeadModel,
        BertModel,
        BertTokenizer,
        BertTokenizerFast,
        EncoderDecoderModel,
        GPT2Config,
        GPT2LMHeadModel,
        GPT2Tokenizer,
        GPT2TokenizerFast,
        SpeechEncoderDecoderModel,
        TFEncoderDecoderModel,
        TFVisionEncoderDecoderModel,
        TFVisionTextDualEncoderModel,
        VisionEncoderDecoderModel,
        VisionTextDualEncoderModel,
        ViTConfig,
        ViTFeatureExtractor,
        ViTModel,
        Wav2Vec2Config,
        Wav2Vec2Model,
        Wav2Vec2Processor,
    )

    # These will be removed at the end if they are empty
    result = {"error": None, "warnings": []}

    if config_class.model_type == "encoder-decoder":
        encoder_config_class = BertConfig
        decoder_config_class = BertConfig
        encoder_processor = (BertTokenizerFast, BertTokenizer)
        decoder_processor = (BertTokenizerFast, BertTokenizer)
        encoder_class = BertModel
        decoder_class = BertLMHeadModel
        model_class = EncoderDecoderModel
        tf_model_class = TFEncoderDecoderModel
    elif config_class.model_type == "vision-encoder-decoder":
        encoder_config_class = ViTConfig
        decoder_config_class = GPT2Config
        encoder_processor = (ViTFeatureExtractor,)
        decoder_processor = (GPT2TokenizerFast, GPT2Tokenizer)
        encoder_class = ViTModel
        decoder_class = GPT2LMHeadModel
        model_class = VisionEncoderDecoderModel
        tf_model_class = TFVisionEncoderDecoderModel
    elif config_class.model_type == "speech-encoder-decoder":
        encoder_config_class = Wav2Vec2Config
        decoder_config_class = BertConfig
        encoder_processor = (Wav2Vec2Processor,)
        decoder_processor = (BertTokenizerFast, BertTokenizer)
        encoder_class = Wav2Vec2Model
        decoder_class = BertLMHeadModel
        model_class = SpeechEncoderDecoderModel
        tf_model_class = None
    elif config_class.model_type == "vision-text-dual-encoder":
        # Not encoder-decoder, but encoder-encoder. We just keep the same name as above to make code easier
        encoder_config_class = ViTConfig
        decoder_config_class = BertConfig
        encoder_processor = (ViTFeatureExtractor,)
        decoder_processor = (BertTokenizerFast, BertTokenizer)
        encoder_class = ViTModel
        decoder_class = BertModel
        model_class = VisionTextDualEncoderModel
        tf_model_class = TFVisionTextDualEncoderModel

    with tempfile.TemporaryDirectory() as tmpdir:
        try:
            # build encoder
            models_to_create = {"processor": encoder_processor, "pytorch": (encoder_class,), "tensorflow": []}
            encoder_output_dir = os.path.join(tmpdir, "encoder")
            build(encoder_config_class, models_to_create, encoder_output_dir)

            # build decoder
            models_to_create = {"processor": decoder_processor, "pytorch": (decoder_class,), "tensorflow": []}
            decoder_output_dir = os.path.join(tmpdir, "decoder")
            build(decoder_config_class, models_to_create, decoder_output_dir)

            # build encoder-decoder
            encoder_path = os.path.join(encoder_output_dir, encoder_class.__name__)
            decoder_path = os.path.join(decoder_output_dir, decoder_class.__name__)

            if config_class.model_type != "vision-text-dual-encoder":
                # Specify these explicitly for encoder-decoder like models, but not for `vision-text-dual-encoder` as
                # it has no decoder.
                decoder_config = decoder_config_class.from_pretrained(decoder_path)
                decoder_config.is_decoder = True
                decoder_config.add_cross_attention = True
                model = model_class.from_encoder_decoder_pretrained(
                    encoder_path,
                    decoder_path,
                    decoder_config=decoder_config,
                )
            elif config_class.model_type == "vision-text-dual-encoder":
                model = model_class.from_vision_text_pretrained(encoder_path, decoder_path)

            model_path = os.path.join(
                output_dir,
                f"{model_class.__name__}-{encoder_config_class.model_type}-{decoder_config_class.model_type}",
            )
            model.save_pretrained(model_path)

            if tf_model_class is not None:
                model = tf_model_class.from_pretrained(model_path)
                model.save_pretrained(model_path)

            # copy the processors
            encoder_processor_path = os.path.join(encoder_output_dir, "processors")
            decoder_processor_path = os.path.join(decoder_output_dir, "processors")
            if os.path.isdir(encoder_processor_path):
                shutil.copytree(encoder_processor_path, model_path, dirs_exist_ok=True)
            if os.path.isdir(decoder_processor_path):
                shutil.copytree(decoder_processor_path, model_path, dirs_exist_ok=True)

            # fill `result`
            result["processor"] = {x.__name__: x.__name__ for x in encoder_processor + decoder_processor}

            result["pytorch"] = {model_class.__name__: {"model": model_class.__name__, "checkpoint": model_path}}

            result["tensorflow"] = {}
            if tf_model_class is not None:
                result["tensorflow"] = {
                    tf_model_class.__name__: {"model": tf_model_class.__name__, "checkpoint": model_path}
                }
        except Exception:
            result["error"] = (
                f"Failed to build models for {config_class.__name__}.",
                traceback.format_exc(),
            )

    if not result["error"]:
        del result["error"]
    if not result["warnings"]:
        del result["warnings"]

    return result


def get_token_id_from_tokenizer(token_id_name, tokenizer, original_token_id):
    """Use `tokenizer` to get the values of `bos_token_id`, `eos_token_ids`, etc.

    The argument `token_id_name` should be a string ending with `_token_id`, and `original_token_id` should be an
    integer that will be returned if `tokenizer` has no token corresponding to `token_id_name`.
    """
    token_id = original_token_id

    if not token_id_name.endswith("_token_id"):
        raise ValueError(f"`token_id_name` is {token_id_name}, which doesn't end with `_token_id`!")

    token = getattr(tokenizer, token_id_name.replace("_token_id", "_token"), None)
    if token is not None:
        if isinstance(tokenizer, PreTrainedTokenizerFast):
            token_id = tokenizer._convert_token_to_id_with_added_voc(token)
        else:
            token_id = tokenizer._convert_token_to_id(token)

    return token_id


def get_config_overrides(config_class, processors):
    # `Bark` configuration is too special. Let's just not handle this for now.
    if config_class.__name__ == "BarkConfig":
        return {}

    config_overrides = {}

    # Check if there is any tokenizer (prefer fast version if any)
    tokenizer = None
    for processor in processors:
        if isinstance(processor, PreTrainedTokenizerFast):
            tokenizer = processor
            break
        elif isinstance(processor, PreTrainedTokenizer):
            tokenizer = processor

    if tokenizer is None:
        return config_overrides

    # Get some properties of the (already converted) tokenizer (smaller vocab size, special token ids, etc.)
    # We use `len(tokenizer)` instead of `tokenizer.vocab_size` to avoid potential issues for tokenizers with non-empty
    # `added_tokens_encoder`. One example is the `DebertaV2Tokenizer` where the mask token is the extra token.
    vocab_size = len(tokenizer)

    # The original checkpoint has length `35998`, but it doesn't have ids `30400` and `30514` but instead `35998` and
    # `35999`.
    if config_class.__name__ == "GPTSanJapaneseConfig":
        vocab_size += 2
    config_overrides["vocab_size"] = vocab_size

    # Used to create a new model tester with `tokenizer.vocab_size` in order to get the (updated) special token ids.
    model_tester_kwargs = {"vocab_size": vocab_size}
    # `FSMTModelTester` accepts `src_vocab_size` and `tgt_vocab_size` but not `vocab_size`.
    if config_class.__name__ == "FSMTConfig":
        del model_tester_kwargs["vocab_size"]
        model_tester_kwargs["src_vocab_size"] = tokenizer.src_vocab_size
        model_tester_kwargs["tgt_vocab_size"] = tokenizer.tgt_vocab_size

    _tiny_config = get_tiny_config(config_class, **model_tester_kwargs)

    # handle the possibility of `text_config` inside `_tiny_config` for clip-like models (`owlvit`, `groupvit`, etc.)
    if hasattr(_tiny_config, "text_config"):
        _tiny_config = _tiny_config.text_config

    # Collect values of some special token ids
    for attr in dir(_tiny_config):
        if attr.endswith("_token_id"):
            token_id = getattr(_tiny_config, attr)
            if token_id is not None:
                # Using the token id values from `tokenizer` instead of from `_tiny_config`.
                token_id = get_token_id_from_tokenizer(attr, tokenizer, original_token_id=token_id)
                config_overrides[attr] = token_id

    if config_class.__name__ == "FSMTConfig":
        config_overrides["src_vocab_size"] = tokenizer.src_vocab_size
        config_overrides["tgt_vocab_size"] = tokenizer.tgt_vocab_size
        # `FSMTConfig` has `DecoderConfig` as `decoder` attribute.
        config_overrides["decoder"] = configuration_fsmt.DecoderConfig(
            vocab_size=tokenizer.tgt_vocab_size, bos_token_id=config_overrides["eos_token_id"]
        )

    return config_overrides


def build(config_class, models_to_create, output_dir):
    """Create all models for a certain model type.

    Args:
        config_class (`PretrainedConfig`):
            A subclass of `PretrainedConfig` that is used to determine `models_to_create`.
        models_to_create (`dict`):
            A dictionary containing the processor/model classes for which we want to create instances. These models
            are of the same model type which is associated with `config_class`.
        output_dir (`str`):
            The directory to save all the checkpoints. Each model architecture will be saved in a subdirectory under
            it. Models in different frameworks with the same architecture will be saved in the same subdirectory.
    """
    if data["training_ds"] is None or data["testing_ds"] is None:
        ds = load_dataset("wikitext", "wikitext-2-raw-v1")
        data["training_ds"] = ds["train"]
        data["testing_ds"] = ds["test"]

    if config_class.model_type in [
        "encoder-decoder",
        "vision-encoder-decoder",
        "speech-encoder-decoder",
        "vision-text-dual-encoder",
    ]:
        return build_composite_models(config_class, output_dir)

    result = {k: {} for k in models_to_create}

    # These will be removed at the end if they are empty
    result["error"] = None
    result["warnings"] = []

    # Build processors
    processor_classes = models_to_create["processor"]

    if len(processor_classes) == 0:
        error = f"No processor class could be found in {config_class.__name__}."
        fill_result_with_error(result, error, None, models_to_create)
        logger.error(result["error"][0])
        return result

    for processor_class in processor_classes:
        try:
            processor = build_processor(config_class, processor_class, allow_no_checkpoint=True)
            if processor is not None:
                result["processor"][processor_class] = processor
        except Exception:
            error = f"Failed to build processor for {processor_class.__name__}."
            trace = traceback.format_exc()
            fill_result_with_error(result, error, trace, models_to_create)
            logger.error(result["error"][0])
            return result

    if len(result["processor"]) == 0:
        error = f"No processor could be built for {config_class.__name__}."
        fill_result_with_error(result, error, None, models_to_create)
        logger.error(result["error"][0])
        return result

    try:
        tiny_config = get_tiny_config(config_class)
    except Exception as e:
        error = f"Failed to get tiny config for {config_class.__name__}: {e}"
        trace = traceback.format_exc()
        fill_result_with_error(result, error, trace, models_to_create)
        logger.error(result["error"][0])
        return result

    # Convert the processors (reduce vocabulary size, smaller image size, etc.)
    processors = list(result["processor"].values())
    processor_output_folder = os.path.join(output_dir, "processors")
    try:
        processors = convert_processors(processors, tiny_config, processor_output_folder, result)
    except Exception:
        error = "Failed to convert the processors."
        trace = traceback.format_exc()
        result["warnings"].append((error, trace))

    if len(processors) == 0:
        error = f"No processor is returned by `convert_processors` for {config_class.__name__}."
        fill_result_with_error(result, error, None, models_to_create)
        logger.error(result["error"][0])
        return result

    try:
        config_overrides = get_config_overrides(config_class, processors)
    except Exception as e:
        error = f"Failure occurred while calling `get_config_overrides`: {e}"
        trace = traceback.format_exc()
        fill_result_with_error(result, error, trace, models_to_create)
        logger.error(result["error"][0])
        return result

    # Just for us to see this easily in the report
    if "vocab_size" in config_overrides:
        result["vocab_size"] = config_overrides["vocab_size"]

    # Update attributes that `vocab_size` involves
    for k, v in config_overrides.items():
        if hasattr(tiny_config, k):
            setattr(tiny_config, k, v)

        # So far, we only have to deal with `text_config`, as `config_overrides` contains text-related attributes only.
        # `FuyuConfig` saves data under both FuyuConfig and its `text_config`. This is not good, but let's just update
        # every involved field to avoid potential failure.
        if (
            hasattr(tiny_config, "text_config")
            and tiny_config.text_config is not None
            and hasattr(tiny_config.text_config, k)
        ):
            setattr(tiny_config.text_config, k, v)

        # If `text_config_dict` exists, we need to update its value here too in order to
        # make `save_pretrained -> from_pretrained` work.
        if hasattr(tiny_config, "text_config_dict"):
            tiny_config.text_config_dict[k] = v

    if result["warnings"]:
        logger.warning(result["warnings"][0][0])

    # update `result["processor"]`
    result["processor"] = {type(p).__name__: p.__class__.__name__ for p in processors}

    for pytorch_arch in models_to_create["pytorch"]:
        result["pytorch"][pytorch_arch.__name__] = {}
        error = None
        try:
            model = build_model(pytorch_arch, tiny_config, output_dir=output_dir)
        except Exception as e:
            model = None
            error = f"Failed to create the pytorch model for {pytorch_arch}: {e}"
            trace = traceback.format_exc()

        result["pytorch"][pytorch_arch.__name__]["model"] = model.__class__.__name__ if model is not None else None
        result["pytorch"][pytorch_arch.__name__]["checkpoint"] = (
            get_checkpoint_dir(output_dir, pytorch_arch) if model is not None else None
        )
        if error is not None:
            result["pytorch"][pytorch_arch.__name__]["error"] = (error, trace)
            logger.error(f"{pytorch_arch.__name__}: {error}")

    for tensorflow_arch in models_to_create["tensorflow"]:
        # Make PT/TF weights compatible
        pt_arch_name = tensorflow_arch.__name__[2:]  # Remove `TF`
        pt_arch = getattr(transformers_module, pt_arch_name)

        result["tensorflow"][tensorflow_arch.__name__] = {}
        error = None
        if pt_arch.__name__ in result["pytorch"] and result["pytorch"][pt_arch.__name__]["checkpoint"] is not None:
            ckpt = get_checkpoint_dir(output_dir, pt_arch)
            # Use the same weights from PyTorch.
            try:
                model = tensorflow_arch.from_pretrained(ckpt)
                model.save_pretrained(ckpt)
            except Exception as e:
                # Conversion may fail. Let's not create a model with different weights to avoid confusion (for now).
                model = None
                error = f"Failed to convert the pytorch model to the tensorflow model for {pt_arch}: {e}"
                trace = traceback.format_exc()
        else:
            try:
                model = build_model(tensorflow_arch, tiny_config, output_dir=output_dir)
            except Exception as e:
                model = None
                error = f"Failed to create the tensorflow model for {tensorflow_arch}: {e}"
                trace = traceback.format_exc()

        result["tensorflow"][tensorflow_arch.__name__]["model"] = (
            model.__class__.__name__ if model is not None else None
        )
        result["tensorflow"][tensorflow_arch.__name__]["checkpoint"] = (
            get_checkpoint_dir(output_dir, tensorflow_arch) if model is not None else None
        )
        if error is not None:
            result["tensorflow"][tensorflow_arch.__name__]["error"] = (error, trace)
            logger.error(f"{tensorflow_arch.__name__}: {error}")

    if not result["error"]:
        del result["error"]
    if not result["warnings"]:
        del result["warnings"]

    return result


def build_tiny_model_summary(results, organization=None, token=None):
    """Build a summary: a dictionary of the form

    {
        model architecture name:
        {
            "tokenizer_classes": [...],
            "processor_classes": [...],
            "model_classes": [...],
        }
        ..
    }
    """
    tiny_model_summary = {}
    for config_name in results:
        processors = [key for key, value in results[config_name]["processor"].items()]
        tokenizer_classes = sorted([x for x in processors if x.endswith("TokenizerFast") or x.endswith("Tokenizer")])
        processor_classes = sorted([x for x in processors if x not in tokenizer_classes])
        for framework in FRAMEWORKS:
            if framework not in results[config_name]:
                continue
            for arch_name in results[config_name][framework]:
                model_classes = [arch_name]
                base_arch_name = arch_name[2:] if arch_name.startswith("TF") else arch_name
                # tiny model is not created for `arch_name`
                if results[config_name][framework][arch_name]["model"] is None:
                    model_classes = []
                if base_arch_name not in tiny_model_summary:
                    tiny_model_summary[base_arch_name] = {}
                tiny_model_summary[base_arch_name].update(
                    {
                        "tokenizer_classes": tokenizer_classes,
                        "processor_classes": processor_classes,
                    }
                )
                tiny_model_summary[base_arch_name]["model_classes"] = sorted(
                    tiny_model_summary[base_arch_name].get("model_classes", []) + model_classes
                )
                if organization is not None:
                    repo_name = f"tiny-random-{base_arch_name}"
                    # composite models' checkpoints have more precise repo. names on the Hub.
                    if base_arch_name in COMPOSITE_MODELS:
                        repo_name = f"tiny-random-{COMPOSITE_MODELS[base_arch_name]}"
                    repo_id = f"{organization}/{repo_name}"
                    try:
                        commit_hash = hf_api.repo_info(repo_id, token=token).sha
                    except Exception:
                        # The directory is not created, but processor(s) is/are included in `results`.
                        logger.warning(f"Failed to get information for {repo_id}.\n{traceback.format_exc()}")
                        del tiny_model_summary[base_arch_name]
                        continue
                    tiny_model_summary[base_arch_name]["sha"] = commit_hash

    return tiny_model_summary


def build_failed_report(results, include_warning=True):
    failed_results = {}
    for config_name in results:
        if "error" in results[config_name]:
            if config_name not in failed_results:
                failed_results[config_name] = {}
            failed_results[config_name] = {"error": results[config_name]["error"]}

        if include_warning and "warnings" in results[config_name]:
            if config_name not in failed_results:
                failed_results[config_name] = {}
            failed_results[config_name]["warnings"] = results[config_name]["warnings"]

        for framework in FRAMEWORKS:
            if framework not in results[config_name]:
                continue
            for arch_name in results[config_name][framework]:
                if "error" in results[config_name][framework][arch_name]:
                    if config_name not in failed_results:
                        failed_results[config_name] = {}
                    if framework not in failed_results[config_name]:
                        failed_results[config_name][framework] = {}
                    if arch_name not in failed_results[config_name][framework]:
                        failed_results[config_name][framework][arch_name] = {}
                    error = results[config_name][framework][arch_name]["error"]
                    failed_results[config_name][framework][arch_name]["error"] = error

    return failed_results


def build_simple_report(results):
    text = ""
    failed_text = ""
    for config_name in results:
        for framework in FRAMEWORKS:
            if framework not in results[config_name]:
                continue
            for arch_name in results[config_name][framework]:
                if "error" in results[config_name][framework][arch_name]:
                    result = results[config_name][framework][arch_name]["error"]
                    failed_text += f"{arch_name}: {result[0]}\n"
                else:
                    result = ("OK",)
                text += f"{arch_name}: {result[0]}\n"

    return text, failed_text


def update_tiny_model_summary_file(report_path):
    with open(os.path.join(report_path, "tiny_model_summary.json")) as fp:
        new_data = json.load(fp)
    with open("tests/utils/tiny_model_summary.json") as fp:
        data = json.load(fp)
    for key, value in new_data.items():
        if key not in data:
            data[key] = value
        else:
            for attr in ["tokenizer_classes", "processor_classes", "model_classes"]:
                # we might get duplication here. We will remove them below when creating `updated_data`.
                data[key][attr].extend(value[attr])
            new_sha = value.get("sha", None)
            if new_sha is not None:
                data[key]["sha"] = new_sha

    updated_data = {}
    for key in sorted(data.keys()):
        updated_data[key] = {}
        for attr, value in data[key].items():
            # deduplication and sort
            updated_data[key][attr] = sorted(set(value)) if attr != "sha" else value

    with open(os.path.join(report_path, "updated_tiny_model_summary.json"), "w") as fp:
        json.dump(updated_data, fp, indent=4, ensure_ascii=False)


def create_tiny_models(
    output_path,
    all,
    model_types,
    models_to_skip,
    no_check,
    upload,
    organization,
    token,
    num_workers=1,
):
    clone_path = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    if os.getcwd() != clone_path:
        raise ValueError(f"This script should be run from the root of the clone of `transformers` {clone_path}")

    report_path = os.path.join(output_path, "reports")
    os.makedirs(report_path)

    _pytorch_arch_mappings = [
        x
        for x in dir(transformers_module)
        if x.startswith("MODEL_") and x.endswith("_MAPPING") and x != "MODEL_NAMES_MAPPING"
    ]
    _tensorflow_arch_mappings = [
        x for x in dir(transformers_module) if x.startswith("TF_MODEL_") and x.endswith("_MAPPING")
    ]

    pytorch_arch_mappings = [getattr(transformers_module, x) for x in _pytorch_arch_mappings]
    tensorflow_arch_mappings = [getattr(transformers_module, x) for x in _tensorflow_arch_mappings]

    config_classes = CONFIG_MAPPING.values()
    if not all:
        config_classes = [CONFIG_MAPPING[model_type] for model_type in model_types]

    # A map from config classes to tuples of processors (tokenizer, feature extractor, processor) classes
    processor_type_map = {c: get_processor_types_from_config_class(c) for c in config_classes}

    to_create = {}
    for c in config_classes:
        processors = processor_type_map[c]
        models = get_architectures_from_config_class(c, pytorch_arch_mappings, models_to_skip)
        tf_models = get_architectures_from_config_class(c, tensorflow_arch_mappings, models_to_skip)
        if len(models) + len(tf_models) > 0:
            to_create[c] = {"processor": processors, "pytorch": models, "tensorflow": tf_models}

    results = {}
    if num_workers <= 1:
        for c, models_to_create in list(to_create.items()):
            print(f"Create models for {c.__name__} ...")
            result = build(c, models_to_create, output_dir=os.path.join(output_path, c.model_type))
            results[c.__name__] = result
            print("=" * 40)
    else:
        all_build_args = []
        for c, models_to_create in list(to_create.items()):
            all_build_args.append((c, models_to_create, os.path.join(output_path, c.model_type)))
        with multiprocessing.Pool() as pool:
            results = pool.starmap(build, all_build_args)
            results = {build_args[0].__name__: result for build_args, result in zip(all_build_args, results)}

    if upload:
        if organization is None:
            raise ValueError("The argument `organization` cannot be `None`. No model is uploaded")

        to_upload = []
        for model_type in os.listdir(output_path):
            # This is the directory containing the reports
            if model_type == "reports":
                continue
            for arch in os.listdir(os.path.join(output_path, model_type)):
                if arch == "processors":
                    continue
                to_upload.append(os.path.join(output_path, model_type, arch))
        to_upload = sorted(to_upload)

        upload_results = {}
        if len(to_upload) > 0:
            for model_dir in to_upload:
                try:
                    upload_model(model_dir, organization, token)
                except Exception as e:
                    error = f"Failed to upload {model_dir}. {e.__class__.__name__}: {e}"
                    logger.error(error)
                    upload_results[model_dir] = error

        with open(os.path.join(report_path, "failed_uploads.json"), "w") as fp:
            json.dump(upload_results, fp, indent=4)

    # Build the tiny model summary file. The `tokenizer_classes` and `processor_classes` could be both empty lists.
    # When using the items in this file to update the file `tests/utils/tiny_model_summary.json`, the model
    # architectures with `tokenizer_classes` and `processor_classes` being both empty should **NOT** be added to
    # `tests/utils/tiny_model_summary.json`.
    tiny_model_summary = build_tiny_model_summary(results, organization=organization, token=token)
    with open(os.path.join(report_path, "tiny_model_summary.json"), "w") as fp:
        json.dump(tiny_model_summary, fp, indent=4)

    with open(os.path.join(report_path, "tiny_model_creation_report.json"), "w") as fp:
        json.dump(results, fp, indent=4)

    # Build the warning/failure report (json format): same format as the complete `results` except this contains only
    # warnings or errors.
    failed_results = build_failed_report(results)
    with open(os.path.join(report_path, "failed_report.json"), "w") as fp:
        json.dump(failed_results, fp, indent=4)

    simple_report, failed_report = build_simple_report(results)
    # The simplified report: a .txt file with each line of format:
    # {model architecture name}: {OK or error message}
    with open(os.path.join(report_path, "simple_report.txt"), "w") as fp:
        fp.write(simple_report)

    # The simplified failure report: same as above except this only contains lines with errors
    with open(os.path.join(report_path, "simple_failed_report.txt"), "w") as fp:
        fp.write(failed_report)

    update_tiny_model_summary_file(report_path=os.path.join(output_path, "reports"))


if __name__ == "__main__":
    # This has to be `spawn` to avoid hanging forever!
    multiprocessing.set_start_method("spawn")

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    parser.add_argument("--all", action="store_true", help="Will create all tiny models.")
    parser.add_argument(
        "--no_check",
        action="store_true",
        help="If set, will not check the validity of architectures. Use with caution.",
    )
    parser.add_argument(
        "-m",
        "--model_types",
        type=list_str,
        help="Comma-separated list of model type(s) from which the tiny models will be created.",
    )
    parser.add_argument(
        "--models_to_skip",
        type=list_str,
        help=(
            "Comma-separated list of model class name(s) for which the tiny models won't be created.\nThis is usually "
            "the list of model classes that have their tiny versions already uploaded to the Hub."
        ),
    )
    parser.add_argument("--upload", action="store_true", help="Whether to upload the created tiny models to the Hub.")
    parser.add_argument(
        "--organization",
        default=None,
        type=str,
        help="The organization on the Hub to which the tiny models will be uploaded.",
    )
    parser.add_argument(
        "--token", default=None, type=str, help="A valid authentication token for HuggingFace Hub with write access."
    )
    parser.add_argument("output_path", type=Path, help="Path indicating where to store the generated models.")
    parser.add_argument("--num_workers", default=1, type=int, help="The number of workers to run.")

    args = parser.parse_args()

    if not args.all and not args.model_types:
        raise ValueError("Please provide at least one model type or pass `--all` to export all architectures.")

    create_tiny_models(
        args.output_path,
        args.all,
        args.model_types,
        args.models_to_skip,
        args.no_check,
        args.upload,
        args.organization,
        args.token,
        args.num_workers,
    )
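# Illustrative usage (a minimal sketch appended for this edit, not part of the original script; the output directory
# and model types below are assumptions chosen for illustration). Run from the root of a `transformers` clone:
#
#     python utils/create_dummy_models.py --model_types bert,gpt2 /tmp/tiny_models
#     python utils/create_dummy_models.py --all --num_workers 4 /tmp/tiny_models
#
# Pass `--upload --organization <org> --token <HF_TOKEN>` to also push the created checkpoints to the Hub; the
# per-architecture reports are written under `<output_path>/reports`.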
0
hf_public_repos/transformers
hf_public_repos/transformers/utils/check_build.py
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import importlib
from pathlib import Path


# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    # Test all the extensions added in the setup
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
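# Illustrative usage (a minimal sketch, not part of the original file; the assumption is that a source build has
# already produced `build/lib/transformers`, e.g. via `python setup.py build`). From the repository root:
#
#     python utils/check_build.py              # verify the custom files under build/lib/transformers
#     python utils/check_build.py --check_lib  # verify the installed `transformers` package instead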
0
hf_public_repos/transformers
hf_public_repos/transformers/utils/past_ci_versions.py
import argparse
import os


past_versions_testing = {
    "pytorch": {
        "1.13": {
            "torch": "1.13.1",
            "torchvision": "0.14.1",
            "torchaudio": "0.13.1",
            "python": 3.9,
            "cuda": "cu116",
            "install": (
                "python3 -m pip install --no-cache-dir -U torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1"
                " --extra-index-url https://download.pytorch.org/whl/cu116"
            ),
            "base_image": "nvidia/cuda:11.6.2-cudnn8-devel-ubuntu20.04",
        },
        "1.12": {
            "torch": "1.12.1",
            "torchvision": "0.13.1",
            "torchaudio": "0.12.1",
            "python": 3.9,
            "cuda": "cu113",
            "install": (
                "python3 -m pip install --no-cache-dir -U torch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1"
                " --extra-index-url https://download.pytorch.org/whl/cu113"
            ),
            "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
        },
        "1.11": {
            "torch": "1.11.0",
            "torchvision": "0.12.0",
            "torchaudio": "0.11.0",
            "python": 3.9,
            "cuda": "cu113",
            "install": (
                "python3 -m pip install --no-cache-dir -U torch==1.11.0 torchvision==0.12.0 torchaudio==0.11.0"
                " --extra-index-url https://download.pytorch.org/whl/cu113"
            ),
            "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
        },
        "1.10": {
            "torch": "1.10.2",
            "torchvision": "0.11.3",
            "torchaudio": "0.10.2",
            "python": 3.9,
            "cuda": "cu113",
            "install": (
                "python3 -m pip install --no-cache-dir -U torch==1.10.2 torchvision==0.11.3 torchaudio==0.10.2"
                " --extra-index-url https://download.pytorch.org/whl/cu113"
            ),
            "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
        },
        # torchaudio < 0.10 has no CUDA-enabled binary distributions
        "1.9": {
            "torch": "1.9.1",
            "torchvision": "0.10.1",
            "torchaudio": "0.9.1",
            "python": 3.9,
            "cuda": "cu111",
            "install": (
                "python3 -m pip install --no-cache-dir -U torch==1.9.1 torchvision==0.10.1 torchaudio==0.9.1"
                " --extra-index-url https://download.pytorch.org/whl/cu111"
            ),
            "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
        },
    },
    "tensorflow": {
        "2.11": {
            "tensorflow": "2.11.1",
            "install": "python3 -m pip install --no-cache-dir -U tensorflow==2.11.1",
            "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
        },
        "2.10": {
            "tensorflow": "2.10.1",
            "install": "python3 -m pip install --no-cache-dir -U tensorflow==2.10.1",
            "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
        },
        "2.9": {
            "tensorflow": "2.9.3",
            "install": "python3 -m pip install --no-cache-dir -U tensorflow==2.9.3",
            "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
        },
        "2.8": {
            "tensorflow": "2.8.2",
            "install": "python3 -m pip install --no-cache-dir -U tensorflow==2.8.2",
            "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
        },
        "2.7": {
            "tensorflow": "2.7.3",
            "install": "python3 -m pip install --no-cache-dir -U tensorflow==2.7.3",
            "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
        },
        "2.6": {
            "tensorflow": "2.6.5",
            "install": "python3 -m pip install --no-cache-dir -U tensorflow==2.6.5",
            "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
        },
        "2.5": {
            "tensorflow": "2.5.3",
            "install": "python3 -m pip install --no-cache-dir -U tensorflow==2.5.3",
            "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
        },
    },
}


if __name__ == "__main__":
    parser = argparse.ArgumentParser("Choose the framework and version to install")
    parser.add_argument(
        "--framework", help="The framework to install. Should be `pytorch` or `tensorflow`", type=str, required=True
    )
    parser.add_argument("--version", help="The version of the framework to install.", type=str, required=True)
    args = parser.parse_args()

    info = past_versions_testing[args.framework][args.version]

    os.system(f'echo "export INSTALL_CMD=\'{info["install"]}\'" >> ~/.profile')
    print(f'echo "export INSTALL_CMD=\'{info["install"]}\'" >> ~/.profile')

    cuda = ""
    if args.framework == "pytorch":
        cuda = info["cuda"]
    os.system(f"echo \"export CUDA='{cuda}'\" >> ~/.profile")
    print(f"echo \"export CUDA='{cuda}'\" >> ~/.profile")
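# Illustrative usage (a minimal sketch, not part of the original file): pick a framework/version pair that exists as
# a key of `past_versions_testing` above; the script then appends an `INSTALL_CMD` export (and a `CUDA` export, empty
# for TensorFlow) to `~/.profile`:
#
#     python utils/past_ci_versions.py --framework pytorch --version 1.13
#     python utils/past_ci_versions.py --framework tensorflow --version 2.11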
0
hf_public_repos/transformers
hf_public_repos/transformers/utils/tests_fetcher.py
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Welcome to tests_fetcher V2.

This util is designed to fetch tests to run on a PR so that only the tests impacted by the modifications are run, and
when too many models are being impacted, only run the tests of a subset of core models. It works like this.

Stage 1: Identify the modified files. For jobs that run on the main branch, it's just the diff with the last commit.
On a PR, this takes all the files from the branching point to the current commit (so all modifications in a PR, not
just the last commit) but excludes modifications that are on docstrings or comments only.

Stage 2: Extract the tests to run. This is done by looking at the imports in each module and test file: if module A
imports module B, then changing module B impacts module A, so the tests using module A should be run. We thus get the
dependencies of each model and then recursively build the 'reverse' map of dependencies to get all modules and tests
impacted by a given file. We then only keep the tests (and only the core models tests if there are too many modules).

Caveats:
  - This module only filters tests by files (not individual tests) so it's better to have tests for different things
    in different files.
  - This module assumes inits are just importing things, not really building objects, so it's better to structure
    them this way and move object building into separate submodules.

Usage:

Base use to fetch the tests in a pull request

```bash
python utils/tests_fetcher.py
```

Base use to fetch the tests on the main branch (with diff from the last commit):

```bash
python utils/tests_fetcher.py --diff_with_last_commit
```
"""

import argparse
import collections
import importlib.util
import json
import os
import re
import tempfile
from contextlib import contextmanager
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union

from git import Repo


PATH_TO_REPO = Path(__file__).parent.parent.resolve()
PATH_TO_EXAMPLES = PATH_TO_REPO / "examples"
PATH_TO_TRANFORMERS = PATH_TO_REPO / "src/transformers"
PATH_TO_TESTS = PATH_TO_REPO / "tests"

# List here the models to always test.
IMPORTANT_MODELS = [
    "auto",
    # Most downloaded models
    "bert",
    "clip",
    "t5",
    "xlm-roberta",
    "gpt2",
    "bart",
    "mpnet",
    "gpt-j",
    "wav2vec2",
    "deberta-v2",
    "layoutlm",
    "opt",
    "longformer",
    "vit",
    # Pipeline-specific model (to be sure each pipeline has one model in this list)
    "tapas",
    "vilt",
    "clap",
    "detr",
    "owlvit",
    "dpt",
    "videomae",
]


@contextmanager
def checkout_commit(repo: Repo, commit_id: str):
    """
    Context manager that checks out a given commit when entered, but gets back to the reference it was at on exit.

    Args:
        repo (`git.Repo`): A git repository (for instance the Transformers repo).
        commit_id (`str`): The commit reference to checkout inside the context manager.
""" current_head = repo.head.commit if repo.head.is_detached else repo.head.ref try: repo.git.checkout(commit_id) yield finally: repo.git.checkout(current_head) def clean_code(content: str) -> str: """ Remove docstrings, empty line or comments from some code (used to detect if a diff is real or only concern comments or docstings). Args: content (`str`): The code to clean Returns: `str`: The cleaned code. """ # We need to deactivate autoformatting here to write escaped triple quotes (we cannot use real triple quotes or # this would mess up the result if this function applied to this particular file). # fmt: off # Remove docstrings by splitting on triple " then triple ': splits = content.split('\"\"\"') content = "".join(splits[::2]) splits = content.split("\'\'\'") # fmt: on content = "".join(splits[::2]) # Remove empty lines and comments lines_to_keep = [] for line in content.split("\n"): # remove anything that is after a # sign. line = re.sub("#.*$", "", line) # remove white lines if len(line) != 0 and not line.isspace(): lines_to_keep.append(line) return "\n".join(lines_to_keep) def keep_doc_examples_only(content: str) -> str: """ Remove everything from the code content except the doc examples (used to determined if a diff should trigger doc tests or not). Args: content (`str`): The code to clean Returns: `str`: The cleaned code. """ # Keep doc examples only by splitting on triple "`" splits = content.split("```") # Add leading and trailing "```" so the navigation is easier when compared to the original input `content` content = "```" + "```".join(splits[1::2]) + "```" # Remove empty lines and comments lines_to_keep = [] for line in content.split("\n"): # remove anything that is after a # sign. line = re.sub("#.*$", "", line) # remove white lines if len(line) != 0 and not line.isspace(): lines_to_keep.append(line) return "\n".join(lines_to_keep) def get_all_tests() -> List[str]: """ Walks the `tests` folder to return a list of files/subfolders. This is used to split the tests to run when using paralellism. The split is: - folders under `tests`: (`tokenization`, `pipelines`, etc) except the subfolder `models` is excluded. - folders under `tests/models`: `bert`, `gpt2`, etc. - test files under `tests`: `test_modeling_common.py`, `test_tokenization_common.py`, etc. """ # test folders/files directly under `tests` folder tests = os.listdir(PATH_TO_TESTS) tests = [f"tests/{f}" for f in tests if "__pycache__" not in f] tests = sorted([f for f in tests if (PATH_TO_REPO / f).is_dir() or f.startswith("tests/test_")]) # model specific test folders model_test_folders = os.listdir(PATH_TO_TESTS / "models") model_test_folders = [f"tests/models/{f}" for f in model_test_folders if "__pycache__" not in f] model_test_folders = sorted([f for f in model_test_folders if (PATH_TO_REPO / f).is_dir()]) tests.remove("tests/models") # Sagemaker tests are not meant to be run on the CI. if "tests/sagemaker" in tests: tests.remove("tests/sagemaker") tests = model_test_folders + tests return tests def diff_is_docstring_only(repo: Repo, branching_point: str, filename: str) -> bool: """ Check if the diff is only in docstrings (or comments and whitespace) in a filename. Args: repo (`git.Repo`): A git repository (for instance the Transformers repo). branching_point (`str`): The commit reference of where to compare for the diff. filename (`str`): The filename where we want to know if the diff isonly in docstrings/comments. Returns: `bool`: Whether the diff is docstring/comments only or not. 
""" folder = Path(repo.working_dir) with checkout_commit(repo, branching_point): with open(folder / filename, "r", encoding="utf-8") as f: old_content = f.read() with open(folder / filename, "r", encoding="utf-8") as f: new_content = f.read() old_content_clean = clean_code(old_content) new_content_clean = clean_code(new_content) return old_content_clean == new_content_clean def diff_contains_doc_examples(repo: Repo, branching_point: str, filename: str) -> bool: """ Check if the diff is only in code examples of the doc in a filename. Args: repo (`git.Repo`): A git repository (for instance the Transformers repo). branching_point (`str`): The commit reference of where to compare for the diff. filename (`str`): The filename where we want to know if the diff is only in codes examples. Returns: `bool`: Whether the diff is only in code examples of the doc or not. """ folder = Path(repo.working_dir) with checkout_commit(repo, branching_point): with open(folder / filename, "r", encoding="utf-8") as f: old_content = f.read() with open(folder / filename, "r", encoding="utf-8") as f: new_content = f.read() old_content_clean = keep_doc_examples_only(old_content) new_content_clean = keep_doc_examples_only(new_content) return old_content_clean != new_content_clean def get_impacted_files_from_tiny_model_summary(diff_with_last_commit: bool = False) -> List[str]: """ Return a list of python modeling files that are impacted by the changes of `tiny_model_summary.json` in between: - the current head and the main branch if `diff_with_last_commit=False` (default) - the current head and its parent commit otherwise. Returns: `List[str]`: The list of Python modeling files that are impacted by the changes of `tiny_model_summary.json`. """ repo = Repo(PATH_TO_REPO) folder = Path(repo.working_dir) if not diff_with_last_commit: print(f"main is at {repo.refs.main.commit}") print(f"Current head is at {repo.head.commit}") commits = repo.merge_base(repo.refs.main, repo.head) for commit in commits: print(f"Branching commit: {commit}") else: print(f"main is at {repo.head.commit}") commits = repo.head.commit.parents for commit in commits: print(f"Parent commit: {commit}") if not os.path.isfile(folder / "tests/utils/tiny_model_summary.json"): return [] files = set() for commit in commits: with checkout_commit(repo, commit): with open(folder / "tests/utils/tiny_model_summary.json", "r", encoding="utf-8") as f: old_content = f.read() with open(folder / "tests/utils/tiny_model_summary.json", "r", encoding="utf-8") as f: new_content = f.read() # get the content as json object old_content = json.loads(old_content) new_content = json.loads(new_content) old_keys = set(old_content.keys()) new_keys = set(new_content.keys()) # get the difference keys_with_diff = old_keys.symmetric_difference(new_keys) common_keys = old_keys.intersection(new_keys) # if both have the same key, check its content for key in common_keys: if old_content[key] != new_content[key]: keys_with_diff.add(key) # get the model classes impacted_model_classes = [] for key in keys_with_diff: if key in new_keys: impacted_model_classes.extend(new_content[key]["model_classes"]) # get the module where the model classes are defined. We want to use the main `__init__` file, but it requires # all the framework being installed, which is not ideal for a simple script like test fetcher. # So we create a temporary and modified main `__init__` and access its `_import_structure`. 
        with open(folder / "src/transformers/__init__.py") as fp:
            lines = fp.readlines()
        new_lines = []
        # Get all the code related to `_import_structure`
        for line in lines:
            if line == "_import_structure = {\n":
                new_lines.append(line)
            elif line == "# Direct imports for type-checking\n":
                break
            elif len(new_lines) > 0:
                # bypass the framework check so we can get all the information even if frameworks are not available
                line = re.sub(r"is_.+_available\(\)", "True", line)
                line = line.replace("OptionalDependencyNotAvailable", "Exception")
                line = line.replace("Exception()", "Exception")
                new_lines.append(line)

        # create and load the temporary module
        with tempfile.TemporaryDirectory() as tmpdirname:
            with open(os.path.join(tmpdirname, "temp_init.py"), "w") as fp:
                fp.write("".join(new_lines))

            spec = importlib.util.spec_from_file_location("temp_init", os.path.join(tmpdirname, "temp_init.py"))
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)

            # Finally, get `_import_structure` that we need
            import_structure = module._import_structure

            # map model classes to their defined module
            reversed_structure = {}
            for key, values in import_structure.items():
                for value in values:
                    reversed_structure[value] = key

            # Get the corresponding modeling file path
            for model_class in impacted_model_classes:
                module = reversed_structure[model_class]
                framework = ""
                if model_class.startswith("TF"):
                    framework = "tf"
                elif model_class.startswith("Flax"):
                    framework = "flax"
                fn = (
                    f"modeling_{module.split('.')[-1]}.py"
                    if framework == ""
                    else f"modeling_{framework}_{module.split('.')[-1]}.py"
                )
                files.add(
                    f"src.transformers.{module}.{fn}".replace(".", os.path.sep).replace(f"{os.path.sep}py", ".py")
                )

    return sorted(files)


def get_diff(repo: Repo, base_commit: str, commits: List[str]) -> List[str]:
    """
    Get the diff between a base commit and one or several commits.

    Args:
        repo (`git.Repo`): A git repository (for instance the Transformers repo).
        base_commit (`str`):
            The commit reference of where to compare for the diff. This is the current commit, not the branching
            point!
        commits (`List[str]`):
            The list of commits with which to compare the repo at `base_commit` (so the branching point).

    Returns:
        `List[str]`: The list of Python files with a diff (files added, renamed or deleted are always returned, files
        modified are returned if the diff in the file is not only in docstrings or comments, see
        `diff_is_docstring_only`).
    """
    print("\n### DIFF ###\n")
    code_diff = []
    for commit in commits:
        for diff_obj in commit.diff(base_commit):
            # We always add new python files
            if diff_obj.change_type == "A" and diff_obj.b_path.endswith(".py"):
                code_diff.append(diff_obj.b_path)
            # We check that deleted python files won't break corresponding tests.
            elif diff_obj.change_type == "D" and diff_obj.a_path.endswith(".py"):
                code_diff.append(diff_obj.a_path)
            # Now for modified files
            elif diff_obj.change_type in ["M", "R"] and diff_obj.b_path.endswith(".py"):
                # In case of renames, we'll look at the tests using both the old and new name.
                if diff_obj.a_path != diff_obj.b_path:
                    code_diff.extend([diff_obj.a_path, diff_obj.b_path])
                else:
                    # Otherwise, we check modifications are in code and not docstrings.
if diff_is_docstring_only(repo, commit, diff_obj.b_path): print(f"Ignoring diff in {diff_obj.b_path} as it only concerns docstrings or comments.") else: code_diff.append(diff_obj.a_path) return code_diff def get_modified_python_files(diff_with_last_commit: bool = False) -> List[str]: """ Return a list of python files that have been modified between: - the current head and the main branch if `diff_with_last_commit=False` (default) - the current head and its parent commit otherwise. Returns: `List[str]`: The list of Python files with a diff (files added, renamed or deleted are always returned, files modified are returned if the diff in the file is not only in docstrings or comments, see `diff_is_docstring_only`). """ repo = Repo(PATH_TO_REPO) if not diff_with_last_commit: print(f"main is at {repo.refs.main.commit}") print(f"Current head is at {repo.head.commit}") branching_commits = repo.merge_base(repo.refs.main, repo.head) for commit in branching_commits: print(f"Branching commit: {commit}") return get_diff(repo, repo.head.commit, branching_commits) else: print(f"main is at {repo.head.commit}") parent_commits = repo.head.commit.parents for commit in parent_commits: print(f"Parent commit: {commit}") return get_diff(repo, repo.head.commit, parent_commits) def get_diff_for_doctesting(repo: Repo, base_commit: str, commits: List[str]) -> List[str]: """ Get the diff in doc examples between a base commit and one or several commits. Args: repo (`git.Repo`): A git repository (for instance the Transformers repo). base_commit (`str`): The commit reference of where to compare for the diff. This is the current commit, not the branching point! commits (`List[str]`): The list of commits with which to compare the repo at `base_commit` (so the branching point). Returns: `List[str]`: The list of Python and Markdown files with a diff (files added or renamed are always returned, files modified are returned if the diff in the file is only in doctest examples). """ print("\n### DIFF ###\n") code_diff = [] for commit in commits: for diff_obj in commit.diff(base_commit): # We only consider Python files and doc files. if not diff_obj.b_path.endswith(".py") and not diff_obj.b_path.endswith(".md"): continue # We always add new python/md files if diff_obj.change_type in ["A"]: code_diff.append(diff_obj.b_path) # Now for modified files elif diff_obj.change_type in ["M", "R"]: # In case of renames, we'll look at the tests using both the old and new name. if diff_obj.a_path != diff_obj.b_path: code_diff.extend([diff_obj.a_path, diff_obj.b_path]) else: # Otherwise, we check modifications contain some doc example(s). if diff_contains_doc_examples(repo, commit, diff_obj.b_path): code_diff.append(diff_obj.a_path) else: print(f"Ignoring diff in {diff_obj.b_path} as it doesn't contain any doc example.") return code_diff def get_all_doctest_files() -> List[str]: """ Return the complete list of python and Markdown files on which we run doctest. At this moment, we restrict this to only take files from `src/` or `docs/source/en/` that are not in `utils/not_doctested.txt`. Returns: `List[str]`: The complete list of Python and Markdown files on which we run doctest. 
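        (For instance, `src/transformers/modeling_utils.py` or `docs/source/en/quicktour.md` would qualify, as long
        as they are not listed in `utils/not_doctested.txt`.)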
""" py_files = [str(x.relative_to(PATH_TO_REPO)) for x in PATH_TO_REPO.glob("**/*.py")] md_files = [str(x.relative_to(PATH_TO_REPO)) for x in PATH_TO_REPO.glob("**/*.md")] test_files_to_run = py_files + md_files # only include files in `src` or `docs/source/en/` test_files_to_run = [x for x in test_files_to_run if x.startswith(("src/", "docs/source/en/"))] # not include init files test_files_to_run = [x for x in test_files_to_run if not x.endswith(("__init__.py",))] # These are files not doctested yet. with open("utils/not_doctested.txt") as fp: not_doctested = {x.split(" ")[0] for x in fp.read().strip().split("\n")} # So far we don't have 100% coverage for doctest. This line will be removed once we achieve 100%. test_files_to_run = [x for x in test_files_to_run if x not in not_doctested] return sorted(test_files_to_run) def get_new_doctest_files(repo, base_commit, branching_commit) -> List[str]: """ Get the list of files that were removed from "utils/not_doctested.txt", between `base_commit` and `branching_commit`. Returns: `List[str]`: List of files that were removed from "utils/not_doctested.txt". """ for diff_obj in branching_commit.diff(base_commit): # Ignores all but the "utils/not_doctested.txt" file. if diff_obj.a_path != "utils/not_doctested.txt": continue # Loads the two versions folder = Path(repo.working_dir) with checkout_commit(repo, branching_commit): with open(folder / "utils/not_doctested.txt", "r", encoding="utf-8") as f: old_content = f.read() with open(folder / "utils/not_doctested.txt", "r", encoding="utf-8") as f: new_content = f.read() # Compute the removed lines and return them removed_content = {x.split(" ")[0] for x in old_content.split("\n")} - { x.split(" ")[0] for x in new_content.split("\n") } return sorted(removed_content) return [] def get_doctest_files(diff_with_last_commit: bool = False) -> List[str]: """ Return a list of python and Markdown files where doc example have been modified between: - the current head and the main branch if `diff_with_last_commit=False` (default) - the current head and its parent commit otherwise. Returns: `List[str]`: The list of Python and Markdown files with a diff (files added or renamed are always returned, files modified are returned if the diff in the file is only in doctest examples). """ repo = Repo(PATH_TO_REPO) test_files_to_run = [] # noqa if not diff_with_last_commit: print(f"main is at {repo.refs.main.commit}") print(f"Current head is at {repo.head.commit}") branching_commits = repo.merge_base(repo.refs.main, repo.head) for commit in branching_commits: print(f"Branching commit: {commit}") test_files_to_run = get_diff_for_doctesting(repo, repo.head.commit, branching_commits) else: print(f"main is at {repo.head.commit}") parent_commits = repo.head.commit.parents for commit in parent_commits: print(f"Parent commit: {commit}") test_files_to_run = get_diff_for_doctesting(repo, repo.head.commit, parent_commits) all_test_files_to_run = get_all_doctest_files() # Add to the test files to run any removed entry from "utils/not_doctested.txt". 
    new_test_files = get_new_doctest_files(repo, repo.head.commit, repo.refs.main.commit)
    test_files_to_run = list(set(test_files_to_run + new_test_files))

    # Do not run slow doctest tests on CircleCI
    with open("utils/slow_documentation_tests.txt") as fp:
        slow_documentation_tests = set(fp.read().strip().split("\n"))
    test_files_to_run = [
        x for x in test_files_to_run if x in all_test_files_to_run and x not in slow_documentation_tests
    ]

    # Make sure we did not end up with a test file that was removed
    test_files_to_run = [f for f in test_files_to_run if (PATH_TO_REPO / f).exists()]

    return sorted(test_files_to_run)


# (?:^|\n) -> Non-capturing group for the beginning of the doc or a new line.
# \s*from\s+(\.+\S+)\s+import\s+([^\n]+) -> Line only contains from .xxx import yyy and we catch .xxx and yyy
# (?=\n) -> Look-ahead to a new line. We can't just put \n here or using find_all on this re will only catch every
#           other import.
_re_single_line_relative_imports = re.compile(r"(?:^|\n)\s*from\s+(\.+\S+)\s+import\s+([^\n]+)(?=\n)")
# (?:^|\n) -> Non-capturing group for the beginning of the doc or a new line.
# \s*from\s+(\.+\S+)\s+import\s+\(([^\)]+)\) -> Line continues with from .xxx import (yyy) and we catch .xxx and yyy
# yyy will span multiple lines, otherwise there wouldn't be parentheses.
_re_multi_line_relative_imports = re.compile(r"(?:^|\n)\s*from\s+(\.+\S+)\s+import\s+\(([^\)]+)\)")
# (?:^|\n) -> Non-capturing group for the beginning of the doc or a new line.
# \s*from\s+transformers(\S*)\s+import\s+([^\n]+) -> Line only contains from transformers.xxx import yyy and we catch
#           .xxx and yyy
# (?=\n) -> Look-ahead to a new line. We can't just put \n here or using find_all on this re will only catch every
#           other import.
_re_single_line_direct_imports = re.compile(r"(?:^|\n)\s*from\s+transformers(\S*)\s+import\s+([^\n]+)(?=\n)")
# (?:^|\n) -> Non-capturing group for the beginning of the doc or a new line.
# \s*from\s+transformers(\S*)\s+import\s+\(([^\)]+)\) -> Line continues with from transformers.xxx import (yyy) and we
# catch .xxx and yyy. yyy will span multiple lines, otherwise there wouldn't be parentheses.
_re_multi_line_direct_imports = re.compile(r"(?:^|\n)\s*from\s+transformers(\S*)\s+import\s+\(([^\)]+)\)")


def extract_imports(module_fname: str, cache: Dict[str, List[str]] = None) -> List[str]:
    """
    Get the imports a given module makes.

    Args:
        module_fname (`str`):
            The name of the file of the module where we want to look at the imports (given relative to the root of
            the repo).
        cache (Dictionary `str` to `List[str]`, *optional*):
            To speed up this function if it was previously called on `module_fname`, the cache of all previously
            computed results.

    Returns:
        `List[str]`: The list of module filenames imported in the input `module_fname` (a submodule we import from
        that is a subfolder will give its init file).
    """
    if cache is not None and module_fname in cache:
        return cache[module_fname]

    with open(PATH_TO_REPO / module_fname, "r", encoding="utf-8") as f:
        content = f.read()

    # Filter out all docstrings so we don't pick up imports in code examples. As before, we need to deactivate
    # formatting on the next line to keep the quotes escaped and avoid this function failing on this file.
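    # (Splitting on triple quotes and keeping every other chunk drops docstring contents: for
    # 'a = 1\n"""doc"""\nb = 2', splits[::2] gives ['a = 1\n', '\nb = 2'].)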
    splits = content.split('\"\"\"')  # fmt: skip
    content = "".join(splits[::2])

    module_parts = str(module_fname).split(os.path.sep)
    imported_modules = []

    # Let's start with relative imports
    relative_imports = _re_single_line_relative_imports.findall(content)
    relative_imports = [
        (mod, imp) for mod, imp in relative_imports if "# tests_ignore" not in imp and imp.strip() != "("
    ]
    multiline_relative_imports = _re_multi_line_relative_imports.findall(content)
    relative_imports += [(mod, imp) for mod, imp in multiline_relative_imports if "# tests_ignore" not in imp]

    # We need to remove parts of the module name depending on the depth of the relative imports.
    for module, imports in relative_imports:
        level = 0
        while module.startswith("."):
            module = module[1:]
            level += 1

        if len(module) > 0:
            dep_parts = module_parts[: len(module_parts) - level] + module.split(".")
        else:
            dep_parts = module_parts[: len(module_parts) - level]
        imported_module = os.path.sep.join(dep_parts)
        imported_modules.append((imported_module, [imp.strip() for imp in imports.split(",")]))

    # Let's continue with direct imports
    direct_imports = _re_single_line_direct_imports.findall(content)
    direct_imports = [(mod, imp) for mod, imp in direct_imports if "# tests_ignore" not in imp and imp.strip() != "("]
    multiline_direct_imports = _re_multi_line_direct_imports.findall(content)
    direct_imports += [(mod, imp) for mod, imp in multiline_direct_imports if "# tests_ignore" not in imp]

    # We need to find the relative path of those imports.
    for module, imports in direct_imports:
        import_parts = module.split(".")[1:]  # ignore the name of the repo since we add it below.
        dep_parts = ["src", "transformers"] + import_parts
        imported_module = os.path.sep.join(dep_parts)
        imported_modules.append((imported_module, [imp.strip() for imp in imports.split(",")]))

    result = []
    # Double check we get proper modules (either a python file or a folder with an init).
    for module_file, imports in imported_modules:
        if (PATH_TO_REPO / f"{module_file}.py").is_file():
            module_file = f"{module_file}.py"
        elif (PATH_TO_REPO / module_file).is_dir() and (PATH_TO_REPO / module_file / "__init__.py").is_file():
            module_file = os.path.sep.join([module_file, "__init__.py"])
        imports = [imp for imp in imports if len(imp) > 0 and re.match("^[A-Za-z0-9_]*$", imp)]
        if len(imports) > 0:
            result.append((module_file, imports))

    if cache is not None:
        cache[module_fname] = result

    return result


def get_module_dependencies(module_fname: str, cache: Dict[str, List[str]] = None) -> List[str]:
    """
    Refines the result of `extract_imports` to remove subfolders and get a proper list of module filenames: if a file
    has an import `from utils import Foo, Bar`, with `utils` being a subfolder containing many files, this will
    traverse the `utils` init file to check where those dependencies come from: for instance the files utils/foo.py
    and utils/bar.py.

    Warning: This presupposes that all intermediate inits are properly built (with imports from the respective
    submodules) and works better if objects are defined in submodules and not the intermediate init (otherwise the
    intermediate init is added, and inits usually have a lot of dependencies).

    Args:
        module_fname (`str`):
            The name of the file of the module where we want to look at the imports (given relative to the root of
            the repo).
        cache (Dictionary `str` to `List[str]`, *optional*):
            To speed up this function if it was previously called on `module_fname`, the cache of all previously
            computed results.
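
    Example (illustrative): if a module does `from .utils import Foo` and `utils/__init__.py` itself re-exports
    `Foo` from `utils/foo.py`, the refined dependency returned for `Foo` is `utils/foo.py` rather than the init.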
    Returns:
        `List[str]`: The list of module filenames imported in the input `module_fname` (with submodule imports
        refined).
    """
    dependencies = []
    imported_modules = extract_imports(module_fname, cache=cache)
    # The while loop is to recursively traverse all inits we may encounter: we will add things as we go.
    while len(imported_modules) > 0:
        new_modules = []
        for module, imports in imported_modules:
            # If we end up in an __init__ we are often not actually importing from this init (except in the case
            # where the object is fully defined in the __init__)
            if module.endswith("__init__.py"):
                # So we get the imports from that init then try to find where our objects come from.
                new_imported_modules = extract_imports(module, cache=cache)
                for new_module, new_imports in new_imported_modules:
                    if any(i in new_imports for i in imports):
                        if new_module not in dependencies:
                            new_modules.append((new_module, [i for i in new_imports if i in imports]))
                        imports = [i for i in imports if i not in new_imports]
                if len(imports) > 0:
                    # If there are any objects left, they may be a submodule
                    path_to_module = PATH_TO_REPO / module.replace("__init__.py", "")
                    dependencies.extend(
                        [
                            os.path.join(module.replace("__init__.py", ""), f"{i}.py")
                            for i in imports
                            if (path_to_module / f"{i}.py").is_file()
                        ]
                    )
                    imports = [i for i in imports if not (path_to_module / f"{i}.py").is_file()]
                    if len(imports) > 0:
                        # Then if there are still objects left, they are fully defined in the init, so we keep it as a
                        # dependency.
                        dependencies.append(module)
            else:
                dependencies.append(module)

        imported_modules = new_modules
    return dependencies


def create_reverse_dependency_tree() -> List[Tuple[str, str]]:
    """
    Create a list of all edges (a, b), meaning that modifying a impacts b, with a going over all module and test
    files.
    """
    cache = {}
    all_modules = list(PATH_TO_TRANFORMERS.glob("**/*.py")) + list(PATH_TO_TESTS.glob("**/*.py"))
    all_modules = [str(mod.relative_to(PATH_TO_REPO)) for mod in all_modules]
    edges = [(dep, mod) for mod in all_modules for dep in get_module_dependencies(mod, cache=cache)]

    return list(set(edges))


def get_tree_starting_at(module: str, edges: List[Tuple[str, str]]) -> List[Union[str, List[str]]]:
    """
    Returns the tree starting at a given module following all edges.

    Args:
        module (`str`): The module that will be the root of the subtree we want.
        edges (`List[Tuple[str, str]]`): The list of all edges of the tree.

    Returns:
        `List[Union[str, List[str]]]`: The tree to print in the following format: [module, [list of edges starting at
        module], [list of edges starting at the preceding level], ...]
    """
    vertices_seen = [module]
    new_edges = [edge for edge in edges if edge[0] == module and edge[1] != module and "__init__.py" not in edge[1]]
    tree = [module]
    while len(new_edges) > 0:
        tree.append(new_edges)
        final_vertices = list({edge[1] for edge in new_edges})
        vertices_seen.extend(final_vertices)
        new_edges = [
            edge
            for edge in edges
            if edge[0] in final_vertices and edge[1] not in vertices_seen and "__init__.py" not in edge[1]
        ]

    return tree


def print_tree_deps_of(module, all_edges=None):
    """
    Prints the tree of modules depending on a given module.

    Args:
        module (`str`): The module that will be the root of the subtree we want.
        all_edges (`List[Tuple[str, str]]`, *optional*):
            The list of all edges of the tree. Will be set to `create_reverse_dependency_tree()` if not passed.
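
        Example (hypothetical output, for shape only):
            src/transformers/models/bert/modeling_bert.py
              tests/models/bert/test_modeling_bert.py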
""" if all_edges is None: all_edges = create_reverse_dependency_tree() tree = get_tree_starting_at(module, all_edges) # The list of lines is a list of tuples (line_to_be_printed, module) # Keeping the modules lets us know where to insert each new lines in the list. lines = [(tree[0], tree[0])] for index in range(1, len(tree)): edges = tree[index] start_edges = {edge[0] for edge in edges} for start in start_edges: end_edges = {edge[1] for edge in edges if edge[0] == start} # We will insert all those edges just after the line showing start. pos = 0 while lines[pos][1] != start: pos += 1 lines = lines[: pos + 1] + [(" " * (2 * index) + end, end) for end in end_edges] + lines[pos + 1 :] for line in lines: # We don't print the refs that where just here to help build lines. print(line[0]) def init_test_examples_dependencies() -> Tuple[Dict[str, List[str]], List[str]]: """ The test examples do not import from the examples (which are just scripts, not modules) so we need som extra care initializing the dependency map, which is the goal of this function. It initializes the dependency map for example files by linking each example to the example test file for the example framework. Returns: `Tuple[Dict[str, List[str]], List[str]]`: A tuple with two elements: the initialized dependency map which is a dict test example file to list of example files potentially tested by that test file, and the list of all example files (to avoid recomputing it later). """ test_example_deps = {} all_examples = [] for framework in ["flax", "pytorch", "tensorflow"]: test_files = list((PATH_TO_EXAMPLES / framework).glob("test_*.py")) all_examples.extend(test_files) # Remove the files at the root of examples/framework since they are not proper examples (they are eith utils # or example test files). examples = [ f for f in (PATH_TO_EXAMPLES / framework).glob("**/*.py") if f.parent != PATH_TO_EXAMPLES / framework ] all_examples.extend(examples) for test_file in test_files: with open(test_file, "r", encoding="utf-8") as f: content = f.read() # Map all examples to the test files found in examples/framework. test_example_deps[str(test_file.relative_to(PATH_TO_REPO))] = [ str(e.relative_to(PATH_TO_REPO)) for e in examples if e.name in content ] # Also map the test files to themselves. test_example_deps[str(test_file.relative_to(PATH_TO_REPO))].append( str(test_file.relative_to(PATH_TO_REPO)) ) return test_example_deps, all_examples def create_reverse_dependency_map() -> Dict[str, List[str]]: """ Create the dependency map from module/test filename to the list of modules/tests that depend on it recursively. Returns: `Dict[str, List[str]]`: The reverse dependency map as a dictionary mapping filenames to all the filenames depending on it recursively. This way the tests impacted by a change in file A are the test files in the list corresponding to key A in this result. """ cache = {} # Start from the example deps init. example_deps, examples = init_test_examples_dependencies() # Add all modules and all tests to all examples all_modules = list(PATH_TO_TRANFORMERS.glob("**/*.py")) + list(PATH_TO_TESTS.glob("**/*.py")) + examples all_modules = [str(mod.relative_to(PATH_TO_REPO)) for mod in all_modules] # Compute the direct dependencies of all modules. 
direct_deps = {m: get_module_dependencies(m, cache=cache) for m in all_modules} direct_deps.update(example_deps) # This recurses the dependencies something_changed = True while something_changed: something_changed = False for m in all_modules: for d in direct_deps[m]: # We stop recursing at an init (cause we always end up in the main init and we don't want to add all # files which the main init imports) if d.endswith("__init__.py"): continue if d not in direct_deps: raise ValueError(f"KeyError:{d}. From {m}") new_deps = set(direct_deps[d]) - set(direct_deps[m]) if len(new_deps) > 0: direct_deps[m].extend(list(new_deps)) something_changed = True # Finally we can build the reverse map. reverse_map = collections.defaultdict(list) for m in all_modules: for d in direct_deps[m]: reverse_map[d].append(m) # For inits, we don't do the reverse deps but the direct deps: if modifying an init, we want to make sure we test # all the modules impacted by that init. for m in [f for f in all_modules if f.endswith("__init__.py")]: direct_deps = get_module_dependencies(m, cache=cache) deps = sum([reverse_map[d] for d in direct_deps if not d.endswith("__init__.py")], direct_deps) reverse_map[m] = list(set(deps) - {m}) return reverse_map def create_module_to_test_map( reverse_map: Dict[str, List[str]] = None, filter_models: bool = False ) -> Dict[str, List[str]]: """ Extract the tests from the reverse_dependency_map and potentially filters the model tests. Args: reverse_map (`Dict[str, List[str]]`, *optional*): The reverse dependency map as created by `create_reverse_dependency_map`. Will default to the result of that function if not provided. filter_models (`bool`, *optional*, defaults to `False`): Whether or not to filter model tests to only include core models if a file impacts a lot of models. Returns: `Dict[str, List[str]]`: A dictionary that maps each file to the tests to execute if that file was modified. """ if reverse_map is None: reverse_map = create_reverse_dependency_map() # Utility that tells us if a given file is a test (taking test examples into account) def is_test(fname): if fname.startswith("tests"): return True if fname.startswith("examples") and fname.split(os.path.sep)[-1].startswith("test"): return True return False # Build the test map test_map = {module: [f for f in deps if is_test(f)] for module, deps in reverse_map.items()} if not filter_models: return test_map # Now we deal with the filtering if `filter_models` is True. num_model_tests = len(list(PATH_TO_TESTS.glob("models/*"))) def has_many_models(tests): # We filter to core models when a given file impacts more than half the model tests. model_tests = {Path(t).parts[2] for t in tests if t.startswith("tests/models/")} return len(model_tests) > num_model_tests // 2 def filter_tests(tests): return [t for t in tests if not t.startswith("tests/models/") or Path(t).parts[2] in IMPORTANT_MODELS] return {module: (filter_tests(tests) if has_many_models(tests) else tests) for module, tests in test_map.items()} def check_imports_all_exist(): """ Isn't used per se by the test fetcher but might be used later as a quality check. Putting this here for now so the code is not lost. This checks all imports in a given file do exist. 
""" cache = {} all_modules = list(PATH_TO_TRANFORMERS.glob("**/*.py")) + list(PATH_TO_TESTS.glob("**/*.py")) all_modules = [str(mod.relative_to(PATH_TO_REPO)) for mod in all_modules] direct_deps = {m: get_module_dependencies(m, cache=cache) for m in all_modules} for module, deps in direct_deps.items(): for dep in deps: if not (PATH_TO_REPO / dep).is_file(): print(f"{module} has dependency on {dep} which does not exist.") def _print_list(l) -> str: """ Pretty print a list of elements with one line per element and a - starting each line. """ return "\n".join([f"- {f}" for f in l]) def create_json_map(test_files_to_run: List[str], json_output_file: str): """ Creates a map from a list of tests to run to easily split them by category, when running parallelism of slow tests. Args: test_files_to_run (`List[str]`): The list of tests to run. json_output_file (`str`): The path where to store the built json map. """ if json_output_file is None: return test_map = {} for test_file in test_files_to_run: # `test_file` is a path to a test folder/file, starting with `tests/`. For example, # - `tests/models/bert/test_modeling_bert.py` or `tests/models/bert` # - `tests/trainer/test_trainer.py` or `tests/trainer` # - `tests/test_modeling_common.py` names = test_file.split(os.path.sep) if names[1] == "models": # take the part like `models/bert` for modeling tests key = os.path.sep.join(names[1:3]) elif len(names) > 2 or not test_file.endswith(".py"): # test folders under `tests` or python files under them # take the part like tokenization, `pipeline`, etc. for other test categories key = os.path.sep.join(names[1:2]) else: # common test files directly under `tests/` key = "common" if key not in test_map: test_map[key] = [] test_map[key].append(test_file) # sort the keys & values keys = sorted(test_map.keys()) test_map = {k: " ".join(sorted(test_map[k])) for k in keys} with open(json_output_file, "w", encoding="UTF-8") as fp: json.dump(test_map, fp, ensure_ascii=False) def infer_tests_to_run( output_file: str, diff_with_last_commit: bool = False, filter_models: bool = True, json_output_file: Optional[str] = None, ): """ The main function called by the test fetcher. Determines the tests to run from the diff. Args: output_file (`str`): The path where to store the summary of the test fetcher analysis. Other files will be stored in the same folder: - examples_test_list.txt: The list of examples tests to run. - test_repo_utils.txt: Will indicate if the repo utils tests should be run or not. - doctest_list.txt: The list of doctests to run. diff_with_last_commit (`bool`, *optional*, defaults to `False`): Whether to analyze the diff with the last commit (for use on the main branch after a PR is merged) or with the branching point from main (for use on each PR). filter_models (`bool`, *optional*, defaults to `True`): Whether or not to filter the tests to core models only, when a file modified results in a lot of model tests. json_output_file (`str`, *optional*): The path where to store the json file mapping categories of tests to tests to run (used for parallelism or the slow tests). """ modified_files = get_modified_python_files(diff_with_last_commit=diff_with_last_commit) print(f"\n### MODIFIED FILES ###\n{_print_list(modified_files)}") # Create the map that will give us all impacted modules. 
    reverse_map = create_reverse_dependency_map()

    impacted_files = modified_files.copy()
    for f in modified_files:
        if f in reverse_map:
            impacted_files.extend(reverse_map[f])

    # Remove duplicates
    impacted_files = sorted(set(impacted_files))
    print(f"\n### IMPACTED FILES ###\n{_print_list(impacted_files)}")

    # Grab the corresponding test files:
    if any(x in modified_files for x in ["setup.py", ".circleci/create_circleci_config.py"]):
        test_files_to_run = ["tests", "examples"]
        repo_utils_launch = True
    else:
        # All modified tests need to be run.
        test_files_to_run = [
            f for f in modified_files if f.startswith("tests") and f.split(os.path.sep)[-1].startswith("test")
        ]
        impacted_files = get_impacted_files_from_tiny_model_summary(diff_with_last_commit=diff_with_last_commit)

        # Then we grab the corresponding test files.
        test_map = create_module_to_test_map(reverse_map=reverse_map, filter_models=filter_models)
        for f in modified_files + impacted_files:
            if f in test_map:
                test_files_to_run.extend(test_map[f])
        test_files_to_run = sorted(set(test_files_to_run))
        # Remove repo utils tests
        test_files_to_run = [f for f in test_files_to_run if not f.split(os.path.sep)[1] == "repo_utils"]
        # Remove SageMaker tests
        test_files_to_run = [f for f in test_files_to_run if not f.split(os.path.sep)[1] == "sagemaker"]
        # Make sure we did not end up with a test file that was removed
        test_files_to_run = [f for f in test_files_to_run if (PATH_TO_REPO / f).exists()]

        repo_utils_launch = any(f.split(os.path.sep)[0] == "utils" for f in modified_files)

    if repo_utils_launch:
        repo_util_file = Path(output_file).parent / "test_repo_utils.txt"
        with open(repo_util_file, "w", encoding="utf-8") as f:
            f.write("tests/repo_utils")

    examples_tests_to_run = [f for f in test_files_to_run if f.startswith("examples")]
    test_files_to_run = [f for f in test_files_to_run if not f.startswith("examples")]
    print(f"\n### TEST TO RUN ###\n{_print_list(test_files_to_run)}")
    if len(test_files_to_run) > 0:
        with open(output_file, "w", encoding="utf-8") as f:
            f.write(" ".join(test_files_to_run))

        # Create a map that maps test categories to test files, i.e. `models/bert` -> [...test_modeling_bert.py, ...]

        # Get all test directories (and some common test files) under `tests` and `tests/models` if
        # `test_files_to_run` contains `tests` (i.e. when `setup.py` is changed).
        if "tests" in test_files_to_run:
            test_files_to_run = get_all_tests()

        create_json_map(test_files_to_run, json_output_file)

    print(f"\n### EXAMPLES TEST TO RUN ###\n{_print_list(examples_tests_to_run)}")
    if len(examples_tests_to_run) > 0:
        # We use `all` in the case `commit_flags["test_all"]` as well as in `create_circleci_config.py` for processing
        if examples_tests_to_run == ["examples"]:
            examples_tests_to_run = ["all"]
        example_file = Path(output_file).parent / "examples_test_list.txt"
        with open(example_file, "w", encoding="utf-8") as f:
            f.write(" ".join(examples_tests_to_run))

    doctest_list = get_doctest_files()

    print(f"\n### DOCTEST TO RUN ###\n{_print_list(doctest_list)}")
    if len(doctest_list) > 0:
        doctest_file = Path(output_file).parent / "doctest_list.txt"
        with open(doctest_file, "w", encoding="utf-8") as f:
            f.write(" ".join(doctest_list))


def filter_tests(output_file: str, filters: List[str]):
    """
    Reads the content of the output file and filters out all the tests in a list of given folders.

    Args:
        output_file (`str` or `os.PathLike`): The path to the output file of the tests fetcher.
        filters (`List[str]`): A list of folders to filter.
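
    Example (illustrative): `filter_tests("test_list.txt", ["pipelines", "repo_utils"])` removes any entry whose
    second path component is `pipelines` or `repo_utils` from the stored list.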
""" if not os.path.isfile(output_file): print("No test file found.") return with open(output_file, "r", encoding="utf-8") as f: test_files = f.read().split(" ") if len(test_files) == 0 or test_files == [""]: print("No tests to filter.") return if test_files == ["tests"]: test_files = [os.path.join("tests", f) for f in os.listdir("tests") if f not in ["__init__.py"] + filters] else: test_files = [f for f in test_files if f.split(os.path.sep)[1] not in filters] with open(output_file, "w", encoding="utf-8") as f: f.write(" ".join(test_files)) def parse_commit_message(commit_message: str) -> Dict[str, bool]: """ Parses the commit message to detect if a command is there to skip, force all or part of the CI. Args: commit_message (`str`): The commit message of the current commit. Returns: `Dict[str, bool]`: A dictionary of strings to bools with keys the following keys: `"skip"`, `"test_all_models"` and `"test_all"`. """ if commit_message is None: return {"skip": False, "no_filter": False, "test_all": False} command_search = re.search(r"\[([^\]]*)\]", commit_message) if command_search is not None: command = command_search.groups()[0] command = command.lower().replace("-", " ").replace("_", " ") skip = command in ["ci skip", "skip ci", "circleci skip", "skip circleci"] no_filter = set(command.split(" ")) == {"no", "filter"} test_all = set(command.split(" ")) == {"test", "all"} return {"skip": skip, "no_filter": no_filter, "test_all": test_all} else: return {"skip": False, "no_filter": False, "test_all": False} if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--output_file", type=str, default="test_list.txt", help="Where to store the list of tests to run" ) parser.add_argument( "--json_output_file", type=str, default="test_map.json", help="Where to store the tests to run in a dictionary format mapping test categories to test files", ) parser.add_argument( "--diff_with_last_commit", action="store_true", help="To fetch the tests between the current commit and the last commit", ) parser.add_argument( "--filter_tests", action="store_true", help="Will filter the pipeline/repo utils tests outside of the generated list of tests.", ) parser.add_argument( "--print_dependencies_of", type=str, help="Will only print the tree of modules depending on the file passed.", default=None, ) parser.add_argument( "--commit_message", type=str, help="The commit message (which could contain a command to force all tests or skip the CI).", default=None, ) args = parser.parse_args() if args.print_dependencies_of is not None: print_tree_deps_of(args.print_dependencies_of) elif args.filter_tests: filter_tests(args.output_file, ["pipelines", "repo_utils"]) else: repo = Repo(PATH_TO_REPO) commit_message = repo.head.commit.message commit_flags = parse_commit_message(commit_message) if commit_flags["skip"]: print("Force-skipping the CI") quit() if commit_flags["no_filter"]: print("Running all tests fetched without filtering.") if commit_flags["test_all"]: print("Force-launching all tests") diff_with_last_commit = args.diff_with_last_commit if not diff_with_last_commit and not repo.head.is_detached and repo.head.ref == repo.refs.main: print("main branch detected, fetching tests against last commit.") diff_with_last_commit = True if not commit_flags["test_all"]: try: infer_tests_to_run( args.output_file, diff_with_last_commit=diff_with_last_commit, json_output_file=args.json_output_file, filter_models=not commit_flags["no_filter"], ) filter_tests(args.output_file, ["repo_utils"]) except Exception as 
e: print(f"\nError when trying to grab the relevant tests: {e}\n\nRunning all tests.") commit_flags["test_all"] = True if commit_flags["test_all"]: with open(args.output_file, "w", encoding="utf-8") as f: f.write("tests") example_file = Path(args.output_file).parent / "examples_test_list.txt" with open(example_file, "w", encoding="utf-8") as f: f.write("all") test_files_to_run = get_all_tests() create_json_map(test_files_to_run, args.json_output_file)
hf_public_repos/transformers
hf_public_repos/transformers/utils/download_glue_data.py
""" Script for downloading all GLUE data. Original source: https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e Note: for legal reasons, we are unable to host MRPC. You can either use the version hosted by the SentEval team, which is already tokenized, or you can download the original data from (https://download.microsoft.com/download/D/4/6/D46FF87A-F6B9-4252-AA8B-3604ED519838/MSRParaphraseCorpus.msi) and extract the data from it manually. For Windows users, you can run the .msi file. For Mac and Linux users, consider an external library such as 'cabextract' (see below for an example). You should then rename and place specific files in a folder (see below for an example). mkdir MRPC cabextract MSRParaphraseCorpus.msi -d MRPC cat MRPC/_2DEC3DBE877E4DB192D17C0256E90F1D | tr -d $'\r' > MRPC/msr_paraphrase_train.txt cat MRPC/_D7B391F9EAFF4B1B8BCE8F21B20B1B61 | tr -d $'\r' > MRPC/msr_paraphrase_test.txt rm MRPC/_* rm MSRParaphraseCorpus.msi 1/30/19: It looks like SentEval is no longer hosting their extracted and tokenized MRPC data, so you'll need to download the data from the original source for now. 2/11/19: It looks like SentEval actually *is* hosting the extracted data. Hooray! """ import argparse import os import sys import urllib.request import zipfile TASKS = ["CoLA", "SST", "MRPC", "QQP", "STS", "MNLI", "SNLI", "QNLI", "RTE", "WNLI", "diagnostic"] TASK2PATH = { "CoLA": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FCoLA.zip?alt=media&token=46d5e637-3411-4188-bc44-5809b5bfb5f4", "SST": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8", "MRPC": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc", "QQP": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQQP.zip?alt=media&token=700c6acf-160d-4d89-81d1-de4191d02cb5", "STS": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSTS-B.zip?alt=media&token=bddb94a7-8706-4e0d-a694-1109e12273b5", "MNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce", "SNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSNLI.zip?alt=media&token=4afcfbb2-ff0c-4b2d-a09a-dbf07926f4df", "QNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQNLIv2.zip?alt=media&token=6fdcf570-0fc5-4631-8456-9505272d1601", "RTE": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FRTE.zip?alt=media&token=5efa7e85-a0bb-4f19-8ea2-9e1840f077fb", "WNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-4bd7-99a5-5e00222e0faf", "diagnostic": 
"https://storage.googleapis.com/mtl-sentence-representations.appspot.com/tsvsWithoutLabels%2FAX.tsv?GoogleAccessId=firebase-adminsdk-0khhl@mtl-sentence-representations.iam.gserviceaccount.com&Expires=2498860800&Signature=DuQ2CSPt2Yfre0C%2BiISrVYrIFaZH1Lc7hBVZDD4ZyR7fZYOMNOUGpi8QxBmTNOrNPjR3z1cggo7WXFfrgECP6FBJSsURv8Ybrue8Ypt%2FTPxbuJ0Xc2FhDi%2BarnecCBFO77RSbfuz%2Bs95hRrYhTnByqu3U%2FYZPaj3tZt5QdfpH2IUROY8LiBXoXS46LE%2FgOQc%2FKN%2BA9SoscRDYsnxHfG0IjXGwHN%2Bf88q6hOmAxeNPx6moDulUF6XMUAaXCSFU%2BnRO2RDL9CapWxj%2BDl7syNyHhB7987hZ80B%2FwFkQ3MEs8auvt5XW1%2Bd4aCU7ytgM69r8JDCwibfhZxpaa4gd50QXQ%3D%3D", } MRPC_TRAIN = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt" MRPC_TEST = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt" def download_and_extract(task, data_dir): print(f"Downloading and extracting {task}...") data_file = f"{task}.zip" urllib.request.urlretrieve(TASK2PATH[task], data_file) with zipfile.ZipFile(data_file) as zip_ref: zip_ref.extractall(data_dir) os.remove(data_file) print("\tCompleted!") def format_mrpc(data_dir, path_to_data): print("Processing MRPC...") mrpc_dir = os.path.join(data_dir, "MRPC") if not os.path.isdir(mrpc_dir): os.mkdir(mrpc_dir) if path_to_data: mrpc_train_file = os.path.join(path_to_data, "msr_paraphrase_train.txt") mrpc_test_file = os.path.join(path_to_data, "msr_paraphrase_test.txt") else: print("Local MRPC data not specified, downloading data from %s" % MRPC_TRAIN) mrpc_train_file = os.path.join(mrpc_dir, "msr_paraphrase_train.txt") mrpc_test_file = os.path.join(mrpc_dir, "msr_paraphrase_test.txt") urllib.request.urlretrieve(MRPC_TRAIN, mrpc_train_file) urllib.request.urlretrieve(MRPC_TEST, mrpc_test_file) if not os.path.isfile(mrpc_train_file): raise ValueError(f"Train data not found at {mrpc_train_file}") if not os.path.isfile(mrpc_test_file): raise ValueError(f"Test data not found at {mrpc_test_file}") urllib.request.urlretrieve(TASK2PATH["MRPC"], os.path.join(mrpc_dir, "dev_ids.tsv")) dev_ids = [] with open(os.path.join(mrpc_dir, "dev_ids.tsv"), encoding="utf8") as ids_fh: for row in ids_fh: dev_ids.append(row.strip().split("\t")) with open(mrpc_train_file, encoding="utf8") as data_fh, open( os.path.join(mrpc_dir, "train.tsv"), "w", encoding="utf8" ) as train_fh, open(os.path.join(mrpc_dir, "dev.tsv"), "w", encoding="utf8") as dev_fh: header = data_fh.readline() train_fh.write(header) dev_fh.write(header) for row in data_fh: label, id1, id2, s1, s2 = row.strip().split("\t") if [id1, id2] in dev_ids: dev_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2)) else: train_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2)) with open(mrpc_test_file, encoding="utf8") as data_fh, open( os.path.join(mrpc_dir, "test.tsv"), "w", encoding="utf8" ) as test_fh: header = data_fh.readline() test_fh.write("index\t#1 ID\t#2 ID\t#1 String\t#2 String\n") for idx, row in enumerate(data_fh): label, id1, id2, s1, s2 = row.strip().split("\t") test_fh.write("%d\t%s\t%s\t%s\t%s\n" % (idx, id1, id2, s1, s2)) print("\tCompleted!") def download_diagnostic(data_dir): print("Downloading and extracting diagnostic...") if not os.path.isdir(os.path.join(data_dir, "diagnostic")): os.mkdir(os.path.join(data_dir, "diagnostic")) data_file = os.path.join(data_dir, "diagnostic", "diagnostic.tsv") urllib.request.urlretrieve(TASK2PATH["diagnostic"], data_file) print("\tCompleted!") return def get_tasks(task_names): task_names = task_names.split(",") if "all" in task_names: tasks = TASKS else: tasks = [] for task_name in 
task_names:
            if task_name not in TASKS:
                raise ValueError(f"Task {task_name} not found!")
            tasks.append(task_name)
    return tasks


def main(arguments):
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", help="directory to save data to", type=str, default="glue_data")
    parser.add_argument(
        "--tasks", help="tasks to download data for as a comma separated string", type=str, default="all"
    )
    parser.add_argument(
        "--path_to_mrpc",
        help="path to directory containing extracted MRPC data, msr_paraphrase_train.txt and msr_paraphrase_test.txt",
        type=str,
        default="",
    )
    args = parser.parse_args(arguments)

    if not os.path.isdir(args.data_dir):
        os.mkdir(args.data_dir)
    tasks = get_tasks(args.tasks)

    for task in tasks:
        if task == "MRPC":
            format_mrpc(args.data_dir, args.path_to_mrpc)
        elif task == "diagnostic":
            download_diagnostic(args.data_dir)
        else:
            download_and_extract(task, args.data_dir)


if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
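# Example usage (illustrative):
#   python utils/download_glue_data.py --data_dir glue_data --tasks all
#   python utils/download_glue_data.py --tasks MRPC --path_to_mrpc /path/to/extracted/MRPC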
hf_public_repos/transformers
hf_public_repos/transformers/utils/notification_service.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ast import collections import functools import json import operator import os import re import sys import time from typing import Dict, List, Optional, Union import requests from get_ci_error_statistics import get_job_links from get_previous_daily_ci import get_last_daily_ci_reports from slack_sdk import WebClient client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"]) NON_MODEL_TEST_MODULES = [ "benchmark", "deepspeed", "extended", "fixtures", "generation", "onnx", "optimization", "pipelines", "sagemaker", "trainer", "utils", ] def handle_test_results(test_results): expressions = test_results.split(" ") failed = 0 success = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1] for i, expression in enumerate(expressions): if "failed" in expression: failed += int(expressions[i - 1]) if "passed" in expression: success += int(expressions[i - 1]) return failed, success, time_spent def handle_stacktraces(test_results): # These files should follow the following architecture: # === FAILURES === # <path>:<line>: Error ... # <path>:<line>: Error ... # <empty line> total_stacktraces = test_results.split("\n")[1:-1] stacktraces = [] for stacktrace in total_stacktraces: try: line = stacktrace[: stacktrace.index(" ")].split(":")[-2] error_message = stacktrace[stacktrace.index(" ") :] stacktraces.append(f"(line {line}) {error_message}") except Exception: stacktraces.append("Cannot retrieve error message.") return stacktraces def dicts_to_sum(objects: Union[Dict[str, Dict], List[dict]]): if isinstance(objects, dict): lists = objects.values() else: lists = objects # Convert each dictionary to counter counters = map(collections.Counter, lists) # Sum all the counters return functools.reduce(operator.add, counters) class Message: def __init__( self, title: str, ci_title: str, model_results: Dict, additional_results: Dict, selected_warnings: List = None, prev_ci_artifacts=None, ): self.title = title self.ci_title = ci_title # Failures and success of the modeling tests self.n_model_success = sum(r["success"] for r in model_results.values()) self.n_model_single_gpu_failures = sum(dicts_to_sum(r["failed"])["single"] for r in model_results.values()) self.n_model_multi_gpu_failures = sum(dicts_to_sum(r["failed"])["multi"] for r in model_results.values()) # Some suites do not have a distinction between single and multi GPU. 
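        # (those end up under the "unclassified" key). For reference, `dicts_to_sum` merges per-category counts,
        # e.g. dicts_to_sum({"a": {"single": 1}, "b": {"single": 2, "multi": 1}}) == Counter({"single": 3, "multi": 1}).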
        self.n_model_unknown_failures = sum(dicts_to_sum(r["failed"])["unclassified"] for r in model_results.values())
        self.n_model_failures = (
            self.n_model_single_gpu_failures + self.n_model_multi_gpu_failures + self.n_model_unknown_failures
        )

        # Failures and success of the additional tests
        self.n_additional_success = sum(r["success"] for r in additional_results.values())

        if len(additional_results) > 0:
            # `dicts_to_sum` requires a non-empty collection, so only aggregate when there are additional results.
            all_additional_failures = dicts_to_sum([r["failed"] for r in additional_results.values()])
            self.n_additional_single_gpu_failures = all_additional_failures["single"]
            self.n_additional_multi_gpu_failures = all_additional_failures["multi"]
            self.n_additional_unknown_gpu_failures = all_additional_failures["unclassified"]
        else:
            self.n_additional_single_gpu_failures = 0
            self.n_additional_multi_gpu_failures = 0
            self.n_additional_unknown_gpu_failures = 0

        self.n_additional_failures = (
            self.n_additional_single_gpu_failures
            + self.n_additional_multi_gpu_failures
            + self.n_additional_unknown_gpu_failures
        )

        # Results
        self.n_failures = self.n_model_failures + self.n_additional_failures
        self.n_success = self.n_model_success + self.n_additional_success
        self.n_tests = self.n_failures + self.n_success

        self.model_results = model_results
        self.additional_results = additional_results

        self.thread_ts = None

        if selected_warnings is None:
            selected_warnings = []
        self.selected_warnings = selected_warnings

        self.prev_ci_artifacts = prev_ci_artifacts

    @property
    def time(self) -> str:
        all_results = [*self.model_results.values(), *self.additional_results.values()]
        time_spent = [r["time_spent"].split(", ")[0] for r in all_results if len(r["time_spent"])]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def ci_title_section(self) -> Dict:
        return {"type": "section", "text": {"type": "mrkdwn", "text": self.ci_title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\n"
                    f"Number of model failures: {self.n_model_failures}.\n"
                    f"The suite ran in {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def warnings(self) -> Dict:
        # If something goes wrong, let's avoid the CI report failing to be sent.
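        # We default to the workflow-run URL and only switch to the warnings job URL when that job is found.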
button_text = "Check warnings (Link not found)" # Use the workflow run link job_link = f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}" if "Extract warnings in CI artifacts" in github_actions_job_links: button_text = "Check warnings" # Use the actual job link job_link = f"{github_actions_job_links['Extract warnings in CI artifacts']}" huggingface_hub_warnings = [x for x in self.selected_warnings if "huggingface_hub" in x] text = f"There are {len(self.selected_warnings)} warnings being selected." text += f"\n{len(huggingface_hub_warnings)} of them are from `huggingface_hub`." return { "type": "section", "text": { "type": "plain_text", "text": text, "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": button_text, "emoji": True}, "url": job_link, }, } @staticmethod def get_device_report(report, rjust=6): if "single" in report and "multi" in report: return f"{str(report['single']).rjust(rjust)} | {str(report['multi']).rjust(rjust)} | " elif "single" in report: return f"{str(report['single']).rjust(rjust)} | {'0'.rjust(rjust)} | " elif "multi" in report: return f"{'0'.rjust(rjust)} | {str(report['multi']).rjust(rjust)} | " @property def category_failures(self) -> Dict: model_failures = [v["failed"] for v in self.model_results.values()] category_failures = {} for model_failure in model_failures: for key, value in model_failure.items(): if key not in category_failures: category_failures[key] = dict(value) else: category_failures[key]["unclassified"] += value["unclassified"] category_failures[key]["single"] += value["single"] category_failures[key]["multi"] += value["multi"] individual_reports = [] for key, value in category_failures.items(): device_report = self.get_device_report(value) if sum(value.values()): if device_report: individual_reports.append(f"{device_report}{key}") else: individual_reports.append(key) header = "Single | Multi | Category\n" category_failures_report = prepare_reports( title="The following modeling categories had failures", header=header, reports=individual_reports ) return {"type": "section", "text": {"type": "mrkdwn", "text": category_failures_report}} def compute_diff_for_failure_reports(self, curr_failure_report, prev_failure_report): # noqa # Remove the leading and training parts that don't contain failure count information. 
        model_failures = curr_failure_report.split("\n")[3:-2]
        prev_model_failures = prev_failure_report.split("\n")[3:-2]
        entries_changed = set(model_failures).difference(prev_model_failures)

        prev_map = {}
        for f in prev_model_failures:
            items = [x.strip() for x in f.split("| ")]
            prev_map[items[-1]] = [int(x) for x in items[:-1]]

        curr_map = {}
        for f in entries_changed:
            items = [x.strip() for x in f.split("| ")]
            curr_map[items[-1]] = [int(x) for x in items[:-1]]

        diff_map = {}
        for k, v in curr_map.items():
            if k not in prev_map:
                diff_map[k] = v
            else:
                diff = [x - y for x, y in zip(v, prev_map[k])]
                if max(diff) > 0:
                    diff_map[k] = diff

        entries_changed = []
        for model_name, diff_values in diff_map.items():
            diff = [str(x) for x in diff_values]
            diff = [f"+{x}" if (x != "0" and not x.startswith("-")) else x for x in diff]
            diff = [x.rjust(9) for x in diff]
            device_report = " | ".join(diff) + " | "
            report = f"{device_report}{model_name}"
            entries_changed.append(report)
        entries_changed = sorted(entries_changed, key=lambda s: s.split("| ")[-1])

        return entries_changed

    @property
    def model_failures(self) -> List[Dict]:
        # Obtain per-model failures
        def per_model_sum(model_category_dict):
            return dicts_to_sum(model_category_dict["failed"].values())

        failures = {}
        non_model_failures = {
            k: per_model_sum(v) for k, v in self.model_results.items() if sum(per_model_sum(v).values())
        }

        for k, v in self.model_results.items():
            if k in NON_MODEL_TEST_MODULES:
                # Non-model modules are reported separately below (via `other_module_reports`), so skip them here.
                # (This was `pass` originally, which made the check a no-op.)
                continue

            if sum(per_model_sum(v).values()):
                dict_failed = dict(v["failed"])
                pytorch_specific_failures = dict_failed.pop("PyTorch")
                tensorflow_specific_failures = dict_failed.pop("TensorFlow")
                other_failures = dicts_to_sum(dict_failed.values())

                failures[k] = {
                    "PyTorch": pytorch_specific_failures,
                    "TensorFlow": tensorflow_specific_failures,
                    "other": other_failures,
                }

        model_reports = []
        other_module_reports = []
        for key, value in non_model_failures.items():
            if key in NON_MODEL_TEST_MODULES:
                device_report = self.get_device_report(value)

                if sum(value.values()):
                    if device_report:
                        report = f"{device_report}{key}"
                    else:
                        report = key

                    other_module_reports.append(report)

        for key, value in failures.items():
            device_report_values = [
                value["PyTorch"]["single"],
                value["PyTorch"]["multi"],
                value["TensorFlow"]["single"],
                value["TensorFlow"]["multi"],
                sum(value["other"].values()),
            ]

            if sum(device_report_values):
                device_report = " | ".join([str(x).rjust(9) for x in device_report_values]) + " | "
                report = f"{device_report}{key}"

                model_reports.append(report)

        # (Possibly truncated) reports for the current workflow run - to be sent to Slack channels
        model_header = "Single PT | Multi PT | Single TF | Multi TF | Other | Category\n"
        sorted_model_reports = sorted(model_reports, key=lambda s: s.split("| ")[-1])
        model_failures_report = prepare_reports(
            title="These following model modules had failures", header=model_header, reports=sorted_model_reports
        )

        module_header = "Single | Multi | Category\n"
        sorted_module_reports = sorted(other_module_reports, key=lambda s: s.split("| ")[-1])
        module_failures_report = prepare_reports(
            title="The following non-model modules had failures", header=module_header, reports=sorted_module_reports
        )

        # To be sent to Slack channels
        model_failure_sections = [
            {"type": "section", "text": {"type": "mrkdwn", "text": model_failures_report}},
            {"type": "section", "text": {"type": "mrkdwn", "text": module_failures_report}},
        ]

        # Save the complete (i.e.
no truncation) failure tables (of the current workflow run) # (to be uploaded as artifacts) model_failures_report = prepare_reports( title="These following model modules had failures", header=model_header, reports=sorted_model_reports, to_truncate=False, ) file_path = os.path.join(os.getcwd(), "prev_ci_results/model_failures_report.txt") with open(file_path, "w", encoding="UTF-8") as fp: fp.write(model_failures_report) module_failures_report = prepare_reports( title="The following non-model modules had failures", header=module_header, reports=sorted_module_reports, to_truncate=False, ) file_path = os.path.join(os.getcwd(), "prev_ci_results/module_failures_report.txt") with open(file_path, "w", encoding="UTF-8") as fp: fp.write(module_failures_report) if self.prev_ci_artifacts is not None: # if the last run produces artifact named `prev_ci_results` if ( "prev_ci_results" in self.prev_ci_artifacts and "model_failures_report.txt" in self.prev_ci_artifacts["prev_ci_results"] ): # Compute the difference of the previous/current (model failure) table prev_model_failures = self.prev_ci_artifacts["prev_ci_results"]["model_failures_report.txt"] entries_changed = self.compute_diff_for_failure_reports(model_failures_report, prev_model_failures) if len(entries_changed) > 0: # Save the complete difference diff_report = prepare_reports( title="Changed model modules failures", header=model_header, reports=entries_changed, to_truncate=False, ) file_path = os.path.join(os.getcwd(), "prev_ci_results/changed_model_failures_report.txt") with open(file_path, "w", encoding="UTF-8") as fp: fp.write(diff_report) # To be sent to Slack channels diff_report = prepare_reports( title="*Changed model modules failures*", header=model_header, reports=entries_changed, ) model_failure_sections.append( {"type": "section", "text": {"type": "mrkdwn", "text": diff_report}}, ) return model_failure_sections @property def additional_failures(self) -> Dict: failures = {k: v["failed"] for k, v in self.additional_results.items()} errors = {k: v["error"] for k, v in self.additional_results.items()} individual_reports = [] for key, value in failures.items(): device_report = self.get_device_report(value) if sum(value.values()) or errors[key]: report = f"{key}" if errors[key]: report = f"[Errored out] {report}" if device_report: report = f"{device_report}{report}" individual_reports.append(report) header = "Single | Multi | Category\n" failures_report = prepare_reports( title="The following non-modeling tests had failures", header=header, reports=individual_reports ) return {"type": "section", "text": {"type": "mrkdwn", "text": failures_report}} @property def payload(self) -> str: blocks = [self.header] if self.ci_title: blocks.append(self.ci_title_section) if self.n_model_failures > 0 or self.n_additional_failures > 0: blocks.append(self.failures) if self.n_model_failures > 0: blocks.append(self.category_failures) for block in self.model_failures: if block["text"]["text"]: blocks.append(block) if self.n_additional_failures > 0: blocks.append(self.additional_failures) if self.n_model_failures == 0 and self.n_additional_failures == 0: blocks.append(self.no_failures) if len(self.selected_warnings) > 0: blocks.append(self.warnings) new_failure_blocks = self.get_new_model_failure_blocks(with_header=False) if len(new_failure_blocks) > 0: blocks.extend(new_failure_blocks) return json.dumps(blocks) @staticmethod def error_out(title, ci_title="", runner_not_available=False, runner_failed=False, setup_failed=False): blocks = [] title_block = 
{"type": "header", "text": {"type": "plain_text", "text": title}} blocks.append(title_block) if ci_title: ci_title_block = {"type": "section", "text": {"type": "mrkdwn", "text": ci_title}} blocks.append(ci_title_block) offline_runners = [] if runner_not_available: text = "💔 CI runners are not available! Tests are not run. 😭" result = os.environ.get("OFFLINE_RUNNERS") if result is not None: offline_runners = json.loads(result) elif runner_failed: text = "💔 CI runners have problems! Tests are not run. 😭" elif setup_failed: text = "💔 Setup job failed. Tests are not run. 😭" else: text = "💔 There was an issue running the tests. 😭" error_block_1 = { "type": "header", "text": { "type": "plain_text", "text": text, }, } text = "" if len(offline_runners) > 0: text = "\n • " + "\n • ".join(offline_runners) text = f"The following runners are offline:\n{text}\n\n" text += "🙏 Let's fix it ASAP! 🙏" error_block_2 = { "type": "section", "text": { "type": "plain_text", "text": text, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } blocks.extend([error_block_1, error_block_2]) payload = json.dumps(blocks) print("Sending the following payload") print(json.dumps({"blocks": blocks})) client.chat_postMessage( channel=os.environ["CI_SLACK_REPORT_CHANNEL_ID"], text=text, blocks=payload, ) def post(self): payload = self.payload print("Sending the following payload") print(json.dumps({"blocks": json.loads(payload)})) text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed." self.thread_ts = client.chat_postMessage( channel=os.environ["CI_SLACK_REPORT_CHANNEL_ID"], blocks=payload, text=text, ) def get_reply_blocks(self, job_name, job_result, failures, device, text): """ failures: A list with elements of the form {"line": full test name, "trace": error trace} """ # `text` must be less than 3001 characters in Slack SDK # keep some room for adding "[Truncated]" when necessary MAX_ERROR_TEXT = 3000 - len("[Truncated]") failure_text = "" for idx, error in enumerate(failures): new_text = failure_text + f'*{error["line"]}*\n_{error["trace"]}_\n\n' if len(new_text) > MAX_ERROR_TEXT: # `failure_text` here has length <= 3000 failure_text = failure_text + "[Truncated]" break # `failure_text` here has length <= MAX_ERROR_TEXT failure_text = new_text title = job_name if device is not None: title += f" ({device}-gpu)" content = {"type": "section", "text": {"type": "mrkdwn", "text": text}} # TODO: Make sure we always have a valid job link (or at least a way not to break the report sending) # Currently we get the device from a job's artifact name. # If a device is found, the job name should contain the device type, for example, `XXX (single-gpu)`. # This could be done by adding `machine_type` in a job's `strategy`. # (If `job_result["job_link"][device]` is `None`, we get an error: `... 
    def get_reply_blocks(self, job_name, job_result, failures, device, text):
        """
        failures: A list with elements of the form {"line": full test name, "trace": error trace}
        """
        # `text` must be less than 3001 characters in Slack SDK
        # keep some room for adding "[Truncated]" when necessary
        MAX_ERROR_TEXT = 3000 - len("[Truncated]")

        failure_text = ""
        for idx, error in enumerate(failures):
            new_text = failure_text + f'*{error["line"]}*\n_{error["trace"]}_\n\n'
            if len(new_text) > MAX_ERROR_TEXT:
                # `failure_text` here has length <= 3000
                failure_text = failure_text + "[Truncated]"
                break
            # `failure_text` here has length <= MAX_ERROR_TEXT
            failure_text = new_text

        title = job_name
        if device is not None:
            title += f" ({device}-gpu)"

        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        # TODO: Make sure we always have a valid job link (or at least a way not to break the report sending)
        # Currently we get the device from a job's artifact name.
        # If a device is found, the job name should contain the device type, for example, `XXX (single-gpu)`.
        # This could be done by adding `machine_type` in a job's `strategy`.
        # (If `job_result["job_link"][device]` is `None`, we get an error: `... [ERROR] must provide a string ...`)
        if job_result["job_link"] is not None and job_result["job_link"][device] is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_result["job_link"][device],
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failure_text}},
        ]

    def get_new_model_failure_blocks(self, with_header=True):
        if self.prev_ci_artifacts is None:
            return []

        sorted_dict = sorted(self.model_results.items(), key=lambda t: t[0])

        prev_model_results = {}
        if (
            "prev_ci_results" in self.prev_ci_artifacts
            and "model_results.json" in self.prev_ci_artifacts["prev_ci_results"]
        ):
            prev_model_results = json.loads(self.prev_ci_artifacts["prev_ci_results"]["model_results.json"])

        all_failure_lines = {}
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                devices = sorted(job_result["failures"].keys(), reverse=True)
                for device in devices:
                    failures = job_result["failures"][device]
                    prev_error_lines = {}
                    if job in prev_model_results and device in prev_model_results[job]["failures"]:
                        prev_error_lines = {error["line"] for error in prev_model_results[job]["failures"][device]}

                    url = None
                    if job_result["job_link"] is not None and job_result["job_link"][device] is not None:
                        url = job_result["job_link"][device]

                    for idx, error in enumerate(failures):
                        if error["line"] in prev_error_lines:
                            continue

                        new_text = f'{error["line"]}\n\n'

                        if new_text not in all_failure_lines:
                            all_failure_lines[new_text] = []

                        all_failure_lines[new_text].append(f"<{url}|{device}>" if url is not None else device)

        MAX_ERROR_TEXT = 3000 - len("[Truncated]") - len("```New model failures```\n\n")
        failure_text = ""
        for line, devices in all_failure_lines.items():
            new_text = failure_text + f"{'|'.join(devices)} gpu\n{line}"
            if len(new_text) > MAX_ERROR_TEXT:
                # `failure_text` here has length <= 3000
                failure_text = failure_text + "[Truncated]"
                break
            # `failure_text` here has length <= MAX_ERROR_TEXT
            failure_text = new_text

        blocks = []
        if failure_text:
            if with_header:
                blocks.append(
                    {"type": "header", "text": {"type": "plain_text", "text": "New model failures", "emoji": True}}
                )
            else:
                failure_text = f"*New model failures*\n\n{failure_text}"
            blocks.append({"type": "section", "text": {"type": "mrkdwn", "text": failure_text}})

        return blocks
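    # Sketch of the "new failure" detection above (hypothetical test names): a failure
    # is considered new when its test line is absent from the previous run's
    # `model_results.json`, e.g.
    #     prev_error_lines = {"tests/models/bert/test_modeling_bert.py::test_a"}
    #     error["line"] == "tests/models/bert/test_modeling_bert.py::test_b"  # -> reported
    # Each reported line is annotated with the failing device(s), linked to the job when
    # a URL is available.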
channel=os.environ["CI_SLACK_REPORT_CHANNEL_ID"], text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts["ts"], ) time.sleep(1) blocks = self.get_new_model_failure_blocks() if blocks: print("Sending the following reply") print(json.dumps({"blocks": blocks})) client.chat_postMessage( channel=os.environ["CI_SLACK_REPORT_CHANNEL_ID"], text="Results for new failures", blocks=blocks, thread_ts=self.thread_ts["ts"], ) time.sleep(1) def retrieve_artifact(artifact_path: str, gpu: Optional[str]): if gpu not in [None, "single", "multi"]: raise ValueError(f"Invalid GPU for artifact. Passed GPU: `{gpu}`.") _artifact = {} if os.path.exists(artifact_path): files = os.listdir(artifact_path) for file in files: try: with open(os.path.join(artifact_path, file)) as f: _artifact[file.split(".")[0]] = f.read() except UnicodeDecodeError as e: raise ValueError(f"Could not open {os.path.join(artifact_path, file)}.") from e return _artifact def retrieve_available_artifacts(): class Artifact: def __init__(self, name: str, single_gpu: bool = False, multi_gpu: bool = False): self.name = name self.single_gpu = single_gpu self.multi_gpu = multi_gpu self.paths = [] def __str__(self): return self.name def add_path(self, path: str, gpu: str = None): self.paths.append({"name": self.name, "path": path, "gpu": gpu}) _available_artifacts: Dict[str, Artifact] = {} directories = filter(os.path.isdir, os.listdir()) for directory in directories: artifact_name = directory name_parts = artifact_name.split("_postfix_") if len(name_parts) > 1: artifact_name = name_parts[0] if artifact_name.startswith("single-gpu"): artifact_name = artifact_name[len("single-gpu") + 1 :] if artifact_name in _available_artifacts: _available_artifacts[artifact_name].single_gpu = True else: _available_artifacts[artifact_name] = Artifact(artifact_name, single_gpu=True) _available_artifacts[artifact_name].add_path(directory, gpu="single") elif artifact_name.startswith("multi-gpu"): artifact_name = artifact_name[len("multi-gpu") + 1 :] if artifact_name in _available_artifacts: _available_artifacts[artifact_name].multi_gpu = True else: _available_artifacts[artifact_name] = Artifact(artifact_name, multi_gpu=True) _available_artifacts[artifact_name].add_path(directory, gpu="multi") else: if artifact_name not in _available_artifacts: _available_artifacts[artifact_name] = Artifact(artifact_name) _available_artifacts[artifact_name].add_path(directory) return _available_artifacts def prepare_reports(title, header, reports, to_truncate=True): report = "" MAX_ERROR_TEXT = 3000 - len("[Truncated]") if not to_truncate: MAX_ERROR_TEXT = float("inf") if len(reports) > 0: # `text` must be less than 3001 characters in Slack SDK # keep some room for adding "[Truncated]" when necessary for idx in range(len(reports)): _report = header + "\n".join(reports[: idx + 1]) new_report = f"{title}:\n```\n{_report}\n```\n" if len(new_report) > MAX_ERROR_TEXT: # `report` here has length <= 3000 report = report + "[Truncated]" break report = new_report return report if __name__ == "__main__": # runner_status = os.environ.get("RUNNER_STATUS") # runner_env_status = os.environ.get("RUNNER_ENV_STATUS") setup_status = os.environ.get("SETUP_STATUS") # runner_not_available = True if runner_status is not None and runner_status != "success" else False # runner_failed = True if runner_env_status is not None and runner_env_status != "success" else False # Let's keep the lines regardig runners' status (we might be able to use them again in the future) runner_not_available = False 
if __name__ == "__main__":
    # runner_status = os.environ.get("RUNNER_STATUS")
    # runner_env_status = os.environ.get("RUNNER_ENV_STATUS")
    setup_status = os.environ.get("SETUP_STATUS")

    # runner_not_available = True if runner_status is not None and runner_status != "success" else False
    # runner_failed = True if runner_env_status is not None and runner_env_status != "success" else False
    # Let's keep the lines regarding the runners' status (we might be able to use them again in the future)
    runner_not_available = False
    runner_failed = False
    setup_failed = True if setup_status is not None and setup_status != "success" else False

    org = "huggingface"
    repo = "transformers"
    repository_full_name = f"{org}/{repo}"

    # This env. variable is set in workflow file (under the job `send_results`).
    ci_event = os.environ["CI_EVENT"]

    # To find the PR number in a commit title, for example, `Add AwesomeFormer model (#99999)`
    pr_number_re = re.compile(r"\(#(\d+)\)$")

    title = f"🤗 Results of the {ci_event} tests."
    # Add Commit/PR title with a link for push CI
    # (check the title in 2 env. variables - depending on whether the CI is triggered via `push` or `workflow_run` event)
    ci_title_push = os.environ.get("CI_TITLE_PUSH")
    ci_title_workflow_run = os.environ.get("CI_TITLE_WORKFLOW_RUN")
    ci_title = ci_title_push if ci_title_push else ci_title_workflow_run

    ci_sha = os.environ.get("CI_SHA")

    ci_url = None
    if ci_sha:
        ci_url = f"https://github.com/{repository_full_name}/commit/{ci_sha}"

    if ci_title is not None:
        if ci_url is None:
            raise ValueError(
                "When a title is found (`ci_title`), it means a `push` event or a `workflow_run` event (triggered by "
                "another `push` event), and the commit SHA has to be provided in order to create the URL to the "
                "commit page."
            )
        ci_title = ci_title.strip().split("\n")[0].strip()

        # Retrieve the PR title and author login to complete the report
        commit_number = ci_url.split("/")[-1]
        ci_detail_url = f"https://api.github.com/repos/{repository_full_name}/commits/{commit_number}"
        ci_details = requests.get(ci_detail_url).json()
        ci_author = ci_details["author"]["login"]

        merged_by = None
        # Find the PR number (if any) and change the url to the actual PR page.
        numbers = pr_number_re.findall(ci_title)
        if len(numbers) > 0:
            pr_number = numbers[0]
            ci_detail_url = f"https://api.github.com/repos/{repository_full_name}/pulls/{pr_number}"
            ci_details = requests.get(ci_detail_url).json()

            ci_author = ci_details["user"]["login"]
            ci_url = f"https://github.com/{repository_full_name}/pull/{pr_number}"

            merged_by = ci_details["merged_by"]["login"]

        if merged_by is None:
            ci_title = f"<{ci_url}|{ci_title}>\nAuthor: {ci_author}"
        else:
            ci_title = f"<{ci_url}|{ci_title}>\nAuthor: {ci_author} | Merged by: {merged_by}"

    elif ci_sha:
        ci_title = f"<{ci_url}|commit: {ci_sha}>"

    else:
        ci_title = ""

    if runner_not_available or runner_failed or setup_failed:
        Message.error_out(title, ci_title, runner_not_available, runner_failed, setup_failed)
        exit(0)
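    # For reference, `ci_title` now looks like one of the following (hypothetical values):
    #     "<https://github.com/huggingface/transformers/pull/12345|Add FooFormer model (#12345)>\nAuthor: foo | Merged by: bar"
    #     "<https://github.com/huggingface/transformers/commit/abc123|commit: abc123>"
    # or the empty string when neither a title nor a commit SHA is available.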
models = [x.replace("models/", "models_") for x in models] except SyntaxError: Message.error_out(title, ci_title) raise ValueError("Errored out.") github_actions_job_links = get_job_links( workflow_run_id=os.environ["GITHUB_RUN_ID"], token=os.environ["ACCESS_REPO_INFO_TOKEN"] ) available_artifacts = retrieve_available_artifacts() modeling_categories = [ "PyTorch", "TensorFlow", "Flax", "Tokenizers", "Pipelines", "Trainer", "ONNX", "Auto", "Unclassified", ] # This dict will contain all the information relative to each model: # - Failures: the total, as well as the number of failures per-category defined above # - Success: total # - Time spent: as a comma-separated list of elapsed time # - Failures: as a line-break separated list of errors model_results = { model: { "failed": {m: {"unclassified": 0, "single": 0, "multi": 0} for m in modeling_categories}, "success": 0, "time_spent": "", "failures": {}, "job_link": {}, } for model in models if f"run_all_tests_gpu_{model}_test_reports" in available_artifacts } unclassified_model_failures = [] # This prefix is used to get job links below. For past CI, we use `workflow_call`, which changes the job names from # `Model tests (...)` to `PyTorch 1.5 / Model tests (...)` for example. job_name_prefix = "" if ci_event.startswith("Past CI - "): framework, version = ci_event.replace("Past CI - ", "").split("-") framework = "PyTorch" if framework == "pytorch" else "TensorFlow" job_name_prefix = f"{framework} {version}" elif ci_event.startswith("Nightly CI"): job_name_prefix = "Nightly CI" elif ci_event.startswith("Push CI (AMD) - "): flavor = ci_event.replace("Push CI (AMD) - ", "") job_name_prefix = f"AMD {flavor}" elif ci_event.startswith("Scheduled CI (AMD) - "): flavor = ci_event.replace("Scheduled CI (AMD) - ", "") job_name_prefix = f"AMD {flavor}" for model in model_results.keys(): for artifact_path in available_artifacts[f"run_all_tests_gpu_{model}_test_reports"].paths: artifact = retrieve_artifact(artifact_path["path"], artifact_path["gpu"]) if "stats" in artifact: # Link to the GitHub Action job # The job names use `matrix.folder` which contain things like `models/bert` instead of `models_bert` job_name = f"Model tests ({model.replace('models_', 'models/')}, {artifact_path['gpu']}-gpu)" if job_name_prefix: job_name = f"{job_name_prefix} / {job_name}" model_results[model]["job_link"][artifact_path["gpu"]] = github_actions_job_links.get(job_name) failed, success, time_spent = handle_test_results(artifact["stats"]) model_results[model]["success"] += success model_results[model]["time_spent"] += time_spent[1:-1] + ", " stacktraces = handle_stacktraces(artifact["failures_line"]) for line in artifact["summary_short"].split("\n"): if line.startswith("FAILED "): line = line[len("FAILED ") :] line = line.split()[0].replace("\n", "") if artifact_path["gpu"] not in model_results[model]["failures"]: model_results[model]["failures"][artifact_path["gpu"]] = [] model_results[model]["failures"][artifact_path["gpu"]].append( {"line": line, "trace": stacktraces.pop(0)} ) if re.search("test_modeling_tf_", line): model_results[model]["failed"]["TensorFlow"][artifact_path["gpu"]] += 1 elif re.search("test_modeling_flax_", line): model_results[model]["failed"]["Flax"][artifact_path["gpu"]] += 1 elif re.search("test_modeling", line): model_results[model]["failed"]["PyTorch"][artifact_path["gpu"]] += 1 elif re.search("test_tokenization", line): model_results[model]["failed"]["Tokenizers"][artifact_path["gpu"]] += 1 elif re.search("test_pipelines", line): 
model_results[model]["failed"]["Pipelines"][artifact_path["gpu"]] += 1 elif re.search("test_trainer", line): model_results[model]["failed"]["Trainer"][artifact_path["gpu"]] += 1 elif re.search("onnx", line): model_results[model]["failed"]["ONNX"][artifact_path["gpu"]] += 1 elif re.search("auto", line): model_results[model]["failed"]["Auto"][artifact_path["gpu"]] += 1 else: model_results[model]["failed"]["Unclassified"][artifact_path["gpu"]] += 1 unclassified_model_failures.append(line) # Additional runs additional_files = { "Examples directory": "run_examples_gpu", "PyTorch pipelines": "run_tests_torch_pipeline_gpu", "TensorFlow pipelines": "run_tests_tf_pipeline_gpu", "Torch CUDA extension tests": "run_tests_torch_cuda_extensions_gpu_test_reports", } if ci_event in ["push", "Nightly CI"] or ci_event.startswith("Past CI"): del additional_files["Examples directory"] del additional_files["PyTorch pipelines"] del additional_files["TensorFlow pipelines"] elif ci_event.startswith("Scheduled CI (AMD)"): del additional_files["TensorFlow pipelines"] del additional_files["Torch CUDA extension tests"] elif ci_event.startswith("Push CI (AMD)"): additional_files = {} additional_results = { key: { "failed": {"unclassified": 0, "single": 0, "multi": 0}, "success": 0, "time_spent": "", "error": False, "failures": {}, "job_link": {}, } for key in additional_files.keys() } for key in additional_results.keys(): # If a whole suite of test fails, the artifact isn't available. if additional_files[key] not in available_artifacts: additional_results[key]["error"] = True continue for artifact_path in available_artifacts[additional_files[key]].paths: # Link to the GitHub Action job job_name = key if artifact_path["gpu"] is not None: job_name = f"{key} ({artifact_path['gpu']}-gpu)" if job_name_prefix: job_name = f"{job_name_prefix} / {job_name}" additional_results[key]["job_link"][artifact_path["gpu"]] = github_actions_job_links.get(job_name) artifact = retrieve_artifact(artifact_path["path"], artifact_path["gpu"]) stacktraces = handle_stacktraces(artifact["failures_line"]) failed, success, time_spent = handle_test_results(artifact["stats"]) additional_results[key]["failed"][artifact_path["gpu"] or "unclassified"] += failed additional_results[key]["success"] += success additional_results[key]["time_spent"] += time_spent[1:-1] + ", " if len(artifact["errors"]): additional_results[key]["error"] = True if failed: for line in artifact["summary_short"].split("\n"): if line.startswith("FAILED "): line = line[len("FAILED ") :] line = line.split()[0].replace("\n", "") if artifact_path["gpu"] not in additional_results[key]["failures"]: additional_results[key]["failures"][artifact_path["gpu"]] = [] additional_results[key]["failures"][artifact_path["gpu"]].append( {"line": line, "trace": stacktraces.pop(0)} ) selected_warnings = [] if "warnings_in_ci" in available_artifacts: directory = available_artifacts["warnings_in_ci"].paths[0]["path"] with open(os.path.join(directory, "selected_warnings.json")) as fp: selected_warnings = json.load(fp) if not os.path.isdir(os.path.join(os.getcwd(), "prev_ci_results")): os.makedirs(os.path.join(os.getcwd(), "prev_ci_results")) with open("prev_ci_results/model_results.json", "w", encoding="UTF-8") as fp: json.dump(model_results, fp, indent=4, ensure_ascii=False) prev_ci_artifacts = None target_workflow = "huggingface/transformers/.github/workflows/self-scheduled.yml@refs/heads/main" if os.environ.get("CI_WORKFLOW_REF") == target_workflow: # Get the last previously completed CI's failure 
    prev_ci_artifacts = None
    target_workflow = "huggingface/transformers/.github/workflows/self-scheduled.yml@refs/heads/main"
    if os.environ.get("CI_WORKFLOW_REF") == target_workflow:
        # Get the last previously completed CI's failure tables
        artifact_names = ["prev_ci_results"]
        output_dir = os.path.join(os.getcwd(), "previous_reports")
        os.makedirs(output_dir, exist_ok=True)
        prev_ci_artifacts = get_last_daily_ci_reports(
            artifact_names=artifact_names, output_dir=output_dir, token=os.environ["ACCESS_REPO_INFO_TOKEN"]
        )

    message = Message(
        title,
        ci_title,
        model_results,
        additional_results,
        selected_warnings=selected_warnings,
        prev_ci_artifacts=prev_ci_artifacts,
    )

    # send report only if there is any failure (for push CI)
    if message.n_failures or (ci_event != "push" and not ci_event.startswith("Push CI (AMD)")):
        message.post()
        message.post_reply()
0
hf_public_repos/transformers
hf_public_repos/transformers/utils/not_doctested.txt
docs/source/en/_config.py docs/source/en/accelerate.md docs/source/en/add_new_model.md docs/source/en/add_new_pipeline.md docs/source/en/add_tensorflow_model.md docs/source/en/attention.md docs/source/en/benchmarks.md docs/source/en/bertology.md docs/source/en/big_models.md docs/source/en/community.md docs/source/en/contributing.md docs/source/en/create_a_model.md docs/source/en/custom_models.md docs/source/en/custom_tools.md docs/source/en/debugging.md docs/source/en/fast_tokenizers.md docs/source/en/glossary.md docs/source/en/hpo_train.md docs/source/en/index.md docs/source/en/installation.md docs/source/en/internal/audio_utils.md docs/source/en/internal/file_utils.md docs/source/en/internal/image_processing_utils.md docs/source/en/internal/modeling_utils.md docs/source/en/internal/pipelines_utils.md docs/source/en/internal/time_series_utils.md docs/source/en/internal/tokenization_utils.md docs/source/en/internal/trainer_utils.md docs/source/en/llm_tutorial.md docs/source/en/main_classes/agent.md docs/source/en/main_classes/callback.md docs/source/en/main_classes/configuration.md docs/source/en/main_classes/data_collator.md docs/source/en/main_classes/deepspeed.md docs/source/en/main_classes/feature_extractor.md docs/source/en/main_classes/image_processor.md docs/source/en/main_classes/keras_callbacks.md docs/source/en/main_classes/logging.md docs/source/en/main_classes/model.md docs/source/en/main_classes/onnx.md docs/source/en/main_classes/optimizer_schedules.md docs/source/en/main_classes/output.md docs/source/en/main_classes/pipelines.md docs/source/en/main_classes/processors.md docs/source/en/main_classes/quantization.md docs/source/en/main_classes/tokenizer.md docs/source/en/main_classes/trainer.md docs/source/en/model_doc/albert.md docs/source/en/model_doc/align.md docs/source/en/model_doc/altclip.md docs/source/en/model_doc/audio-spectrogram-transformer.md docs/source/en/model_doc/auto.md docs/source/en/model_doc/autoformer.md docs/source/en/model_doc/bark.md docs/source/en/model_doc/bart.md docs/source/en/model_doc/barthez.md docs/source/en/model_doc/bartpho.md docs/source/en/model_doc/beit.md docs/source/en/model_doc/bert-generation.md docs/source/en/model_doc/bert-japanese.md docs/source/en/model_doc/bert.md docs/source/en/model_doc/bertweet.md docs/source/en/model_doc/big_bird.md docs/source/en/model_doc/bigbird_pegasus.md docs/source/en/model_doc/biogpt.md docs/source/en/model_doc/bit.md docs/source/en/model_doc/blenderbot-small.md docs/source/en/model_doc/blenderbot.md docs/source/en/model_doc/blip-2.md docs/source/en/model_doc/blip.md docs/source/en/model_doc/bloom.md docs/source/en/model_doc/bort.md docs/source/en/model_doc/bridgetower.md docs/source/en/model_doc/camembert.md docs/source/en/model_doc/canine.md docs/source/en/model_doc/chinese_clip.md docs/source/en/model_doc/clap.md docs/source/en/model_doc/clip.md docs/source/en/model_doc/clipseg.md docs/source/en/model_doc/codegen.md docs/source/en/model_doc/conditional_detr.md docs/source/en/model_doc/convbert.md docs/source/en/model_doc/convnext.md docs/source/en/model_doc/convnextv2.md docs/source/en/model_doc/cpm.md docs/source/en/model_doc/cpmant.md docs/source/en/model_doc/ctrl.md docs/source/en/model_doc/cvt.md docs/source/en/model_doc/data2vec.md docs/source/en/model_doc/deberta-v2.md docs/source/en/model_doc/deberta.md docs/source/en/model_doc/decision_transformer.md docs/source/en/model_doc/deformable_detr.md docs/source/en/model_doc/deit.md docs/source/en/model_doc/deplot.md docs/source/en/model_doc/deta.md 
docs/source/en/model_doc/detr.md docs/source/en/model_doc/dialogpt.md docs/source/en/model_doc/dinat.md docs/source/en/model_doc/dinov2.md docs/source/en/model_doc/distilbert.md docs/source/en/model_doc/dit.md docs/source/en/model_doc/dpr.md docs/source/en/model_doc/dpt.md docs/source/en/model_doc/efficientformer.md docs/source/en/model_doc/efficientnet.md docs/source/en/model_doc/electra.md docs/source/en/model_doc/encodec.md docs/source/en/model_doc/ernie.md docs/source/en/model_doc/ernie_m.md docs/source/en/model_doc/esm.md docs/source/en/model_doc/flan-t5.md docs/source/en/model_doc/flan-ul2.md docs/source/en/model_doc/flaubert.md docs/source/en/model_doc/flava.md docs/source/en/model_doc/fnet.md docs/source/en/model_doc/focalnet.md docs/source/en/model_doc/fsmt.md docs/source/en/model_doc/funnel.md docs/source/en/model_doc/git.md docs/source/en/model_doc/glpn.md docs/source/en/model_doc/gpt-sw3.md docs/source/en/model_doc/gpt2.md docs/source/en/model_doc/gpt_bigcode.md docs/source/en/model_doc/gpt_neo.md docs/source/en/model_doc/gpt_neox.md docs/source/en/model_doc/gpt_neox_japanese.md docs/source/en/model_doc/gptj.md docs/source/en/model_doc/gptsan-japanese.md docs/source/en/model_doc/graphormer.md docs/source/en/model_doc/groupvit.md docs/source/en/model_doc/herbert.md docs/source/en/model_doc/hubert.md docs/source/en/model_doc/ibert.md docs/source/en/model_doc/idefics.md docs/source/en/model_doc/imagegpt.md docs/source/en/model_doc/informer.md docs/source/en/model_doc/instructblip.md docs/source/en/model_doc/jukebox.md docs/source/en/model_doc/layoutlm.md docs/source/en/model_doc/layoutlmv2.md docs/source/en/model_doc/layoutlmv3.md docs/source/en/model_doc/layoutxlm.md docs/source/en/model_doc/led.md docs/source/en/model_doc/levit.md docs/source/en/model_doc/lilt.md docs/source/en/model_doc/llama.md docs/source/en/model_doc/llama2.md docs/source/en/model_doc/llava.md docs/source/en/model_doc/longformer.md docs/source/en/model_doc/longt5.md docs/source/en/model_doc/luke.md docs/source/en/model_doc/lxmert.md docs/source/en/model_doc/m2m_100.md docs/source/en/model_doc/madlad-400.md docs/source/en/model_doc/marian.md docs/source/en/model_doc/mask2former.md docs/source/en/model_doc/maskformer.md docs/source/en/model_doc/matcha.md docs/source/en/model_doc/mbart.md docs/source/en/model_doc/mctct.md docs/source/en/model_doc/mega.md docs/source/en/model_doc/megatron-bert.md docs/source/en/model_doc/megatron_gpt2.md docs/source/en/model_doc/mgp-str.md docs/source/en/model_doc/mistral.md docs/source/en/model_doc/mluke.md docs/source/en/model_doc/mms.md docs/source/en/model_doc/mobilebert.md docs/source/en/model_doc/mobilenet_v1.md docs/source/en/model_doc/mobilenet_v2.md docs/source/en/model_doc/mobilevit.md docs/source/en/model_doc/mobilevitv2.md docs/source/en/model_doc/mpnet.md docs/source/en/model_doc/mpt.md docs/source/en/model_doc/mra.md docs/source/en/model_doc/mt5.md docs/source/en/model_doc/musicgen.md docs/source/en/model_doc/mvp.md docs/source/en/model_doc/nat.md docs/source/en/model_doc/nezha.md docs/source/en/model_doc/nllb-moe.md docs/source/en/model_doc/nllb.md docs/source/en/model_doc/nystromformer.md docs/source/en/model_doc/oneformer.md docs/source/en/model_doc/open-llama.md docs/source/en/model_doc/openai-gpt.md docs/source/en/model_doc/opt.md docs/source/en/model_doc/owlvit.md docs/source/en/model_doc/pegasus.md docs/source/en/model_doc/pegasus_x.md docs/source/en/model_doc/perceiver.md docs/source/en/model_doc/phobert.md docs/source/en/model_doc/pix2struct.md 
docs/source/en/model_doc/plbart.md docs/source/en/model_doc/poolformer.md docs/source/en/model_doc/pop2piano.md docs/source/en/model_doc/prophetnet.md docs/source/en/model_doc/pvt.md docs/source/en/model_doc/qdqbert.md docs/source/en/model_doc/rag.md docs/source/en/model_doc/realm.md docs/source/en/model_doc/reformer.md docs/source/en/model_doc/regnet.md docs/source/en/model_doc/rembert.md docs/source/en/model_doc/resnet.md docs/source/en/model_doc/retribert.md docs/source/en/model_doc/roberta-prelayernorm.md docs/source/en/model_doc/roberta.md docs/source/en/model_doc/roc_bert.md docs/source/en/model_doc/roformer.md docs/source/en/model_doc/rwkv.md docs/source/en/model_doc/sam.md docs/source/en/model_doc/segformer.md docs/source/en/model_doc/sew-d.md docs/source/en/model_doc/sew.md docs/source/en/model_doc/speech-encoder-decoder.md docs/source/en/model_doc/speech_to_text_2.md docs/source/en/model_doc/speecht5.md docs/source/en/model_doc/splinter.md docs/source/en/model_doc/squeezebert.md docs/source/en/model_doc/swiftformer.md docs/source/en/model_doc/swin.md docs/source/en/model_doc/swin2sr.md docs/source/en/model_doc/swinv2.md docs/source/en/model_doc/table-transformer.md docs/source/en/model_doc/tapas.md docs/source/en/model_doc/time_series_transformer.md docs/source/en/model_doc/timesformer.md docs/source/en/model_doc/trajectory_transformer.md docs/source/en/model_doc/transfo-xl.md docs/source/en/model_doc/trocr.md docs/source/en/model_doc/tvlt.md docs/source/en/model_doc/ul2.md docs/source/en/model_doc/umt5.md docs/source/en/model_doc/unispeech-sat.md docs/source/en/model_doc/unispeech.md docs/source/en/model_doc/upernet.md docs/source/en/model_doc/van.md docs/source/en/model_doc/videomae.md docs/source/en/model_doc/vilt.md docs/source/en/model_doc/vision-encoder-decoder.md docs/source/en/model_doc/vision-text-dual-encoder.md docs/source/en/model_doc/visual_bert.md docs/source/en/model_doc/vit.md docs/source/en/model_doc/vit_hybrid.md docs/source/en/model_doc/vit_mae.md docs/source/en/model_doc/vit_msn.md docs/source/en/model_doc/vivit.md docs/source/en/model_doc/wav2vec2-conformer.md docs/source/en/model_doc/wav2vec2.md docs/source/en/model_doc/wav2vec2_phoneme.md docs/source/en/model_doc/wavlm.md docs/source/en/model_doc/whisper.md docs/source/en/model_doc/xclip.md docs/source/en/model_doc/xglm.md docs/source/en/model_doc/xlm-prophetnet.md docs/source/en/model_doc/xlm-roberta-xl.md docs/source/en/model_doc/xlm-roberta.md docs/source/en/model_doc/xlm-v.md docs/source/en/model_doc/xlm.md docs/source/en/model_doc/xlnet.md docs/source/en/model_doc/xls_r.md docs/source/en/model_doc/xlsr_wav2vec2.md docs/source/en/model_doc/xmod.md docs/source/en/model_doc/yolos.md docs/source/en/model_doc/yoso.md docs/source/en/model_memory_anatomy.md docs/source/en/model_sharing.md docs/source/en/model_summary.md docs/source/en/multilingual.md docs/source/en/notebooks.md docs/source/en/pad_truncation.md docs/source/en/peft.md docs/source/en/perf_hardware.md docs/source/en/perf_infer_cpu.md docs/source/en/perf_infer_gpu_one.md docs/source/en/perf_torch_compile.md docs/source/en/perf_train_cpu.md docs/source/en/perf_train_cpu_many.md docs/source/en/perf_train_gpu_many.md docs/source/en/perf_train_gpu_one.md docs/source/en/perf_train_special.md docs/source/en/perf_train_tpu.md docs/source/en/perf_train_tpu_tf.md docs/source/en/performance.md docs/source/en/perplexity.md docs/source/en/philosophy.md docs/source/en/pipeline_webserver.md docs/source/en/pr_checks.md docs/source/en/preprocessing.md 
docs/source/en/run_scripts.md docs/source/en/sagemaker.md docs/source/en/serialization.md docs/source/en/tasks/asr.md docs/source/en/tasks/audio_classification.md docs/source/en/tasks/document_question_answering.md docs/source/en/tasks/idefics.md docs/source/en/tasks/image_captioning.md docs/source/en/tasks/image_classification.md docs/source/en/tasks/language_modeling.md docs/source/en/tasks/masked_language_modeling.md docs/source/en/tasks/monocular_depth_estimation.md docs/source/en/tasks/multiple_choice.md docs/source/en/tasks/object_detection.md docs/source/en/tasks/question_answering.md docs/source/en/tasks/semantic_segmentation.md docs/source/en/tasks/sequence_classification.md docs/source/en/tasks/summarization.md docs/source/en/tasks/text-to-speech.md docs/source/en/tasks/token_classification.md docs/source/en/tasks/translation.md docs/source/en/tasks/video_classification.md docs/source/en/tasks/visual_question_answering.md docs/source/en/tasks/zero_shot_image_classification.md docs/source/en/tasks/zero_shot_object_detection.md docs/source/en/tasks_explained.md docs/source/en/tf_xla.md docs/source/en/tflite.md docs/source/en/tokenizer_summary.md docs/source/en/torchscript.md docs/source/en/training.md docs/source/en/transformers_agents.md docs/source/en/troubleshooting.md src/transformers/activations.py src/transformers/activations_tf.py src/transformers/audio_utils.py src/transformers/benchmark/benchmark.py src/transformers/benchmark/benchmark_args.py src/transformers/benchmark/benchmark_args_tf.py src/transformers/benchmark/benchmark_args_utils.py src/transformers/benchmark/benchmark_tf.py src/transformers/benchmark/benchmark_utils.py src/transformers/commands/add_new_model.py src/transformers/commands/add_new_model_like.py src/transformers/commands/convert.py src/transformers/commands/download.py src/transformers/commands/env.py src/transformers/commands/lfs.py src/transformers/commands/pt_to_tf.py src/transformers/commands/run.py src/transformers/commands/serving.py src/transformers/commands/train.py src/transformers/commands/transformers_cli.py src/transformers/commands/user.py src/transformers/configuration_utils.py src/transformers/convert_graph_to_onnx.py src/transformers/convert_pytorch_checkpoint_to_tf2.py src/transformers/convert_slow_tokenizer.py src/transformers/convert_slow_tokenizers_checkpoints_to_fast.py src/transformers/convert_tf_hub_seq_to_seq_bert_to_pytorch.py src/transformers/data/data_collator.py src/transformers/data/datasets/glue.py src/transformers/data/datasets/language_modeling.py src/transformers/data/datasets/squad.py src/transformers/data/metrics/squad_metrics.py src/transformers/data/processors/glue.py src/transformers/data/processors/squad.py src/transformers/data/processors/utils.py src/transformers/data/processors/xnli.py src/transformers/debug_utils.py src/transformers/deepspeed.py src/transformers/dependency_versions_check.py src/transformers/dependency_versions_table.py src/transformers/dynamic_module_utils.py src/transformers/feature_extraction_sequence_utils.py src/transformers/feature_extraction_utils.py src/transformers/file_utils.py src/transformers/hf_argparser.py src/transformers/hyperparameter_search.py src/transformers/image_processing_utils.py src/transformers/image_transforms.py src/transformers/image_utils.py src/transformers/integrations/bitsandbytes.py src/transformers/integrations/deepspeed.py src/transformers/integrations/integration_utils.py src/transformers/integrations/peft.py src/transformers/keras_callbacks.py 
src/transformers/modelcard.py src/transformers/modeling_flax_outputs.py src/transformers/modeling_flax_pytorch_utils.py src/transformers/modeling_flax_utils.py src/transformers/modeling_outputs.py src/transformers/modeling_tf_outputs.py src/transformers/modeling_tf_pytorch_utils.py src/transformers/modeling_tf_utils.py src/transformers/modeling_utils.py src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py src/transformers/models/albert/modeling_flax_albert.py src/transformers/models/align/configuration_align.py src/transformers/models/align/convert_align_tf_to_hf.py src/transformers/models/align/modeling_align.py src/transformers/models/altclip/configuration_altclip.py src/transformers/models/altclip/modeling_altclip.py src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py src/transformers/models/audio_spectrogram_transformer/convert_audio_spectrogram_transformer_original_to_pytorch.py src/transformers/models/auto/auto_factory.py src/transformers/models/auto/configuration_auto.py src/transformers/models/auto/modeling_auto.py src/transformers/models/auto/modeling_flax_auto.py src/transformers/models/auto/modeling_tf_auto.py src/transformers/models/autoformer/configuration_autoformer.py src/transformers/models/autoformer/modeling_autoformer.py src/transformers/models/bark/convert_suno_to_hf.py src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py src/transformers/models/bart/modeling_flax_bart.py src/transformers/models/bart/modeling_tf_bart.py src/transformers/models/beit/convert_beit_unilm_to_pytorch.py src/transformers/models/beit/modeling_flax_beit.py src/transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py src/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py src/transformers/models/bert/convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py src/transformers/models/bert/modeling_flax_bert.py src/transformers/models/bert_generation/modeling_bert_generation.py src/transformers/models/big_bird/convert_bigbird_original_tf_checkpoint_to_pytorch.py src/transformers/models/big_bird/modeling_flax_big_bird.py src/transformers/models/bigbird_pegasus/convert_bigbird_pegasus_tf_to_pytorch.py src/transformers/models/biogpt/configuration_biogpt.py src/transformers/models/biogpt/convert_biogpt_original_pytorch_checkpoint_to_pytorch.py src/transformers/models/biogpt/modeling_biogpt.py src/transformers/models/bit/configuration_bit.py src/transformers/models/bit/convert_bit_to_pytorch.py src/transformers/models/bit/modeling_bit.py src/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py src/transformers/models/blenderbot/modeling_flax_blenderbot.py src/transformers/models/blenderbot/modeling_tf_blenderbot.py src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py src/transformers/models/blip/configuration_blip.py src/transformers/models/blip/convert_blip_original_pytorch_to_hf.py src/transformers/models/blip/modeling_blip_text.py src/transformers/models/blip/modeling_tf_blip_text.py src/transformers/models/blip_2/configuration_blip_2.py src/transformers/models/blip_2/convert_blip_2_original_to_pytorch.py src/transformers/models/blip_2/modeling_blip_2.py src/transformers/models/bloom/convert_bloom_original_checkpoint_to_pytorch.py 
src/transformers/models/bloom/modeling_bloom.py src/transformers/models/bloom/modeling_flax_bloom.py src/transformers/models/bridgetower/configuration_bridgetower.py src/transformers/models/bridgetower/modeling_bridgetower.py src/transformers/models/bros/convert_bros_to_pytorch.py src/transformers/models/byt5/convert_byt5_original_tf_checkpoint_to_pytorch.py src/transformers/models/camembert/modeling_camembert.py src/transformers/models/camembert/modeling_tf_camembert.py src/transformers/models/canine/convert_canine_original_tf_checkpoint_to_pytorch.py src/transformers/models/chinese_clip/configuration_chinese_clip.py src/transformers/models/chinese_clip/convert_chinese_clip_original_pytorch_to_hf.py src/transformers/models/chinese_clip/modeling_chinese_clip.py src/transformers/models/clap/convert_clap_original_pytorch_to_hf.py src/transformers/models/clip/convert_clip_original_pytorch_to_hf.py src/transformers/models/clip/modeling_clip.py src/transformers/models/clip/modeling_flax_clip.py src/transformers/models/clip/modeling_tf_clip.py src/transformers/models/clipseg/configuration_clipseg.py src/transformers/models/clipseg/convert_clipseg_original_pytorch_to_hf.py src/transformers/models/codegen/modeling_codegen.py src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py src/transformers/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py src/transformers/models/convbert/modeling_convbert.py src/transformers/models/convbert/modeling_tf_convbert.py src/transformers/models/convnext/convert_convnext_to_pytorch.py src/transformers/models/convnext/modeling_tf_convnext.py src/transformers/models/convnextv2/configuration_convnextv2.py src/transformers/models/convnextv2/convert_convnextv2_to_pytorch.py src/transformers/models/convnextv2/modeling_convnextv2.py src/transformers/models/cpmant/configuration_cpmant.py src/transformers/models/cpmant/modeling_cpmant.py src/transformers/models/cpmant/tokenization_cpmant.py src/transformers/models/ctrl/modeling_tf_ctrl.py src/transformers/models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py src/transformers/models/cvt/modeling_tf_cvt.py src/transformers/models/data2vec/convert_data2vec_audio_original_pytorch_checkpoint_to_pytorch.py src/transformers/models/data2vec/convert_data2vec_text_original_pytorch_checkpoint_to_pytorch.py src/transformers/models/data2vec/convert_data2vec_vision_original_pytorch_checkpoint_to_pytorch.py src/transformers/models/data2vec/modeling_data2vec_text.py src/transformers/models/data2vec/modeling_tf_data2vec_vision.py src/transformers/models/deberta/modeling_tf_deberta.py src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py src/transformers/models/decision_transformer/modeling_decision_transformer.py src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py src/transformers/models/deformable_detr/load_custom.py src/transformers/models/deit/convert_deit_timm_to_pytorch.py src/transformers/models/deprecated/bort/convert_bort_original_gluonnlp_checkpoint_to_pytorch.py src/transformers/models/deprecated/mctct/configuration_mctct.py src/transformers/models/deprecated/mctct/feature_extraction_mctct.py src/transformers/models/deprecated/mctct/modeling_mctct.py src/transformers/models/deprecated/mctct/processing_mctct.py src/transformers/models/deprecated/mmbt/configuration_mmbt.py src/transformers/models/deprecated/mmbt/modeling_mmbt.py src/transformers/models/deprecated/open_llama/configuration_open_llama.py 
src/transformers/models/deprecated/open_llama/modeling_open_llama.py src/transformers/models/deprecated/retribert/configuration_retribert.py src/transformers/models/deprecated/retribert/modeling_retribert.py src/transformers/models/deprecated/retribert/tokenization_retribert.py src/transformers/models/deprecated/retribert/tokenization_retribert_fast.py src/transformers/models/deprecated/tapex/tokenization_tapex.py src/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py src/transformers/models/deprecated/trajectory_transformer/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py src/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py src/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl_utilities.py src/transformers/models/deprecated/van/configuration_van.py src/transformers/models/deprecated/van/convert_van_to_pytorch.py src/transformers/models/deprecated/van/modeling_van.py src/transformers/models/deta/convert_deta_resnet_to_pytorch.py src/transformers/models/deta/convert_deta_swin_to_pytorch.py src/transformers/models/detr/convert_detr_original_pytorch_checkpoint_to_pytorch.py src/transformers/models/detr/convert_detr_to_pytorch.py src/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py src/transformers/models/dinov2/configuration_dinov2.py src/transformers/models/dinov2/convert_dinov2_to_hf.py src/transformers/models/dinov2/modeling_dinov2.py src/transformers/models/distilbert/modeling_distilbert.py src/transformers/models/distilbert/modeling_flax_distilbert.py src/transformers/models/distilbert/modeling_tf_distilbert.py src/transformers/models/dit/convert_dit_unilm_to_pytorch.py src/transformers/models/donut/configuration_donut_swin.py src/transformers/models/donut/convert_donut_to_pytorch.py src/transformers/models/donut/modeling_donut_swin.py src/transformers/models/dpr/convert_dpr_original_checkpoint_to_pytorch.py src/transformers/models/dpr/modeling_dpr.py src/transformers/models/dpr/modeling_tf_dpr.py src/transformers/models/dpt/configuration_dpt.py src/transformers/models/dpt/convert_dpt_hybrid_to_pytorch.py src/transformers/models/dpt/convert_dpt_to_pytorch.py src/transformers/models/efficientformer/configuration_efficientformer.py src/transformers/models/efficientformer/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py src/transformers/models/efficientformer/modeling_efficientformer.py src/transformers/models/efficientnet/configuration_efficientnet.py src/transformers/models/efficientnet/convert_efficientnet_to_pytorch.py src/transformers/models/efficientnet/modeling_efficientnet.py src/transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py src/transformers/models/electra/modeling_flax_electra.py src/transformers/models/encodec/configuration_encodec.py src/transformers/models/encodec/convert_encodec_checkpoint_to_pytorch.py src/transformers/models/encoder_decoder/modeling_encoder_decoder.py src/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py 
src/transformers/models/ernie/modeling_ernie.py src/transformers/models/esm/configuration_esm.py src/transformers/models/esm/convert_esm.py src/transformers/models/esm/modeling_esm.py src/transformers/models/esm/modeling_esmfold.py src/transformers/models/esm/modeling_tf_esm.py src/transformers/models/esm/openfold_utils/chunk_utils.py src/transformers/models/esm/openfold_utils/data_transforms.py src/transformers/models/esm/openfold_utils/feats.py src/transformers/models/esm/openfold_utils/loss.py src/transformers/models/esm/openfold_utils/protein.py src/transformers/models/esm/openfold_utils/residue_constants.py src/transformers/models/esm/openfold_utils/rigid_utils.py src/transformers/models/esm/openfold_utils/tensor_utils.py src/transformers/models/falcon/configuration_falcon.py src/transformers/models/falcon/modeling_falcon.py src/transformers/models/flaubert/configuration_flaubert.py src/transformers/models/flaubert/modeling_flaubert.py src/transformers/models/flaubert/modeling_tf_flaubert.py src/transformers/models/flava/convert_dalle_to_flava_codebook.py src/transformers/models/flava/convert_flava_original_pytorch_to_hf.py src/transformers/models/flava/modeling_flava.py src/transformers/models/fnet/convert_fnet_original_flax_checkpoint_to_pytorch.py src/transformers/models/fnet/modeling_fnet.py src/transformers/models/focalnet/configuration_focalnet.py src/transformers/models/focalnet/convert_focalnet_to_hf_format.py src/transformers/models/focalnet/modeling_focalnet.py src/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py src/transformers/models/fsmt/modeling_fsmt.py src/transformers/models/funnel/configuration_funnel.py src/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py src/transformers/models/funnel/modeling_funnel.py src/transformers/models/funnel/modeling_tf_funnel.py src/transformers/models/fuyu/convert_fuyu_model_weights_to_hf.py src/transformers/models/git/configuration_git.py src/transformers/models/git/convert_git_to_pytorch.py src/transformers/models/glpn/configuration_glpn.py src/transformers/models/glpn/convert_glpn_to_pytorch.py src/transformers/models/gpt2/CONVERSION.md src/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py src/transformers/models/gpt2/modeling_flax_gpt2.py src/transformers/models/gpt2/modeling_tf_gpt2.py src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py src/transformers/models/gpt_neo/convert_gpt_neo_mesh_tf_to_pytorch.py src/transformers/models/gpt_neo/modeling_flax_gpt_neo.py src/transformers/models/gpt_neo/modeling_gpt_neo.py src/transformers/models/gpt_neox/modeling_gpt_neox.py src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py src/transformers/models/gpt_sw3/convert_megatron_to_pytorch.py src/transformers/models/gptj/configuration_gptj.py src/transformers/models/gptj/modeling_flax_gptj.py src/transformers/models/gptj/modeling_tf_gptj.py src/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py src/transformers/models/gptsan_japanese/convert_gptsan_tf_checkpoint_to_pytorch.py src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py src/transformers/models/graphormer/collating_graphormer.py src/transformers/models/graphormer/configuration_graphormer.py src/transformers/models/graphormer/modeling_graphormer.py src/transformers/models/groupvit/configuration_groupvit.py src/transformers/models/groupvit/convert_groupvit_nvlab_to_hf.py 
src/transformers/models/hubert/configuration_hubert.py src/transformers/models/hubert/convert_distilhubert_original_s3prl_checkpoint_to_pytorch.py src/transformers/models/hubert/convert_hubert_original_pytorch_checkpoint_to_pytorch.py src/transformers/models/hubert/convert_hubert_original_s3prl_checkpoint_to_pytorch.py src/transformers/models/hubert/modeling_tf_hubert.py src/transformers/models/ibert/configuration_ibert.py src/transformers/models/ibert/modeling_ibert.py src/transformers/models/ibert/quant_modules.py src/transformers/models/idefics/configuration_idefics.py src/transformers/models/idefics/image_processing_idefics.py src/transformers/models/idefics/modeling_idefics.py src/transformers/models/idefics/perceiver.py src/transformers/models/idefics/processing_idefics.py src/transformers/models/idefics/vision.py src/transformers/models/imagegpt/convert_imagegpt_original_tf2_to_pytorch.py src/transformers/models/informer/configuration_informer.py src/transformers/models/informer/modeling_informer.py src/transformers/models/instructblip/configuration_instructblip.py src/transformers/models/instructblip/convert_instructblip_original_to_pytorch.py src/transformers/models/instructblip/modeling_instructblip.py src/transformers/models/instructblip/processing_instructblip.py src/transformers/models/jukebox/configuration_jukebox.py src/transformers/models/jukebox/convert_jukebox.py src/transformers/models/jukebox/modeling_jukebox.py src/transformers/models/kosmos2/convert_kosmos2_original_pytorch_checkpoint_to_pytorch.py src/transformers/models/led/configuration_led.py src/transformers/models/led/modeling_led.py src/transformers/models/led/modeling_tf_led.py src/transformers/models/levit/convert_levit_timm_to_pytorch.py src/transformers/models/levit/modeling_levit.py src/transformers/models/lilt/configuration_lilt.py src/transformers/models/llama/configuration_llama.py src/transformers/models/llama/convert_llama_weights_to_hf.py src/transformers/models/llama/modeling_llama.py src/transformers/models/llava/configuration_llava.py src/transformers/models/llava/modeling_llava.py src/transformers/models/longformer/configuration_longformer.py src/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py src/transformers/models/longt5/configuration_longt5.py src/transformers/models/longt5/convert_longt5x_checkpoint_to_flax.py src/transformers/models/longt5/modeling_flax_longt5.py src/transformers/models/luke/configuration_luke.py src/transformers/models/luke/convert_luke_original_pytorch_checkpoint_to_pytorch.py src/transformers/models/luke/modeling_luke.py src/transformers/models/lxmert/configuration_lxmert.py src/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py src/transformers/models/lxmert/modeling_lxmert.py src/transformers/models/lxmert/modeling_tf_lxmert.py src/transformers/models/m2m_100/convert_m2m100_original_checkpoint_to_pytorch.py src/transformers/models/m2m_100/modeling_m2m_100.py src/transformers/models/marian/configuration_marian.py src/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py src/transformers/models/marian/convert_marian_to_pytorch.py src/transformers/models/marian/modeling_flax_marian.py src/transformers/models/marian/modeling_tf_marian.py src/transformers/models/markuplm/configuration_markuplm.py src/transformers/models/markuplm/feature_extraction_markuplm.py src/transformers/models/mask2former/convert_mask2former_original_pytorch_checkpoint_to_pytorch.py 
src/transformers/models/maskformer/configuration_maskformer_swin.py
src/transformers/models/maskformer/convert_maskformer_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/maskformer/convert_maskformer_resnet_to_pytorch.py
src/transformers/models/maskformer/convert_maskformer_swin_to_pytorch.py
src/transformers/models/maskformer/modeling_maskformer_swin.py
src/transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py
src/transformers/models/mbart/modeling_flax_mbart.py
src/transformers/models/mega/configuration_mega.py
src/transformers/models/mega/convert_mega_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/mega/modeling_mega.py
src/transformers/models/megatron_bert/convert_megatron_bert_checkpoint.py
src/transformers/models/megatron_bert/modeling_megatron_bert.py
src/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py
src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py
src/transformers/models/mgp_str/configuration_mgp_str.py
src/transformers/models/mgp_str/modeling_mgp_str.py
src/transformers/models/mistral/configuration_mistral.py
src/transformers/models/mistral/modeling_mistral.py
src/transformers/models/mluke/convert_mluke_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/mobilebert/convert_mobilebert_original_tf_checkpoint_to_pytorch.py
src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py
src/transformers/models/mobilenet_v1/convert_original_tf_checkpoint_to_pytorch.py
src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py
src/transformers/models/mobilenet_v2/convert_original_tf_checkpoint_to_pytorch.py
src/transformers/models/mobilevit/configuration_mobilevit.py
src/transformers/models/mobilevit/convert_mlcvnets_to_pytorch.py
src/transformers/models/mobilevitv2/convert_mlcvnets_to_pytorch.py
src/transformers/models/mpnet/configuration_mpnet.py
src/transformers/models/mpnet/modeling_mpnet.py
src/transformers/models/mpnet/modeling_tf_mpnet.py
src/transformers/models/mpt/configuration_mpt.py
src/transformers/models/mpt/modeling_mpt.py
src/transformers/models/mra/configuration_mra.py
src/transformers/models/mra/convert_mra_pytorch_to_pytorch.py
src/transformers/models/mra/modeling_mra.py
src/transformers/models/mt5/configuration_mt5.py
src/transformers/models/mt5/modeling_flax_mt5.py
src/transformers/models/mt5/modeling_mt5.py
src/transformers/models/mt5/modeling_tf_mt5.py
src/transformers/models/musicgen/convert_musicgen_transformers.py
src/transformers/models/mvp/modeling_mvp.py
src/transformers/models/nezha/modeling_nezha.py
src/transformers/models/nllb_moe/configuration_nllb_moe.py
src/transformers/models/nllb_moe/convert_nllb_moe_sharded_original_checkpoint_to_pytorch.py
src/transformers/models/nllb_moe/modeling_nllb_moe.py
src/transformers/models/nougat/convert_nougat_to_hf.py
src/transformers/models/nystromformer/configuration_nystromformer.py
src/transformers/models/nystromformer/convert_nystromformer_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/nystromformer/modeling_nystromformer.py
src/transformers/models/oneformer/convert_to_hf_oneformer.py
src/transformers/models/openai/convert_openai_original_tf_checkpoint_to_pytorch.py
src/transformers/models/openai/modeling_openai.py
src/transformers/models/openai/modeling_tf_openai.py
src/transformers/models/opt/convert_opt_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/opt/modeling_flax_opt.py
src/transformers/models/owlvit/configuration_owlvit.py
src/transformers/models/owlvit/convert_owlvit_original_flax_to_hf.py
src/transformers/models/pegasus/convert_pegasus_tf_to_pytorch.py
src/transformers/models/pegasus/modeling_flax_pegasus.py
src/transformers/models/pegasus/modeling_tf_pegasus.py
src/transformers/models/pegasus_x/modeling_pegasus_x.py
src/transformers/models/perceiver/configuration_perceiver.py
src/transformers/models/perceiver/convert_perceiver_haiku_to_pytorch.py
src/transformers/models/persimmon/convert_persimmon_weights_to_hf.py
src/transformers/models/persimmon/modeling_persimmon.py
src/transformers/models/pix2struct/configuration_pix2struct.py
src/transformers/models/pix2struct/convert_pix2struct_original_pytorch_to_hf.py
src/transformers/models/pix2struct/image_processing_pix2struct.py
src/transformers/models/pix2struct/processing_pix2struct.py
src/transformers/models/plbart/convert_plbart_original_checkpoint_to_torch.py
src/transformers/models/poolformer/convert_poolformer_original_to_pytorch.py
src/transformers/models/pop2piano/convert_pop2piano_weights_to_hf.py
src/transformers/models/pop2piano/feature_extraction_pop2piano.py
src/transformers/models/pop2piano/processing_pop2piano.py
src/transformers/models/pop2piano/tokenization_pop2piano.py
src/transformers/models/prophetnet/configuration_prophetnet.py
src/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/prophetnet/modeling_prophetnet.py
src/transformers/models/pvt/configuration_pvt.py
src/transformers/models/pvt/convert_pvt_to_pytorch.py
src/transformers/models/pvt/image_processing_pvt.py
src/transformers/models/pvt/modeling_pvt.py
src/transformers/models/qdqbert/configuration_qdqbert.py
src/transformers/models/qdqbert/modeling_qdqbert.py
src/transformers/models/rag/configuration_rag.py
src/transformers/models/rag/modeling_rag.py
src/transformers/models/rag/modeling_tf_rag.py
src/transformers/models/rag/retrieval_rag.py
src/transformers/models/realm/modeling_realm.py
src/transformers/models/realm/retrieval_realm.py
src/transformers/models/reformer/convert_reformer_trax_checkpoint_to_pytorch.py
src/transformers/models/regnet/configuration_regnet.py
src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py
src/transformers/models/regnet/convert_regnet_to_pytorch.py
src/transformers/models/regnet/modeling_flax_regnet.py
src/transformers/models/rembert/configuration_rembert.py
src/transformers/models/rembert/convert_rembert_tf_checkpoint_to_pytorch.py
src/transformers/models/rembert/modeling_rembert.py
src/transformers/models/rembert/modeling_tf_rembert.py
src/transformers/models/resnet/convert_resnet_to_pytorch.py
src/transformers/models/resnet/modeling_flax_resnet.py
src/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/roberta/modeling_flax_roberta.py
src/transformers/models/roberta_prelayernorm/convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
src/transformers/models/roc_bert/configuration_roc_bert.py
src/transformers/models/roformer/convert_roformer_original_tf_checkpoint_to_pytorch.py
src/transformers/models/roformer/modeling_flax_roformer.py
src/transformers/models/roformer/modeling_roformer.py
src/transformers/models/roformer/modeling_tf_roformer.py
src/transformers/models/rwkv/configuration_rwkv.py
src/transformers/models/rwkv/convert_rwkv_checkpoint_to_hf.py
src/transformers/models/rwkv/modeling_rwkv.py
src/transformers/models/sam/configuration_sam.py
src/transformers/models/sam/convert_sam_original_to_hf_format.py
src/transformers/models/sam/image_processing_sam.py
src/transformers/models/sam/modeling_sam.py
src/transformers/models/sam/modeling_tf_sam.py
src/transformers/models/sam/processing_sam.py
src/transformers/models/seamless_m4t/convert_fairseq2_to_hf.py
src/transformers/models/seamless_m4t_v2/convert_fairseq2_to_hf.py
src/transformers/models/segformer/configuration_segformer.py
src/transformers/models/segformer/convert_segformer_original_to_pytorch.py
src/transformers/models/sew/convert_sew_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/sew_d/convert_sew_d_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py
src/transformers/models/speech_encoder_decoder/convert_mbart_wav2vec2_seq2seq_original_to_pytorch.py
src/transformers/models/speech_encoder_decoder/convert_speech_to_text_wav2vec2_seq2seq_original_to_pytorch.py
src/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py
src/transformers/models/speech_to_text/convert_s2t_fairseq_to_tfms.py
src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
src/transformers/models/speecht5/configuration_speecht5.py
src/transformers/models/speecht5/convert_hifigan.py
src/transformers/models/speecht5/convert_speecht5_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/speecht5/number_normalizer.py
src/transformers/models/splinter/configuration_splinter.py
src/transformers/models/splinter/modeling_splinter.py
src/transformers/models/squeezebert/modeling_squeezebert.py
src/transformers/models/swiftformer/configuration_swiftformer.py
src/transformers/models/swiftformer/convert_swiftformer_original_to_hf.py
src/transformers/models/swiftformer/modeling_swiftformer.py
src/transformers/models/swin/convert_swin_simmim_to_pytorch.py
src/transformers/models/swin/convert_swin_timm_to_pytorch.py
src/transformers/models/swin/modeling_tf_swin.py
src/transformers/models/swin2sr/configuration_swin2sr.py
src/transformers/models/swin2sr/convert_swin2sr_original_to_pytorch.py
src/transformers/models/swinv2/convert_swinv2_timm_to_pytorch.py
src/transformers/models/swinv2/modeling_swinv2.py
src/transformers/models/switch_transformers/configuration_switch_transformers.py
src/transformers/models/switch_transformers/convert_big_switch.py
src/transformers/models/switch_transformers/convert_switch_transformers_original_flax_checkpoint_to_pytorch.py
src/transformers/models/switch_transformers/modeling_switch_transformers.py
src/transformers/models/t5/configuration_t5.py
src/transformers/models/t5/convert_t5_original_tf_checkpoint_to_pytorch.py
src/transformers/models/t5/convert_t5x_checkpoint_to_flax.py
src/transformers/models/t5/convert_t5x_checkpoint_to_pytorch.py
src/transformers/models/t5/modeling_flax_t5.py
src/transformers/models/t5/modeling_t5.py
src/transformers/models/t5/modeling_tf_t5.py
src/transformers/models/table_transformer/configuration_table_transformer.py
src/transformers/models/table_transformer/convert_table_transformer_to_hf.py
src/transformers/models/table_transformer/convert_table_transformer_to_hf_no_timm.py
src/transformers/models/tapas/configuration_tapas.py
src/transformers/models/tapas/convert_tapas_original_tf_checkpoint_to_pytorch.py
src/transformers/models/tapas/modeling_tapas.py
src/transformers/models/tapas/modeling_tf_tapas.py
src/transformers/models/timesformer/convert_timesformer_to_pytorch.py
src/transformers/models/timm_backbone/configuration_timm_backbone.py
src/transformers/models/timm_backbone/modeling_timm_backbone.py
src/transformers/models/trocr/convert_trocr_unilm_to_pytorch.py
src/transformers/models/tvlt/configuration_tvlt.py
src/transformers/models/tvlt/modeling_tvlt.py
src/transformers/models/umt5/configuration_umt5.py
src/transformers/models/umt5/convert_umt5_checkpoint_to_pytorch.py
src/transformers/models/umt5/modeling_umt5.py
src/transformers/models/unispeech/convert_unispeech_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/unispeech_sat/configuration_unispeech_sat.py
src/transformers/models/unispeech_sat/convert_unispeech_original_s3prl_checkpoint_to_pytorch.py
src/transformers/models/unispeech_sat/convert_unispeech_sat_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/upernet/configuration_upernet.py
src/transformers/models/upernet/convert_convnext_upernet_to_pytorch.py
src/transformers/models/upernet/convert_swin_upernet_to_pytorch.py
src/transformers/models/videomae/configuration_videomae.py
src/transformers/models/videomae/convert_videomae_to_pytorch.py
src/transformers/models/vilt/configuration_vilt.py
src/transformers/models/vilt/convert_vilt_original_to_pytorch.py
src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py
src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py
src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py
src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py
src/transformers/models/visual_bert/convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/visual_bert/modeling_visual_bert.py
src/transformers/models/vit/convert_dino_to_pytorch.py
src/transformers/models/vit/convert_vit_timm_to_pytorch.py
src/transformers/models/vit/modeling_flax_vit.py
src/transformers/models/vit_hybrid/configuration_vit_hybrid.py
src/transformers/models/vit_hybrid/convert_vit_hybrid_timm_to_pytorch.py
src/transformers/models/vit_hybrid/modeling_vit_hybrid.py
src/transformers/models/vit_mae/convert_vit_mae_to_pytorch.py
src/transformers/models/vit_mae/modeling_tf_vit_mae.py
src/transformers/models/vit_msn/configuration_vit_msn.py
src/transformers/models/vit_msn/convert_msn_to_pytorch.py
src/transformers/models/vivit/configuration_vivit.py
src/transformers/models/vivit/convert_vivit_flax_to_pytorch.py
src/transformers/models/vivit/image_processing_vivit.py
src/transformers/models/vivit/modeling_vivit.py
src/transformers/models/wav2vec2/convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/wav2vec2/convert_wav2vec2_original_s3prl_checkpoint_to_pytorch.py
src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py
src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py
src/transformers/models/wav2vec2_conformer/convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/wavlm/convert_wavlm_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/wavlm/convert_wavlm_original_s3prl_checkpoint_to_pytorch.py
src/transformers/models/whisper/convert_openai_to_hf.py
src/transformers/models/whisper/english_normalizer.py
src/transformers/models/whisper/modeling_flax_whisper.py
src/transformers/models/x_clip/configuration_x_clip.py
src/transformers/models/x_clip/convert_x_clip_original_pytorch_to_hf.py
src/transformers/models/xglm/configuration_xglm.py
src/transformers/models/xglm/convert_xglm_original_ckpt_to_trfms.py
src/transformers/models/xglm/modeling_flax_xglm.py
src/transformers/models/xglm/modeling_tf_xglm.py
src/transformers/models/xglm/modeling_xglm.py
src/transformers/models/xlm/convert_xlm_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/xlm/modeling_tf_xlm.py
src/transformers/models/xlm/modeling_xlm.py
src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py
src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py
src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py
src/transformers/models/xlm_roberta/modeling_xlm_roberta.py
src/transformers/models/xlm_roberta_xl/convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py
src/transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py
src/transformers/models/xlnet/modeling_tf_xlnet.py
src/transformers/models/xlnet/modeling_xlnet.py
src/transformers/models/xmod/convert_xmod_original_pytorch_checkpoint_to_pytorch.py
src/transformers/models/yolos/convert_yolos_to_pytorch.py
src/transformers/models/yoso/convert_yoso_pytorch_to_pytorch.py
src/transformers/models/yoso/modeling_yoso.py
src/transformers/onnx/__main__.py
src/transformers/onnx/config.py
src/transformers/onnx/convert.py
src/transformers/onnx/features.py
src/transformers/onnx/utils.py
src/transformers/optimization.py
src/transformers/optimization_tf.py
src/transformers/pipelines/audio_classification.py
src/transformers/pipelines/audio_utils.py
src/transformers/pipelines/automatic_speech_recognition.py
src/transformers/pipelines/base.py
src/transformers/pipelines/conversational.py
src/transformers/pipelines/depth_estimation.py
src/transformers/pipelines/document_question_answering.py
src/transformers/pipelines/feature_extraction.py
src/transformers/pipelines/fill_mask.py
src/transformers/pipelines/image_classification.py
src/transformers/pipelines/image_segmentation.py
src/transformers/pipelines/image_to_text.py
src/transformers/pipelines/mask_generation.py
src/transformers/pipelines/object_detection.py
src/transformers/pipelines/pt_utils.py
src/transformers/pipelines/question_answering.py
src/transformers/pipelines/table_question_answering.py
src/transformers/pipelines/text_classification.py
src/transformers/pipelines/token_classification.py
src/transformers/pipelines/video_classification.py
src/transformers/pipelines/visual_question_answering.py
src/transformers/pipelines/zero_shot_audio_classification.py
src/transformers/pipelines/zero_shot_classification.py
src/transformers/pipelines/zero_shot_image_classification.py
src/transformers/pipelines/zero_shot_object_detection.py
src/transformers/processing_utils.py
src/transformers/pytorch_utils.py
src/transformers/sagemaker/trainer_sm.py
src/transformers/sagemaker/training_args_sm.py
src/transformers/testing_utils.py
src/transformers/tf_utils.py
src/transformers/time_series_utils.py
src/transformers/tokenization_utils.py
src/transformers/tokenization_utils_base.py
src/transformers/tokenization_utils_fast.py
src/transformers/tools/agent_types.py
src/transformers/tools/agents.py
src/transformers/tools/base.py
src/transformers/tools/document_question_answering.py
src/transformers/tools/evaluate_agent.py
src/transformers/tools/image_captioning.py
src/transformers/tools/image_question_answering.py
src/transformers/tools/image_segmentation.py
src/transformers/tools/prompts.py
src/transformers/tools/python_interpreter.py
src/transformers/tools/speech_to_text.py
src/transformers/tools/text_classification.py
src/transformers/tools/text_question_answering.py
src/transformers/tools/text_summarization.py
src/transformers/tools/text_to_speech.py
src/transformers/tools/translation.py
src/transformers/trainer.py
src/transformers/trainer_callback.py
src/transformers/trainer_pt_utils.py
src/transformers/trainer_seq2seq.py
src/transformers/trainer_tf.py
src/transformers/trainer_utils.py
src/transformers/training_args.py
src/transformers/training_args_seq2seq.py
src/transformers/training_args_tf.py
src/transformers/utils/backbone_utils.py
src/transformers/utils/bitsandbytes.py
src/transformers/utils/constants.py
src/transformers/utils/doc.py
src/transformers/utils/dummy_detectron2_objects.py
src/transformers/utils/dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects.py
src/transformers/utils/dummy_flax_objects.py
src/transformers/utils/dummy_keras_nlp_objects.py
src/transformers/utils/dummy_music_objects.py
src/transformers/utils/dummy_pt_objects.py
src/transformers/utils/dummy_sentencepiece_and_tokenizers_objects.py
src/transformers/utils/dummy_sentencepiece_objects.py
src/transformers/utils/dummy_speech_objects.py
src/transformers/utils/dummy_tensorflow_text_objects.py
src/transformers/utils/dummy_tf_objects.py
src/transformers/utils/dummy_tokenizers_objects.py
src/transformers/utils/dummy_vision_objects.py
src/transformers/utils/fx.py
src/transformers/utils/generic.py
src/transformers/utils/hp_naming.py
src/transformers/utils/hub.py
src/transformers/utils/import_utils.py
src/transformers/utils/logging.py
src/transformers/utils/model_parallel_utils.py
src/transformers/utils/notebook.py
src/transformers/utils/peft_utils.py
src/transformers/utils/quantization_config.py
src/transformers/utils/sentencepiece_model_pb2.py
src/transformers/utils/sentencepiece_model_pb2_new.py
src/transformers/utils/versions.py
0
hf_public_repos/transformers
hf_public_repos/transformers/utils/check_dummies.py
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is responsible for making sure the dummies in utils/dummy_xxx_objects.py are up to date with the main
init.

Why dummies? This is to make sure that a user can always import all objects from `transformers`, even if they don't
have the necessary extra libs installed. Those objects will then raise a helpful error message whenever the user
tries to access one of their methods.

Usage (from the root of the repo):

Check that the dummy files are up to date (used in `make repo-consistency`):

```bash
python utils/check_dummies.py
```

Update the dummy files if needed (used in `make fix-copies`):

```bash
python utils/check_dummies.py --fix_and_overwrite
```
"""

import argparse
import os
import re
from typing import Dict, List, Optional


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Matches if not is_xxx_available()
_re_test_backend = re.compile(r"^\s+if\s+not\s+\(?is\_[a-z_]*\_available\(\)")

# Templates for the dummy objects.
DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


def find_backend(line: str) -> Optional[str]:
    """
    Find one (or multiple) backend in a code line of the init.

    Args:
        line (`str`): A code line in an init file.

    Returns:
        Optional[`str`]: If one (or several) backend is found, returns it. In the case of multiple backends (the line
        contains `if is_xxx_available() and is_yyy_available()`) returns all backends joined on `_and_` (so
        `xxx_and_yyy` for instance).
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def read_init() -> Dict[str, List[str]]:
    """
    Read the init and extract backend-specific objects.

    Returns:
        Dict[str, List[str]]: A dictionary mapping backend name to the list of object names requiring that backend.
    """
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        line_index += 1

    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("    else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    # Single-line imports
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    # Multiple-line imports (with 3 indent levels)
                    objects.append(line[12:-2])
                line_index += 1

            backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name: str, backend_name: str) -> str:
    """
    Create the code for a dummy object.

    Args:
        name (`str`): The name of the object.
        backend_name (`str`): The name of the backend required for that object.

    Returns:
        `str`: The code of the dummy object.
    """
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects: Optional[Dict[str, List[str]]] = None) -> Dict[str, str]:
    """
    Create the content of the dummy files.

    Args:
        backend_specific_objects (`Dict[str, List[str]]`, *optional*):
            The mapping of backend names to lists of backend-specific objects. If not passed, will be obtained by
            calling `read_init()`.

    Returns:
        `Dict[str, str]`: A dictionary mapping backend name to code of the corresponding backend file.
    """
    if backend_specific_objects is None:
        backend_specific_objects = read_init()

    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite: bool = False):
    """
    Check if the dummy files are up to date and maybe `overwrite` with the right content.

    Args:
        overwrite (`bool`, *optional*, defaults to `False`):
            Whether or not to overwrite the content of the dummy files. Will raise an error if they are not up to
            date when `overwrite=False`.
    """
    dummy_files = create_dummy_files()
    # Special correspondence from backend name to the shortcut used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_TRANSFORMERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    # Compare actual with what they should be.
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating transformers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"transformers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
0
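For reference, here is a minimal, self-contained sketch of how the dummy templates above expand. The backend mapping and object names (`MODEL_MAPPING`, `load_tf_weights_in_bert`, `BertModel` behind a `torch` backend) are invented for illustration and are not read from the real `__init__.py`:

```python
# Stand-alone sketch of the dummy-file rendering in check_dummies.py.
# Templates and the name-based dispatch mirror the script; the inputs are made up.
DUMMY_CONSTANT = "\n{0} = None\n"
DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
DUMMY_CLASS = (
    "\nclass {0}(metaclass=DummyObject):\n"
    "    _backends = {1}\n"
    "\n"
    "    def __init__(self, *args, **kwargs):\n"
    "        requires_backends(self, {1})\n"
)


def create_dummy_object(name: str, backend_name: str) -> str:
    # ALL_CAPS names become constants, all-lowercase names become functions,
    # and anything else is treated as a class -- the same heuristic the script uses.
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    return DUMMY_CLASS.format(name, backend_name)


# Hypothetical objects gated behind the torch backend.
objects = ["MODEL_MAPPING", "load_tf_weights_in_bert", "BertModel"]
print("".join(create_dummy_object(o, '["torch"]') for o in objects))
```

Running this prints the source of three dummy objects (a `None` constant, a function, and a `DummyObject` class), i.e. the kind of generated code the script writes into files like `utils/dummy_pt_objects.py`; the printed code itself refers to `DummyObject` and `requires_backends`, which the generated files import from `..utils`.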
hf_public_repos/transformers
hf_public_repos/transformers/utils/check_config_docstrings.py
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")


CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(
            f"The following configurations don't contain any valid checkpoint:\n{message}\n\n"
            "The requirement is to include a link pointing to one of the models of this architecture in the "
            "docstring of the config classes listed above. The link should be in markdown format like "
            "[myorg/mymodel](https://huggingface.co/myorg/mymodel)."
        )


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
0
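To see the checkpoint-extraction logic above in isolation, here is a small sketch that runs the same regex and name/link consistency check against a hand-written docstring. The docstring text and the second (deliberately mismatched) link are fabricated for the example:

```python
import re

# Same pattern as check_config_docstrings.py: a markdown link to a hf.co URL.
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Made-up config docstring: the first link is consistent, the second is not.
doc = """
Instantiating a configuration with the defaults will yield a configuration
similar to that of [bert-base-uncased](https://huggingface.co/bert-base-uncased).
See also [mismatch](https://huggingface.co/some-other-repo).
"""

checkpoint = None
for ckpt_name, ckpt_link in _re_checkpoint.findall(doc):
    # Tolerate a trailing slash, then require link == https://huggingface.co/<name>.
    if ckpt_link.rstrip("/") == f"https://huggingface.co/{ckpt_name}":
        checkpoint = ckpt_name
        break

print(checkpoint)  # -> bert-base-uncased
```

Only the first link passes the consistency check, which is exactly what lets the script reject docstrings whose display text and URL disagree.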
hf_public_repos/transformers
hf_public_repos/transformers/utils/get_github_job_time.py
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run"""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch jobs:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    r"""
    Example:

    python get_github_job_time.py --workflow_run_id 2945609517
    """

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
0
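To see what `extract_time_from_single_job` computes without calling the GitHub API, the sketch below feeds the same duration logic a hand-built payload. The job name and timestamps are fabricated, and only the fields the script actually reads (`started_at`, `completed_at`) matter:

```python
import dateutil.parser as date_parser


def extract_time_from_single_job(job):
    """Duration in whole minutes, computed as in get_github_job_time.py."""
    start_datetime = date_parser.parse(job["started_at"])
    end_datetime = date_parser.parse(job["completed_at"])
    return {
        "started_at": job["started_at"],
        "completed_at": job["completed_at"],
        "duration": round((end_datetime - start_datetime).total_seconds() / 60.0),
    }


# Fabricated payload shaped like one entry of the Actions "jobs" API response.
fake_job = {
    "name": "run_tests_torch",
    "started_at": "2022-08-29T10:00:00Z",
    "completed_at": "2022-08-29T11:23:30Z",
}
print(extract_time_from_single_job(fake_job)["duration"])  # -> 84
```

Note the rounding: 1 h 23 min 30 s is 83.5 minutes, which `round` takes to 84, so the script reports whole minutes only.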