import shutil
import tempfile
import unittest
from functools import cached_property

from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken

from ...test_tokenization_common import TokenizerTesterMixin


class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    from_pretrained_id = "nielsr/canine-s"
    tokenizer_class = CanineTokenizer
    test_slow_tokenizer = True
    test_rust_tokenizer = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(cls.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    @classmethod
    def get_tokenizer(cls, pretrained_name=None, **kwargs) -> CanineTokenizer:
        pretrained_name = pretrained_name or cls.tmpdirname
        tokenizer = cls.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
        # keep the reported unicode vocab small so the common tests stay fast
        tokenizer._unicode_vocab_size = 1024
        return tokenizer

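    # 57344 (0xE000) and 57345 (0xE001) are CANINE's [CLS] and [SEP] code points; the trailing zeros are padding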
    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)

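    # besides input_ids and attention_mask, CANINE also returns BERT-style token_type_ids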
    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Transformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")

        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)

    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weather?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

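    # the common save/load test is overridden here because CanineTokenizer has no vocabulary file to compare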
    def test_save_and_load_tokenizer(self):
        # safety check on the default value of model_max_length, so the assertions below are meaningful
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # a plain save/load round trip must keep the encoding unchanged
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # isolate this check in its own temporary directory
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00e9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        # same round trip, but with an added special token and a custom model_max_length
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00e9d,running"

                extra_special_tokens = tokenizer.extra_special_tokens

                # a new special token for CANINE can be an unused private-use code point
                new_extra_special_token = chr(0xE007)
                extra_special_tokens.append(new_extra_special_token)
                tokenizer.add_special_tokens(
                    {"extra_special_tokens": extra_special_tokens}, replace_extra_special_tokens=False
                )
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_extra_special_token, after_tokenizer.extra_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

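    # an added special token must encode to a single id, round-trip through decode, and be dropped with skip_special_tokens=True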
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for CANINE can be defined as an unused private-use code point
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                tokenizer.add_special_tokens({"extra_special_tokens": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)

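    # an AddedToken wrapper (e.g. with lstrip=True) must survive save_pretrained/from_pretrained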
    @require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for CANINE can be defined as an unused private-use code point
                NEW_TOKEN = 0xE006
                new_token = chr(NEW_TOKEN)

                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({"extra_special_tokens": [new_token]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)

    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])

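    # overridden from the common tests: CANINE has no vocab lookup, ids are simply Unicode code points (ord)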
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "extra_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "extra_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "extra_special_tokens_ids"), [])

                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "extra_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "extra_special_tokens"), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, "extra_special_tokens_ids"), [additional_special_token_id])

    @unittest.skip(reason="tokenizer has a fixed vocab_size (namely all possible unicode code points)")
    def test_add_tokens_tokenizer(self):
        pass

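    # do_lower_case is not supported: every character maps to its own code point, so "b" and "B" get different ids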
    @unittest.skip(reason="CanineTokenizer does not support do_lower_case = True")
    def test_added_tokens_do_lower_case(self):
        pass

    @unittest.skip(reason="CanineModel does not support the get_input_embeddings nor the get_vocab method")
    def test_np_encode_plus_sent_to_model(self):
        pass

    @unittest.skip(reason="CanineModel does not support the get_input_embeddings nor the get_vocab method")
    def test_torch_encode_plus_sent_to_model(self):
        pass

    @unittest.skip(reason="CanineTokenizer does not have vocabulary")
    def test_get_vocab(self):
        pass

    @unittest.skip(reason="inputs cannot be pretokenized since ids depend on whole input string")
    def test_pretokenized_inputs(self):
        pass

    @unittest.skip(reason="CanineTokenizer does not have vocabulary")
    def test_conversion_reversible(self):
        pass