# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch CANINE model. """

import unittest
from typing import List, Tuple

from transformers import CanineConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, global_rng, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        CanineForMultipleChoice,
        CanineForQuestionAnswering,
        CanineForSequenceClassification,
        CanineForTokenClassification,
        CanineModel,
    )
    from transformers.models.canine.modeling_canine import CANINE_PRETRAINED_MODEL_ARCHIVE_LIST


class CanineModelTester:
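    """
    Helper that builds a small random CanineConfig and the matching dummy inputs (input ids,
    attention mask, token type ids and labels) shared by the model tests below.
    """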
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        # let's use a vocab size that's way bigger than BERT's one
        vocab_size=100000,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor(input_ids.shape, self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return CanineConfig(
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = CanineModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = CanineForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = CanineForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = CanineForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = CanineForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class CanineModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            CanineModel,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": CanineModel,
            "question-answering": CanineForQuestionAnswering,
            "text-classification": CanineForSequenceClassification,
            "token-classification": CanineForTokenClassification,
            "zero-shot": CanineForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    test_mismatched_shapes = False
    test_resize_embeddings = False
    test_pruning = False

    def setUp(self):
        self.model_tester = CanineModelTester(self)
        # we set has_text_modality to False as the config has no vocab_size attribute
        self.config_tester = ConfigTester(self, config_class=CanineConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            # expected_num_layers equals num_hidden_layers of the deep encoder + 1, + 2 for the first
            # shallow encoder, + 2 for the final shallow encoder
            expected_num_layers = self.model_tester.num_hidden_layers + 1 + 2 + 2
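            # with the defaults used here (num_hidden_layers=5) that is 5 + 1 + 2 + 2 = 10 hidden-state tensors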
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            for i in range(expected_num_layers):
                if (i < 2) or ((expected_num_layers - i) < 3):
                    # the expected length of the hidden_states of the first and final shallow encoders
                    # is equal to the seq_length
                    self.assertListEqual(
                        list(hidden_states[i].shape[-2:]),
                        [seq_length, self.model_tester.hidden_size],
                    )
                else:
                    # the expected length of the hidden_states of the deep encoder needs to be updated
                    # for CANINE since the seq length is downsampled
                    self.assertListEqual(
                        list(hidden_states[i].shape[-2:]),
                        [seq_length // config.downsampling_rate, self.model_tester.hidden_size],
                    )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            # we add + 2 due to the 2 shallow encoders
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers + 2)
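            # i.e. 5 deep-encoder layers + 1 initial shallow encoder + 1 final shallow encoder = 7 attention tensors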
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            # we add + 2 due to the 2 shallow encoders
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers + 2)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers + 2)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object).any()}."
                                f" Dict has `nan`: {torch.isnan(dict_object).any()} and `inf`:"
                                f" {torch.isinf(dict_object).any()}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            print(model_class)
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(
                model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
            )

    def test_headmasking(self):
        if not self.test_head_masking:
            return

        global_rng.seed(42)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        global_rng.seed()

        inputs_dict["output_attentions"] = True
        config.output_hidden_states = True
        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()

            # Prepare head_mask
            # Set require_grad after having prepared the tensor to avoid error (leaf variable has been moved into the graph interior)
            head_mask = torch.ones(
                self.model_tester.num_hidden_layers,
                self.model_tester.num_attention_heads,
                device=torch_device,
            )
            head_mask[0, 0] = 0
            head_mask[-1, :-1] = 0
            head_mask.requires_grad_(requires_grad=True)
            inputs = self._prepare_for_class(inputs_dict, model_class).copy()
            inputs["head_mask"] = head_mask
            outputs = model(**inputs, return_dict=True)

            # Test that we can get a gradient back for importance score computation
            output = sum(t.sum() for t in outputs[0])
            output = output.sum()
            output.backward()
            multihead_outputs = head_mask.grad

            self.assertIsNotNone(multihead_outputs)
            self.assertEqual(len(multihead_outputs), self.model_tester.num_hidden_layers)

            def check_attentions_validity(attentions):
                # Remove Nan
                for t in attentions:
                    self.assertLess(
                        torch.sum(torch.isnan(t)), t.numel() / 4
                    )  # Check we don't have more than 25% nans (arbitrary)
                attentions = [
                    t.masked_fill(torch.isnan(t), 0.0) for t in attentions
                ]  # remove them (the test is less complete)
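                # note: the checks below use indices 1 and -2 (rather than 0 and -1) because the first and last
                # attention tensors come from the two shallow char encoders, which the
                # (num_hidden_layers, num_attention_heads) head_mask above does not cover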
                self.assertAlmostEqual(attentions[1][..., 0, :, :].flatten().sum().item(), 0.0)
                self.assertNotEqual(attentions[1][..., -1, :, :].flatten().sum().item(), 0.0)
                self.assertAlmostEqual(attentions[-2][..., -2, :, :].flatten().sum().item(), 0.0)
                self.assertNotEqual(attentions[-2][..., -1, :, :].flatten().sum().item(), 0.0)

            check_attentions_validity(outputs.attentions)

    def test_inputs_embeds(self):
        # CANINE does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        # CANINE does not expose a standard input embedding matrix (it hashes character code points),
        # so the common attribute checks do not apply
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CANINE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CanineModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class CanineModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = CanineModel.from_pretrained("google/canine-s")

        # this one corresponds to the first example of the TydiQA dev set (in Swahili)
        # fmt: off
| input_ids = [57344, 57349, 85, 107, 117, 98, 119, 97, 32, 119, 97, 32, 82, 105, 106, 105, 108, 105, 32, 75, 97, 110, 116, 111, 114, 105, 32, 110, 105, 32, 107, 105, 97, 115, 105, 32, 103, 97, 110, 105, 63, 57345, 57350, 32, 82, 105, 106, 105, 108, 105, 32, 75, 97, 110, 116, 111, 114, 105, 32, 44, 32, 82, 105, 106, 105, 108, 105, 32, 75, 97, 110, 116, 97, 114, 117, 115, 105, 32, 97, 117, 32, 105, 110, 103, 46, 32, 65, 108, 112, 104, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 32, 40, 112, 105, 97, 58, 32, 84, 111, 108, 105, 109, 97, 110, 32, 97, 117, 32, 82, 105, 103, 105, 108, 32, 75, 101, 110, 116, 97, 117, 114, 117, 115, 41, 32, 110, 105, 32, 110, 121, 111, 116, 97, 32, 105, 110, 97, 121, 111, 110, 103, 39, 97, 97, 32, 115, 97, 110, 97, 32, 107, 97, 116, 105, 107, 97, 32, 97, 110, 103, 97, 32, 121, 97, 32, 107, 117, 115, 105, 110, 105, 32, 107, 119, 101, 110, 121, 101, 32, 107, 117, 110, 100, 105, 110, 121, 111, 116, 97, 32, 121, 97, 32, 75, 97, 110, 116, 97, 114, 117, 115, 105, 32, 40, 112, 105, 97, 58, 32, 105, 110, 103, 46, 32, 67, 101, 110, 116, 97, 117, 114, 117, 115, 41, 46, 32, 78, 105, 32, 110, 121, 111, 116, 97, 32, 121, 97, 32, 107, 117, 110, 103, 97, 97, 32, 115, 97, 110, 97, 32, 121, 97, 32, 110, 110, 101, 32, 97, 110, 103, 97, 110, 105, 32, 108, 97, 107, 105, 110, 105, 32, 104, 97, 105, 111, 110, 101, 107, 97, 110, 105, 32, 107, 119, 101, 110, 121, 101, 32, 110, 117, 115, 117, 100, 117, 110, 105, 97, 32, 121, 97, 32, 107, 97, 115, 107, 97, 122, 105, 110, 105, 46, 32, 57351, 32, 65, 108, 112, 104, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 32, 110, 105, 32, 110, 121, 111, 116, 97, 32, 121, 97, 32, 112, 101, 107, 101, 101, 32, 107, 119, 97, 32, 115, 97, 98, 97, 98, 117, 32, 110, 105, 32, 110, 121, 111, 116, 97, 32, 121, 101, 116, 117, 32, 106, 105, 114, 97, 110, 105, 32, 107, 97, 116, 105, 107, 97, 32, 97, 110, 103, 97, 32, 105, 110, 97, 32, 117, 109, 98, 97, 108, 105, 32, 119, 97, 32, 109, 105, 97, 107, 97, 32, 121, 97, 32, 110, 117, 114, 117, 32, 52, 46, 50, 46, 32, 73, 110, 97, 111, 110, 101, 107, 97, 110, 97, 32, 97, 110, 103, 97, 110, 105, 32, 107, 97, 114, 105, 98, 117, 32, 110, 97, 32, 107, 117, 110, 100, 105, 110, 121, 111, 116, 97, 32, 121, 97, 32, 83, 97, 108, 105, 98, 117, 32, 40, 67, 114, 117, 120, 41, 46, 32, 57352, 32, 82, 105, 106, 105, 108, 105, 32, 75, 97, 110, 116, 97, 114, 117, 115, 105, 32, 40, 65, 108, 112, 104, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 41, 32, 105, 110, 97, 111, 110, 101, 107, 97, 110, 97, 32, 107, 97, 109, 97, 32, 110, 121, 111, 116, 97, 32, 109, 111, 106, 97, 32, 108, 97, 107, 105, 110, 105, 32, 107, 119, 97, 32, 100, 97, 114, 117, 98, 105, 110, 105, 32, 107, 117, 98, 119, 97, 32, 105, 110, 97, 111, 110, 101, 107, 97, 110, 97, 32, 107, 117, 119, 97, 32, 109, 102, 117, 109, 111, 32, 119, 97, 32, 110, 121, 111, 116, 97, 32, 116, 97, 116, 117, 32, 122, 105, 110, 97, 122, 111, 107, 97, 97, 32, 107, 97, 114, 105, 98, 117, 32, 110, 97, 32, 107, 117, 115, 104, 105, 107, 97, 109, 97, 110, 97, 32, 107, 97, 116, 105, 32, 121, 97, 111, 46, 32, 78, 121, 111, 116, 97, 32, 109, 97, 112, 97, 99, 104, 97, 32, 122, 97, 32, 65, 108, 112, 104, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 32, 65, 32, 110, 97, 32, 65, 108, 112, 104, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 32, 66, 32, 122, 105, 107, 111, 32, 109, 105, 97, 107, 97, 32, 121, 97, 32, 110, 117, 114, 117, 32, 52, 46, 51, 54, 32, 107, 117, 116, 111, 107, 97, 32, 107, 119, 101, 116, 117, 32, 110, 97, 32, 110, 121, 111, 116, 97, 32, 121, 97, 32, 116, 97, 116, 117, 32, 65, 108, 112, 
104, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 32, 67, 32, 97, 117, 32, 80, 114, 111, 120, 105, 109, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 32, 105, 110, 97, 32, 117, 109, 98, 97, 108, 105, 32, 119, 97, 32, 109, 105, 97, 107, 97, 32, 121, 97, 32, 110, 117, 114, 117, 32, 52, 46, 50, 50, 46, 32, 57353, 32, 80, 114, 111, 120, 105, 109, 97, 32, 67, 101, 110, 116, 97, 117, 114, 105, 32, 40, 121, 97, 97, 110, 105, 32, 110, 121, 111, 116, 97, 32, 121, 97, 32, 75, 97, 110, 116, 97, 114, 117, 115, 105, 32, 105, 108, 105, 121, 111, 32, 107, 97, 114, 105, 98, 117, 32, 122, 97, 105, 100, 105, 32, 110, 97, 115, 105, 41, 32, 105, 109, 101, 103, 117, 110, 100, 117, 108, 105, 119, 97, 32, 107, 117, 119, 97, 32, 110, 97, 32, 115, 97, 121, 97, 114, 105, 32, 109, 111, 106, 97, 46, 32, 86, 105, 112, 105, 109, 111, 32, 118, 105, 110, 97, 118, 121, 111, 112, 97, 116, 105, 107, 97, 110, 97, 32, 104, 97, 100, 105, 32, 115, 97, 115, 97, 32, 122, 105, 110, 97, 111, 110, 121, 101, 115, 104, 97, 32, 117, 119, 101, 122, 101, 107, 97, 110, 111, 32, 109, 107, 117, 98, 119, 97, 32, 121, 97, 32, 107, 119, 97, 109, 98, 97, 32, 115, 97, 121, 97, 114, 105, 32, 104, 105, 105, 32, 110, 105, 32, 121, 97, 32, 109, 119, 97, 109, 98, 97, 32, 40, 107, 97, 109, 97, 32, 100, 117, 110, 105, 97, 32, 121, 101, 116, 117, 44, 32, 77, 105, 114, 105, 104, 105, 32, 97, 117, 32, 90, 117, 104, 117, 114, 97, 41, 32, 110, 97, 32, 105, 110, 97, 119, 101, 122, 97, 32, 107, 117, 119, 97, 32, 110, 97, 32, 97, 110, 103, 97, 104, 101, 119, 97, 44, 32, 116, 101, 110, 97, 32, 107, 97, 116, 105, 107, 97, 32, 117, 112, 101, 111, 32, 119, 97, 32, 106, 111, 116, 111, 32, 117, 110, 97, 111, 114, 117, 104, 117, 115, 117, 32, 107, 117, 119, 101, 112, 111, 32, 107, 119, 97, 32, 117, 104, 97, 105, 46, 32, 91, 49, 93, 57345, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] | |
        attention_mask = [1 if x != 0 else 0 for x in input_ids]
| token_type_ids = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] | |
        # fmt: on
        input_ids = torch.tensor([input_ids])
        attention_mask = torch.tensor([attention_mask])
        token_type_ids = torch.tensor([token_type_ids])
        outputs = model(input_ids, attention_mask, token_type_ids)

        # verify sequence output
        expected_shape = torch.Size((1, 2048, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [-0.161433131, 0.395568609, 0.0407391489],
                [-0.108025983, 0.362060368, -0.544592619],
                [-0.141537309, 0.180541009, 0.076907],
            ]
        )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-2))

        # verify pooled output
        expected_shape = torch.Size((1, 768))
        self.assertEqual(outputs.pooler_output.shape, expected_shape)

        expected_slice = torch.tensor([-0.884311497, -0.529064834, 0.723164916])
        self.assertTrue(torch.allclose(outputs.pooler_output[0, :3], expected_slice, atol=1e-2))
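

# A typical way to run just this module locally (assuming the usual transformers repository
# layout; adjust the path if your checkout differs):
#
#   RUN_SLOW=1 python -m pytest tests/models/canine/test_modeling_canine.py
#
# The integration test above is decorated with @slow, so it only runs when RUN_SLOW=1 is set.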