import unittest

from packaging import version

from transformers import AlbertConfig, AutoTokenizer, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        AlbertForMaskedLM,
        AlbertForMultipleChoice,
        AlbertForPreTraining,
        AlbertForQuestionAnswering,
        AlbertForSequenceClassification,
        AlbertForTokenClassification,
        AlbertModel,
    )

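
# Builds a deliberately tiny ALBERT config and matching random inputs/labels so
# that every task head below can be instantiated and exercised quickly on CPU.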
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=2,
        num_hidden_groups=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

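    # Each create_and_check_* helper instantiates one task head on the tiny
    # config, runs a forward pass, and asserts the expected output shapes.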
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

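    # The multiple-choice head scores every choice with a shared encoder, so the
    # flat (batch, seq_len) inputs are tiled to (batch, num_choices, seq_len).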
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_input_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_input_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


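# Wires the tiny tester into the shared ModelTesterMixin / PipelineTesterMixin
# suites so the generic modeling and pipeline checks run over every ALBERT head.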
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # Special case: AlbertForPreTraining also expects sentence-order labels in
    # addition to the masked-LM labels added by the base implementation.
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model_name = "albert/albert-base-v1"
        model = AlbertModel.from_pretrained(model_name)
        self.assertIsNotNone(model)


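# Integration tests: load real checkpoints and compare model outputs against
# recorded reference values.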
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert/albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        torch.testing.assert_close(output[:, 1:4, 1:4], expected_slice, rtol=1e-4, atol=1e-4)

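    # torch.export support: the exported program should reproduce the eager-mode
    # top-5 predictions for the masked token.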
    @slow
    def test_export(self):
        if version.parse(torch.__version__) < version.parse("2.4.0"):
            self.skipTest(reason="This test requires torch >= 2.4 to run.")

        albert_model = "albert/albert-base-v2"
        device = "cpu"
        attn_implementation = "sdpa"
        max_length = 64

        tokenizer = AutoTokenizer.from_pretrained(albert_model)
        inputs = tokenizer(
            f"Paris is the {tokenizer.mask_token} of France.",
            return_tensors="pt",
            padding="max_length",
            max_length=max_length,
        )

        model = AlbertForMaskedLM.from_pretrained(
            albert_model,
            device_map=device,
            attn_implementation=attn_implementation,
        )

        # Eager-mode reference predictions for the masked token.
        logits = model(**inputs).logits
        eg_predicted_mask = tokenizer.decode(logits[0, 4].topk(5).indices)
        self.assertEqual(
            eg_predicted_mask.split(),
            ["capital", "capitol", "comune", "arrondissement", "bastille"],
        )

        exported_program = torch.export.export(
            model,
            args=(inputs["input_ids"],),
            kwargs={"attention_mask": inputs["attention_mask"]},
            strict=True,
        )

        # The exported program must reproduce the eager predictions exactly.
        result = exported_program.module().forward(inputs["input_ids"], inputs["attention_mask"])
        ep_predicted_mask = tokenizer.decode(result.logits[0, 4].topk(5).indices)
        self.assertEqual(eg_predicted_mask, ep_predicted_mask)