from __future__ import annotations

import unittest

from transformers import DebertaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
|
|
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDebertaForMaskedLM,
        TFDebertaForQuestionAnswering,
        TFDebertaForSequenceClassification,
        TFDebertaForTokenClassification,
        TFDebertaModel,
    )
|
|
class TFDebertaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        # DeBERTa-specific options these tests keep fixed rather than expose as arguments.
        self.relative_attention = False
        self.max_relative_positions = -1
        self.position_biased_input = True
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
|
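    # Builds a small random config and matching input tensors; every
    # create_and_check_* helper below consumes the tuple this returns.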
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            max_relative_positions=self.max_relative_positions,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
|
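    # The checkers below each instantiate one task head and assert output shapes only;
    # numerical outputs are verified by the integration test at the end of the file.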
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaModel(config=config)
        # Exercise all three TF calling conventions: a dict of named inputs,
        # a list of positional inputs, and a single input_ids tensor.
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
|
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
|
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
|
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
|
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
|
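    # Repacks the tuple from prepare_config_and_inputs into the (config, inputs_dict)
    # shape expected by the shared TFModelTesterMixin tests.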
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
|
|
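# TFModelTesterMixin contributes the generic TF model tests, and PipelineTesterMixin
# smoke-tests each task listed in pipeline_model_mapping against these classes.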
@require_tf
class TFDebertaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaModel,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaModel,
            "fill-mask": TFDebertaForMaskedLM,
            "question-answering": TFDebertaForQuestionAnswering,
            "text-classification": TFDebertaForSequenceClassification,
            "token-classification": TFDebertaForTokenClassification,
            "zero-shot": TFDebertaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False
|
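    # Wires the model tester to this test case and sets up the shared config checks.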
    def setUp(self):
        self.model_tester = TFDebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
|
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaModel.from_pretrained("kamalkraj/deberta-base")
        self.assertIsNotNone(model)
|
|
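# The integration test loads the real checkpoint, so it is gated behind @slow; the
# expected_slice values are presumably reference activations recorded from a prior
# run of the same checkpoint.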
@require_tf
class TFDeBERTaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaModel.from_pretrained("kamalkraj/deberta-base")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [
                [
                    [-0.59855896, -0.80552566, -0.8462135],
                    [1.4484025, -0.93483794, -0.80593085],
                    [0.3122741, 0.00316059, -1.4131377],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
|
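# A quick way to run just this module (assuming the standard transformers repo layout;
# adjust the path if the file lives elsewhere):
#   python -m pytest tests/models/deberta/test_modeling_tf_deberta.py
# Slow tests such as test_inference_no_head only execute when RUN_SLOW=1 is set.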