| repo_id (string, 15–89 chars) | file_path (string, 27–180 chars) | content (string, 1–2.23M chars) | __index_level_0__ (int64) |
|---|---|---|---|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/bert/test_modeling_bert.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from transformers import BertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import CaptureLogger, require_torch, require_torch_accelerator, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLMHeadModel,
BertModel,
logging,
)
from transformers.models.bert.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST
class BertModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
"""
Returns a tiny configuration by default.
"""
return BertConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BertModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_model_as_decoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.add_cross_attention = True
model = BertModel(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
)
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_causal_lm(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
model = BertLMHeadModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BertForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_model_for_causal_lm_as_decoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.add_cross_attention = True
model = BertLMHeadModel(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=token_labels,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=token_labels,
encoder_hidden_states=encoder_hidden_states,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_decoder_model_past_large_inputs(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.is_decoder = True
config.add_cross_attention = True
model = BertLMHeadModel(config=config).to(torch_device).eval()
# first forward pass
outputs = model(
input_ids,
attention_mask=input_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=True,
)
past_key_values = outputs.past_key_values
        # create hypothetical next tokens and the matching attention mask
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append them to the previous input_ids and attention mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
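        # Consistency check: a full forward pass over the extended sequence (no cache)
        # and an incremental pass over only the new tokens (with cache) must produce
        # matching hidden states at the last three positions.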
output_from_no_past = model(
next_input_ids,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_hidden_states=True,
)["hidden_states"][0]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
)["hidden_states"][0]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_for_next_sequence_prediction(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BertForNextSentencePrediction(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=sequence_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def create_and_check_for_pretraining(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BertForPreTraining(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=token_labels,
next_sentence_label=sequence_labels,
)
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BertForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = BertForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = BertForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = BertForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
        multiple_choice_input_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
            multiple_choice_input_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class BertModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
BertModel,
BertLMHeadModel,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
)
if is_torch_available()
else ()
)
all_generative_model_classes = (BertLMHeadModel,) if is_torch_available() else ()
pipeline_model_mapping = (
{
"feature-extraction": BertModel,
"fill-mask": BertForMaskedLM,
"question-answering": BertForQuestionAnswering,
"text-classification": BertForSequenceClassification,
"text-generation": BertLMHeadModel,
"token-classification": BertForTokenClassification,
"zero-shot": BertForSequenceClassification,
}
if is_torch_available()
else {}
)
fx_compatible = True
# special case for ForPreTraining model
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
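                # BertForPreTraining computes both a masked-LM loss and a next-sentence
                # loss, so both `labels` and `next_sentence_label` must be provided.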
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
inputs_dict["next_sentence_label"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
return inputs_dict
def setUp(self):
self.model_tester = BertModelTester(self)
self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = embedding_type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_as_decoder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
def test_model_as_decoder_with_default_input_mask(self):
# This regression test was failing with PyTorch < 1.3
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_model_as_decoder(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def test_for_causal_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_causal_lm_decoder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_for_causal_lm_as_decoder(*config_and_inputs)
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_decoder_model_past_with_large_inputs_relative_pos_emb(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
config_and_inputs[0].position_embedding_type = "relative_key"
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_for_next_sequence_prediction(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)
def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_for_warning_if_padding_and_no_attention_mask(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.model_tester.prepare_config_and_inputs()
# Set pad tokens in the input_ids
input_ids[0, 0] = config.pad_token_id
# Check for warnings if the attention_mask is missing.
logger = logging.get_logger("transformers.modeling_utils")
# clear cache so we can test the warning is emitted (from `warning_once`).
logger.warning_once.cache_clear()
with CaptureLogger(logger) as cl:
model = BertModel(config=config)
model.to(torch_device)
model.eval()
model(input_ids, attention_mask=None, token_type_ids=token_type_ids)
self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out)
@slow
def test_model_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = BertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@slow
@require_torch_accelerator
def test_torchscript_device_change(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == BertForMultipleChoice:
                continue
config.torchscript = True
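            # `torchscript=True` is meant to make the forward pass trace-friendly
            # (e.g. tuple outputs rather than ModelOutput objects) for `torch.jit.trace`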
model = model_class(config=config)
inputs_dict = self._prepare_for_class(inputs_dict, model_class)
traced_model = torch.jit.trace(
model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
)
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class BertModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_no_head_absolute_embedding(self):
model = BertModel.from_pretrained("bert-base-uncased")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
output = model(input_ids, attention_mask=attention_mask)[0]
expected_shape = torch.Size((1, 11, 768))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor([[[0.4249, 0.1008, 0.7531], [0.3771, 0.1188, 0.7467], [0.4152, 0.1098, 0.7108]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
@slow
def test_inference_no_head_relative_embedding_key(self):
model = BertModel.from_pretrained("zhiheng-huang/bert-base-uncased-embedding-relative-key")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
output = model(input_ids, attention_mask=attention_mask)[0]
expected_shape = torch.Size((1, 11, 768))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[0.0756, 0.3142, -0.5128], [0.3761, 0.3462, -0.5477], [0.2052, 0.3760, -0.1240]]]
)
self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
@slow
def test_inference_no_head_relative_embedding_key_query(self):
model = BertModel.from_pretrained("zhiheng-huang/bert-base-uncased-embedding-relative-key-query")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
output = model(input_ids, attention_mask=attention_mask)[0]
expected_shape = torch.Size((1, 11, 768))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[0.6496, 0.3784, 0.8203], [0.8148, 0.5656, 0.2636], [-0.0681, 0.5597, 0.7045]]]
)
self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 0
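A note on the tiny-config pattern used by `BertModelTester.get_config` above: shrinking `hidden_size`, `num_hidden_layers`, and friends keeps every forward pass in the suite cheap while still exercising the full architecture. A minimal sketch with the same defaults as the tester (the parameter-count printout is illustrative):

```python
from transformers import BertConfig, BertModel

# the same tiny values BertModelTester uses above
tiny_config = BertConfig(
    vocab_size=99,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=37,
)
model = BertModel(tiny_config)
# tens of thousands of parameters instead of ~110M for bert-base-uncased
print(sum(p.numel() for p in model.parameters()))
```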
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/bert/test_tokenization_bert.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = BertTokenizer
rust_tokenizer_class = BertTokenizerFast
test_rust_tokenizer = True
space_between_special_tokens = True
from_pretrained_filter = filter_non_english
def setUp(self):
super().setUp()
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def get_input_output_texts(self, tokenizer):
input_text = "UNwant\u00E9d,running"
output_text = "unwanted, running"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = self.tokenizer_class(self.vocab_file)
tokens = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = "UNwant\u00E9d,running"
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
# With lower casing
tokenizer = self.get_tokenizer(do_lower_case=True)
rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
sequence = "UNwant\u00E9d,running"
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
def test_chinese(self):
tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
def test_basic_tokenizer_lower(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
def test_basic_tokenizer_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_default(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_no_lower(self):
tokenizer = BasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_respects_never_split_tokens(self):
tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
)
def test_basic_tokenizer_splits_on_punctuation(self):
tokenizer = BasicTokenizer()
text = "a\n'll !!to?'d of, can't."
expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
self.assertListEqual(tokenizer.tokenize(text), expected)
def test_wordpiece_tokenizer(self):
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
vocab = {}
for i, token in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize(""), [])
self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def test_is_control(self):
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
def test_clean_text(self):
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
self.assertListEqual(
[rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
)
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_2 + [102]
def test_offsets_with_special_characters(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
tokens = tokenizer_r.encode_plus(
sentence,
return_attention_mask=False,
return_token_type_ids=False,
return_offsets_mapping=True,
add_special_tokens=True,
)
do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
)
self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
def test_change_tokenize_chinese_chars(self):
        list_of_common_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_common_chinese_char)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
kwargs["tokenize_chinese_chars"] = True
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
# it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_common_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_common_chinese_char)
kwargs["tokenize_chinese_chars"] = False
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
# it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_common_chinese_char)
                ]
self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 0
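The WordPiece expectations in `test_wordpiece_tokenizer` above follow greedy longest-prefix matching: at each position the tokenizer takes the longest vocabulary entry (continuation pieces carry a `##` prefix) and emits `[UNK]` for the whole word if some span has no match. A self-contained sketch of that matching loop (simplified; the real `WordpieceTokenizer` also caps the word length):

```python
def wordpiece(word, vocab, unk="[UNK]"):
    # greedy longest-match-first segmentation, as in WordpieceTokenizer
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        cur = None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece  # continuation pieces are prefixed
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # one unmatched span makes the whole word unknown
        tokens.append(cur)
        start = end
    return tokens

vocab = {"un", "##want", "##ed", "runn", "##ing"}
print(wordpiece("unwanted", vocab))   # ['un', '##want', '##ed']
print(wordpiece("unwantedx", vocab))  # ['[UNK]']
```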
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/cvt/test_modeling_tf_cvt.py
|
""" Testing suite for the Tensorflow CvT model. """
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
def create_and_test_config_common_properties(self):
config = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(config, "embed_dim"))
self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
def __init__(
self,
parent,
batch_size=13,
image_size=64,
num_channels=3,
embed_dim=[16, 32, 48],
num_heads=[1, 2, 3],
depth=[1, 2, 10],
patch_sizes=[7, 3, 3],
patch_stride=[4, 2, 2],
patch_padding=[2, 1, 1],
stride_kv=[2, 2, 2],
cls_token=[False, False, True],
attention_drop_rate=[0.0, 0.0, 0.0],
initializer_range=0.02,
layer_norm_eps=1e-12,
is_training=True,
use_labels=True,
num_labels=2,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_sizes = patch_sizes
self.patch_stride = patch_stride
self.patch_padding = patch_padding
self.is_training = is_training
self.use_labels = use_labels
self.num_labels = num_labels
self.num_channels = num_channels
self.embed_dim = embed_dim
self.num_heads = num_heads
self.stride_kv = stride_kv
self.depth = depth
self.cls_token = cls_token
self.attention_drop_rate = attention_drop_rate
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
# create a random int32 tensor of given shape
labels = ids_tensor([self.batch_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return CvtConfig(
image_size=self.image_size,
num_labels=self.num_labels,
num_channels=self.num_channels,
embed_dim=self.embed_dim,
num_heads=self.num_heads,
patch_sizes=self.patch_sizes,
patch_padding=self.patch_padding,
patch_stride=self.patch_stride,
stride_kv=self.stride_kv,
depth=self.depth,
cls_token=self.cls_token,
attention_drop_rate=self.attention_drop_rate,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, pixel_values, labels):
model = TFCvtModel(config=config)
result = model(pixel_values, training=False)
image_size = (self.image_size, self.image_size)
height, width = image_size[0], image_size[1]
for i in range(len(self.depth)):
height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
def create_and_check_for_image_classification(self, config, pixel_values, labels):
config.num_labels = self.num_labels
model = TFCvtForImageClassification(config)
result = model(pixel_values, labels=labels, training=False)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
    Here we also overwrite some of the tests of test_modeling_tf_common.py, as Cvt
    does not use input_ids, inputs_embeds, attention_mask and seq_length.
"""
all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
pipeline_model_mapping = (
{"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
if is_tf_available()
else {}
)
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
has_attentions = False
test_onnx = False
def setUp(self):
self.model_tester = TFCvtModelTester(self)
self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions")
def test_attention_outputs(self):
pass
@unittest.skip(reason="Cvt does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Cvt does not support input and output embeddings")
def test_model_common_attributes(self):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
reason="TF does not support backprop for grouped convolutions on CPU.",
)
def test_dataset_conversion(self):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
reason="TF does not support backprop for grouped convolutions on CPU.",
)
@slow
def test_keras_fit(self):
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
def test_keras_fit_mixed_precision(self):
policy = tf.keras.mixed_precision.Policy("mixed_float16")
tf.keras.mixed_precision.set_global_policy(policy)
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32")
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = len(self.model_tester.depth)
self.assertEqual(len(hidden_states), expected_num_layers)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]),
[
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFCvtModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
@cached_property
def default_image_processor(self):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
def test_inference_image_classification_head(self):
model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="tf")
# forward pass
outputs = model(**inputs)
# verify the logits
expected_shape = tf.TensorShape((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 0
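The height/width recurrence in `create_and_check_model` above is the usual convolution output-size formula, floor((size + 2·padding − kernel) / stride) + 1, applied once per CvT stage. With the tester defaults (image_size=64, patch sizes 7/3/3, strides 4/2/2, paddings 2/1/1) the spatial grid shrinks 64 → 16 → 8 → 4, so the asserted `last_hidden_state` shape is (batch, 48, 4, 4):

```python
from math import floor

size = 64  # tester's image_size
for kernel, stride, pad in zip([7, 3, 3], [4, 2, 2], [2, 1, 1]):
    size = floor((size + 2 * pad - kernel) / stride) + 1
    print(size)  # prints 16, then 8, then 4
```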
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/cvt/test_modeling_cvt.py
|
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch CvT model. """
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
def create_and_test_config_common_properties(self):
config = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(config, "embed_dim"))
self.parent.assertTrue(hasattr(config, "num_heads"))
class CvtModelTester:
def __init__(
self,
parent,
batch_size=13,
image_size=64,
num_channels=3,
embed_dim=[16, 32, 48],
num_heads=[1, 2, 3],
depth=[1, 2, 10],
patch_sizes=[7, 3, 3],
patch_stride=[4, 2, 2],
patch_padding=[2, 1, 1],
stride_kv=[2, 2, 2],
cls_token=[False, False, True],
attention_drop_rate=[0.0, 0.0, 0.0],
initializer_range=0.02,
layer_norm_eps=1e-12,
is_training=True,
use_labels=True,
num_labels=2, # Check
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_sizes = patch_sizes
self.patch_stride = patch_stride
self.patch_padding = patch_padding
self.is_training = is_training
self.use_labels = use_labels
self.num_labels = num_labels
self.num_channels = num_channels
self.embed_dim = embed_dim
self.num_heads = num_heads
self.stride_kv = stride_kv
self.depth = depth
self.cls_token = cls_token
self.attention_drop_rate = attention_drop_rate
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return CvtConfig(
image_size=self.image_size,
num_labels=self.num_labels,
num_channels=self.num_channels,
embed_dim=self.embed_dim,
num_heads=self.num_heads,
patch_sizes=self.patch_sizes,
patch_padding=self.patch_padding,
patch_stride=self.patch_stride,
stride_kv=self.stride_kv,
depth=self.depth,
cls_token=self.cls_token,
attention_drop_rate=self.attention_drop_rate,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, pixel_values, labels):
model = CvtModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
image_size = (self.image_size, self.image_size)
height, width = image_size[0], image_size[1]
for i in range(len(self.depth)):
height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
def create_and_check_for_image_classification(self, config, pixel_values, labels):
config.num_labels = self.num_labels
model = CvtForImageClassification(config)
model.to(torch_device)
model.eval()
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as Cvt does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
pipeline_model_mapping = (
{"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
if is_torch_available()
else {}
)
test_pruning = False
test_torchscript = False
test_resize_embeddings = False
test_head_masking = False
has_attentions = False
def setUp(self):
self.model_tester = CvtModelTester(self)
        self.config_tester = CvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions")
def test_attention_outputs(self):
pass
@unittest.skip(reason="Cvt does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Cvt does not support input and output embeddings")
def test_model_common_attributes(self):
pass
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = len(self.model_tester.depth)
self.assertEqual(len(hidden_states), expected_num_layers)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]),
[
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = CvtModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
@cached_property
def default_image_processor(self):
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
def test_inference_image_classification_head(self):
model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 0
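A note on the `@slow` markers used across these files: `transformers.testing_utils.slow` gates a test behind an environment flag so that default CI runs skip checkpoint downloads. A minimal sketch of the same idea (the `RUN_SLOW` variable name follows the transformers convention; this helper is illustrative, not the library's implementation):

```python
import os
import unittest


def slow(test_case):
    """Skip the test unless RUN_SLOW is set in the environment (illustrative sketch)."""
    run_slow = os.environ.get("RUN_SLOW", "0").lower() in ("1", "true", "yes")
    return unittest.skipUnless(run_slow, "test is slow; set RUN_SLOW=1 to run")(test_case)


class ExampleTest(unittest.TestCase):
    @slow
    def test_expensive_inference(self):
        # a real test would load a pretrained checkpoint here
        self.assertTrue(True)
```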
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/sam/test_modeling_tf_sam.py
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the TensorFlow SAM model. """
from __future__ import annotations
import inspect
import unittest
import numpy as np
import requests
from transformers import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import SamProcessor, TFSamModel
if is_vision_available():
from PIL import Image
class TFSamPromptEncoderTester:
def __init__(
self,
        batch_size=2,  # needed by prepare_config_and_inputs below
        hidden_size=32,
input_image_size=24,
patch_size=2,
mask_input_channels=4,
num_point_embeddings=4,
hidden_act="gelu",
):
        self.batch_size = batch_size
        self.hidden_size = hidden_size
self.input_image_size = input_image_size
self.patch_size = patch_size
self.mask_input_channels = mask_input_channels
self.num_point_embeddings = num_point_embeddings
self.hidden_act = hidden_act
def get_config(self):
return SamPromptEncoderConfig(
image_size=self.input_image_size,
patch_size=self.patch_size,
mask_input_channels=self.mask_input_channels,
hidden_size=self.hidden_size,
num_point_embeddings=self.num_point_embeddings,
hidden_act=self.hidden_act,
)
def prepare_config_and_inputs(self):
dummy_points = floats_tensor([self.batch_size, 3, 2])
config = self.get_config()
return config, dummy_points
class TFSamMaskDecoderTester:
def __init__(
self,
        batch_size=2,  # needed by prepare_config_and_inputs below
        hidden_size=32,
hidden_act="relu",
mlp_dim=64,
num_hidden_layers=2,
num_attention_heads=4,
attention_downsample_rate=2,
num_multimask_outputs=3,
iou_head_depth=3,
iou_head_hidden_dim=32,
layer_norm_eps=1e-6,
):
        self.batch_size = batch_size
        self.hidden_size = hidden_size
self.hidden_act = hidden_act
self.mlp_dim = mlp_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.attention_downsample_rate = attention_downsample_rate
self.num_multimask_outputs = num_multimask_outputs
self.iou_head_depth = iou_head_depth
self.iou_head_hidden_dim = iou_head_hidden_dim
self.layer_norm_eps = layer_norm_eps
def get_config(self):
return SamMaskDecoderConfig(
hidden_size=self.hidden_size,
hidden_act=self.hidden_act,
mlp_dim=self.mlp_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
attention_downsample_rate=self.attention_downsample_rate,
num_multimask_outputs=self.num_multimask_outputs,
iou_head_depth=self.iou_head_depth,
iou_head_hidden_dim=self.iou_head_hidden_dim,
layer_norm_eps=self.layer_norm_eps,
)
def prepare_config_and_inputs(self):
config = self.get_config()
dummy_inputs = {
"image_embedding": floats_tensor([self.batch_size, self.hidden_size]),
}
return config, dummy_inputs
class TFSamModelTester:
def __init__(
self,
parent,
hidden_size=36,
intermediate_size=72,
projection_dim=62,
output_channels=32,
num_hidden_layers=2,
num_attention_heads=4,
num_channels=3,
image_size=24,
patch_size=2,
hidden_act="gelu",
layer_norm_eps=1e-06,
dropout=0.0,
attention_dropout=0.0,
initializer_range=0.02,
initializer_factor=1.0,
qkv_bias=True,
mlp_ratio=4.0,
use_abs_pos=True,
use_rel_pos=True,
rel_pos_zero_init=False,
window_size=14,
global_attn_indexes=[2, 5, 8, 11],
num_pos_feats=16,
mlp_dim=None,
batch_size=2,
):
self.parent = parent
self.image_size = image_size
self.patch_size = patch_size
self.output_channels = output_channels
self.num_channels = num_channels
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.qkv_bias = qkv_bias
self.mlp_ratio = mlp_ratio
self.use_abs_pos = use_abs_pos
self.use_rel_pos = use_rel_pos
self.rel_pos_zero_init = rel_pos_zero_init
self.window_size = window_size
self.global_attn_indexes = global_attn_indexes
self.num_pos_feats = num_pos_feats
self.mlp_dim = mlp_dim
self.batch_size = batch_size
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
self.prompt_encoder_tester = TFSamPromptEncoderTester()
self.mask_decoder_tester = TFSamMaskDecoderTester()
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = self.get_config()
return config, pixel_values
def get_config(self):
vision_config = SamVisionConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
initializer_factor=self.initializer_factor,
output_channels=self.output_channels,
qkv_bias=self.qkv_bias,
mlp_ratio=self.mlp_ratio,
use_abs_pos=self.use_abs_pos,
use_rel_pos=self.use_rel_pos,
rel_pos_zero_init=self.rel_pos_zero_init,
window_size=self.window_size,
global_attn_indexes=self.global_attn_indexes,
num_pos_feats=self.num_pos_feats,
mlp_dim=self.mlp_dim,
)
prompt_encoder_config = self.prompt_encoder_tester.get_config()
mask_decoder_config = self.mask_decoder_tester.get_config()
return SamConfig(
vision_config=vision_config,
prompt_encoder_config=prompt_encoder_config,
mask_decoder_config=mask_decoder_config,
)
def create_and_check_model(self, config, pixel_values):
model = TFSamModel(config=config)
result = model(pixel_values)
self.parent.assertEqual(result.iou_scores.shape, (self.batch_size, 1, 3))
self.parent.assertEqual(result.pred_masks.shape[:3], (self.batch_size, 1, 3))
def create_and_check_get_image_features(self, config, pixel_values):
model = TFSamModel(config=config)
result = model.get_image_embeddings(pixel_values)
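        # the vision encoder downsamples by patch_size, so each image embedding is an
        # (output_channels, 24 // 2, 24 // 2) = (output_channels, 12, 12) feature map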
self.parent.assertEqual(result[0].shape, (self.output_channels, 12, 12))
def create_and_check_get_image_hidden_states(self, config, pixel_values):
model = TFSamModel(config=config)
result = model.vision_encoder(
pixel_values,
output_hidden_states=True,
return_dict=True,
)
        # after computing the convolutional features, hidden states are channels-last:
        # (batch_size, image_size // patch_size, image_size // patch_size, hidden_size)
expected_hidden_states_shape = (self.batch_size, 12, 12, 36)
self.parent.assertEqual(len(result[1]), self.num_hidden_layers + 1)
self.parent.assertEqual(result[1][0].shape, expected_hidden_states_shape)
result = model.vision_encoder(
pixel_values,
output_hidden_states=True,
return_dict=False,
)
# after computing the convolutional features
expected_hidden_states_shape = (self.batch_size, 12, 12, 36)
self.parent.assertEqual(len(result[1]), self.num_hidden_layers + 1)
self.parent.assertEqual(result[1][0].shape, expected_hidden_states_shape)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFSamModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
    Here we also overwrite some of the tests of test_modeling_tf_common.py, as SAM's vision encoder does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (TFSamModel,) if is_tf_available() else ()
pipeline_model_mapping = (
{"feature-extraction": TFSamModel, "mask-generation": TFSamModel} if is_tf_available() else {}
)
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
test_onnx = False
# TODO: Fix me @Arthur: `run_batch_test` in `tests/test_pipeline_mixin.py` not working
def is_pipeline_test_to_skip(
self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
):
return True
def setUp(self):
self.model_tester = TFSamModelTester(self)
self.vision_config_tester = ConfigTester(self, config_class=SamVisionConfig, has_text_modality=False)
self.prompt_encoder_config_tester = ConfigTester(
self,
config_class=SamPromptEncoderConfig,
has_text_modality=False,
num_attention_heads=12,
num_hidden_layers=2,
)
self.mask_decoder_config_tester = ConfigTester(
self, config_class=SamMaskDecoderConfig, has_text_modality=False
)
def test_config(self):
self.vision_config_tester.run_common_tests()
self.prompt_encoder_config_tester.run_common_tests()
self.mask_decoder_config_tester.run_common_tests()
@unittest.skip(reason="SAM's vision encoder does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_common_attributes(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_get_image_features(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_get_image_features(*config_and_inputs)
def test_image_hidden_states(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_get_image_hidden_states(*config_and_inputs)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
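        # with a 24 x 24 image and patch_size=2 the encoder works on a 12 x 12 token grid that is
        # padded up to window_size=14, giving 14 * 14 = 196 tokens per windowed attention; the mask
        # decoder cross-attends over the 12 * 12 = 144 image-embedding tokens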
expected_vision_attention_shape = (
self.model_tester.batch_size * self.model_tester.num_attention_heads,
196,
196,
)
expected_mask_decoder_attention_shape = (self.model_tester.batch_size, 1, 144, 32)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
vision_attentions = outputs.vision_attentions
self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers)
mask_decoder_attentions = outputs.mask_decoder_attentions
self.assertEqual(len(mask_decoder_attentions), self.model_tester.mask_decoder_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
vision_attentions = outputs.vision_attentions
self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers)
mask_decoder_attentions = outputs.mask_decoder_attentions
self.assertEqual(len(mask_decoder_attentions), self.model_tester.mask_decoder_tester.num_hidden_layers)
self.assertListEqual(
list(vision_attentions[0].shape[-4:]),
list(expected_vision_attention_shape),
)
self.assertListEqual(
list(mask_decoder_attentions[0].shape[-4:]),
list(expected_mask_decoder_attention_shape),
)
@unittest.skip(reason="Hidden_states is tested in create_and_check_model tests")
def test_hidden_states_output(self):
pass
@slow
def test_model_from_pretrained(self):
model = TFSamModel.from_pretrained("facebook/sam-vit-base") # sam-vit-huge blows out our memory
self.assertIsNotNone(model)
def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=5e-4, name="outputs", attributes=None):
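        # use a slightly higher tolerance than the default to make the PT/TF equivalence checks non-flaky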
super().check_pt_tf_outputs(
tf_outputs=tf_outputs,
pt_outputs=pt_outputs,
model_class=model_class,
tol=tol,
name=name,
attributes=attributes,
)
def prepare_image():
img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
return raw_image
def prepare_dog_img():
img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/dog-sam.png"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
return raw_image
@require_tf
@slow
class TFSamModelIntegrationTest(unittest.TestCase):
def test_inference_mask_generation_no_point(self):
model = TFSamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
raw_image = prepare_image()
inputs = processor(images=raw_image, return_tensors="tf")
outputs = model(**inputs)
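        # iou_scores carries one confidence per predicted mask; pred_masks[0, 0, 0, 0, :3] picks the
        # first three logits of the first predicted mask as a cheap regression check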
scores = tf.squeeze(outputs.iou_scores)
masks = outputs.pred_masks[0, 0, 0, 0, :3]
self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.4515), atol=2e-4))
self.assertTrue(np.allclose(masks.numpy(), np.array([-4.1807, -3.4949, -3.4483]), atol=1e-2))
def test_inference_mask_generation_one_point_one_bb(self):
model = TFSamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
raw_image = prepare_image()
input_boxes = [[[650, 900, 1000, 1250]]]
input_points = [[[820, 1080]]]
inputs = processor(images=raw_image, input_boxes=input_boxes, input_points=input_points, return_tensors="tf")
outputs = model(**inputs)
scores = tf.squeeze(outputs.iou_scores)
masks = outputs.pred_masks[0, 0, 0, 0, :3]
self.assertTrue(np.allclose(scores[-1], np.array(0.9566), atol=2e-4))
self.assertTrue(np.allclose(masks.numpy(), np.array([-12.7657, -12.3683, -12.5985]), atol=2e-2))
def test_inference_mask_generation_batched_points_batched_images(self):
model = TFSamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
raw_image = prepare_image()
input_points = [
[[[820, 1080]], [[820, 1080]], [[820, 1080]], [[820, 1080]]],
[[[510, 1080]], [[820, 1080]], [[820, 1080]], [[820, 1080]]],
]
inputs = processor(images=[raw_image, raw_image], input_points=input_points, return_tensors="tf")
outputs = model(**inputs)
scores = tf.squeeze(outputs.iou_scores)
masks = outputs.pred_masks[0, 0, 0, 0, :3]
EXPECTED_SCORES = np.array(
[
[
[0.6765, 0.9379, 0.8803],
[0.6765, 0.9379, 0.8803],
[0.6765, 0.9379, 0.8803],
[0.6765, 0.9379, 0.8803],
],
[
[0.3317, 0.7264, 0.7646],
[0.6765, 0.9379, 0.8803],
[0.6765, 0.9379, 0.8803],
[0.6765, 0.9379, 0.8803],
],
]
)
EXPECTED_MASKS = np.array([-2.8552, -2.7990, -2.9612])
self.assertTrue(np.allclose(scores.numpy(), EXPECTED_SCORES, atol=1e-3))
self.assertTrue(np.allclose(masks.numpy(), EXPECTED_MASKS, atol=3e-2))
def test_inference_mask_generation_one_point_one_bb_zero(self):
model = TFSamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
raw_image = prepare_image()
input_boxes = [[[620, 900, 1000, 1255]]]
input_points = [[[820, 1080]]]
labels = [[0]]
inputs = processor(
images=raw_image,
input_boxes=input_boxes,
input_points=input_points,
input_labels=labels,
return_tensors="tf",
)
outputs = model(**inputs)
scores = tf.squeeze(outputs.iou_scores)
self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.7894), atol=1e-4))
def test_inference_mask_generation_one_point(self):
model = TFSamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
raw_image = prepare_image()
input_points = [[[400, 650]]]
input_labels = [[1]]
inputs = processor(images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="tf")
outputs = model(**inputs)
scores = tf.squeeze(outputs.iou_scores)
self.assertTrue(np.allclose(scores[-1], np.array(0.9675), atol=1e-4))
# With no label
input_points = [[[400, 650]]]
inputs = processor(images=raw_image, input_points=input_points, return_tensors="tf")
outputs = model(**inputs)
scores = tf.squeeze(outputs.iou_scores)
self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.9675), atol=1e-4))
def test_inference_mask_generation_two_points(self):
model = TFSamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
raw_image = prepare_image()
input_points = [[[400, 650], [800, 650]]]
input_labels = [[1, 1]]
inputs = processor(images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="tf")
outputs = model(**inputs)
scores = tf.squeeze(outputs.iou_scores)
self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.9762), atol=1e-4))
# no labels
inputs = processor(images=raw_image, input_points=input_points, return_tensors="tf")
outputs = model(**inputs)
scores = tf.squeeze(outputs.iou_scores)
self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.9762), atol=1e-4))
def test_inference_mask_generation_two_points_batched(self):
model = TFSamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
raw_image = prepare_image()
input_points = [[[400, 650], [800, 650]], [[400, 650]]]
input_labels = [[1, 1], [1]]
inputs = processor(
images=[raw_image, raw_image], input_points=input_points, input_labels=input_labels, return_tensors="tf"
)
outputs = model(**inputs)
scores = tf.squeeze(outputs.iou_scores)
self.assertTrue(np.allclose(scores[0][-1].numpy(), np.array(0.9762), atol=1e-4))
self.assertTrue(np.allclose(scores[1][-1], np.array(0.9637), atol=1e-4))
def test_inference_mask_generation_one_box(self):
model = TFSamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
raw_image = prepare_image()
input_boxes = [[[75, 275, 1725, 850]]]
inputs = processor(images=raw_image, input_boxes=input_boxes, return_tensors="tf")
outputs = model(**inputs)
scores = tf.squeeze(outputs.iou_scores)
self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.7937), atol=1e-4))
def test_inference_mask_generation_batched_image_one_point(self):
model = TFSamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
raw_image = prepare_image()
raw_dog_image = prepare_dog_img()
input_points = [[[820, 1080]], [[220, 470]]]
inputs = processor(images=[raw_image, raw_dog_image], input_points=input_points, return_tensors="tf")
outputs = model(**inputs)
scores_batched = tf.squeeze(outputs.iou_scores)
input_points = [[[220, 470]]]
inputs = processor(images=raw_dog_image, input_points=input_points, return_tensors="tf")
outputs = model(**inputs)
scores_single = tf.squeeze(outputs.iou_scores)
self.assertTrue(np.allclose(scores_batched[1, :].numpy(), scores_single.numpy(), atol=1e-4))
def test_inference_mask_generation_two_points_point_batch(self):
model = TFSamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
raw_image = prepare_image()
input_points = tf.convert_to_tensor([[[400, 650]], [[220, 470]]]) # fmt: skip
input_points = tf.expand_dims(input_points, 0)
inputs = processor(raw_image, input_points=input_points, return_tensors="tf")
outputs = model(**inputs)
iou_scores = outputs.iou_scores
self.assertTrue(iou_scores.shape == (1, 2, 3))
self.assertTrue(
np.allclose(
iou_scores.numpy(),
np.array([[[0.9105, 0.9825, 0.9675], [0.7646, 0.7943, 0.7774]]]),
atol=1e-4,
rtol=1e-4,
)
)
def test_inference_mask_generation_three_boxes_point_batch(self):
model = TFSamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
raw_image = prepare_image()
# fmt: off
input_boxes = tf.convert_to_tensor([[[620, 900, 1000, 1255]], [[75, 275, 1725, 850]], [[75, 275, 1725, 850]]])
EXPECTED_IOU = np.array([[[0.9773, 0.9881, 0.9522],
[0.5996, 0.7661, 0.7937],
[0.5996, 0.7661, 0.7937]]])
# fmt: on
input_boxes = tf.expand_dims(input_boxes, 0)
inputs = processor(raw_image, input_boxes=input_boxes, return_tensors="tf")
outputs = model(**inputs)
iou_scores = outputs.iou_scores
self.assertTrue(iou_scores.shape == (1, 3, 3))
self.assertTrue(np.allclose(iou_scores.numpy(), EXPECTED_IOU, atol=1e-4, rtol=1e-4))
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/sam/test_modeling_sam.py
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch SAM model. """
import gc
import unittest
import requests
from transformers import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig, pipeline
from transformers.testing_utils import backend_empty_cache, require_torch, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SamModel, SamProcessor
from transformers.models.sam.modeling_sam import SAM_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class SamPromptEncoderTester:
def __init__(
self,
hidden_size=32,
input_image_size=24,
patch_size=2,
mask_input_channels=4,
num_point_embeddings=4,
hidden_act="gelu",
):
self.hidden_size = hidden_size
self.input_image_size = input_image_size
self.patch_size = patch_size
self.mask_input_channels = mask_input_channels
self.num_point_embeddings = num_point_embeddings
self.hidden_act = hidden_act
def get_config(self):
return SamPromptEncoderConfig(
image_size=self.input_image_size,
patch_size=self.patch_size,
mask_input_channels=self.mask_input_channels,
hidden_size=self.hidden_size,
num_point_embeddings=self.num_point_embeddings,
hidden_act=self.hidden_act,
)
def prepare_config_and_inputs(self):
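        # a dummy batch of 3 (x, y) prompt points per image; note this relies on a `batch_size`
        # attribute the tester never sets, so this helper appears unused by the tests in this file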
dummy_points = floats_tensor([self.batch_size, 3, 2])
config = self.get_config()
return config, dummy_points
class SamMaskDecoderTester:
def __init__(
self,
hidden_size=32,
hidden_act="relu",
mlp_dim=64,
num_hidden_layers=2,
num_attention_heads=4,
attention_downsample_rate=2,
num_multimask_outputs=3,
iou_head_depth=3,
iou_head_hidden_dim=32,
layer_norm_eps=1e-6,
):
self.hidden_size = hidden_size
self.hidden_act = hidden_act
self.mlp_dim = mlp_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.attention_downsample_rate = attention_downsample_rate
self.num_multimask_outputs = num_multimask_outputs
self.iou_head_depth = iou_head_depth
self.iou_head_hidden_dim = iou_head_hidden_dim
self.layer_norm_eps = layer_norm_eps
def get_config(self):
return SamMaskDecoderConfig(
hidden_size=self.hidden_size,
hidden_act=self.hidden_act,
mlp_dim=self.mlp_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
attention_downsample_rate=self.attention_downsample_rate,
num_multimask_outputs=self.num_multimask_outputs,
iou_head_depth=self.iou_head_depth,
iou_head_hidden_dim=self.iou_head_hidden_dim,
layer_norm_eps=self.layer_norm_eps,
)
def prepare_config_and_inputs(self):
config = self.get_config()
dummy_inputs = {
"image_embedding": floats_tensor([self.batch_size, self.hidden_size]),
}
return config, dummy_inputs
class SamModelTester:
def __init__(
self,
parent,
hidden_size=36,
intermediate_size=72,
projection_dim=62,
output_channels=32,
num_hidden_layers=2,
num_attention_heads=4,
num_channels=3,
image_size=24,
patch_size=2,
hidden_act="gelu",
layer_norm_eps=1e-06,
dropout=0.0,
attention_dropout=0.0,
initializer_range=0.02,
initializer_factor=1.0,
qkv_bias=True,
mlp_ratio=4.0,
use_abs_pos=True,
use_rel_pos=True,
rel_pos_zero_init=False,
window_size=14,
global_attn_indexes=[2, 5, 8, 11],
num_pos_feats=16,
mlp_dim=None,
batch_size=2,
):
self.parent = parent
self.image_size = image_size
self.patch_size = patch_size
self.output_channels = output_channels
self.num_channels = num_channels
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.qkv_bias = qkv_bias
self.mlp_ratio = mlp_ratio
self.use_abs_pos = use_abs_pos
self.use_rel_pos = use_rel_pos
self.rel_pos_zero_init = rel_pos_zero_init
self.window_size = window_size
self.global_attn_indexes = global_attn_indexes
self.num_pos_feats = num_pos_feats
self.mlp_dim = mlp_dim
self.batch_size = batch_size
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
self.prompt_encoder_tester = SamPromptEncoderTester()
self.mask_decoder_tester = SamMaskDecoderTester()
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = self.get_config()
return config, pixel_values
def get_config(self):
vision_config = SamVisionConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
initializer_factor=self.initializer_factor,
output_channels=self.output_channels,
qkv_bias=self.qkv_bias,
mlp_ratio=self.mlp_ratio,
use_abs_pos=self.use_abs_pos,
use_rel_pos=self.use_rel_pos,
rel_pos_zero_init=self.rel_pos_zero_init,
window_size=self.window_size,
global_attn_indexes=self.global_attn_indexes,
num_pos_feats=self.num_pos_feats,
mlp_dim=self.mlp_dim,
)
prompt_encoder_config = self.prompt_encoder_tester.get_config()
mask_decoder_config = self.mask_decoder_tester.get_config()
return SamConfig(
vision_config=vision_config,
prompt_encoder_config=prompt_encoder_config,
mask_decoder_config=mask_decoder_config,
)
def create_and_check_model(self, config, pixel_values):
model = SamModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(pixel_values)
self.parent.assertEqual(result.iou_scores.shape, (self.batch_size, 1, 3))
self.parent.assertEqual(result.pred_masks.shape[:3], (self.batch_size, 1, 3))
def create_and_check_get_image_features(self, config, pixel_values):
model = SamModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model.get_image_embeddings(pixel_values)
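        # the vision encoder downsamples by patch_size, so each image embedding is an
        # (output_channels, 24 // 2, 24 // 2) = (output_channels, 12, 12) feature map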
self.parent.assertEqual(result[0].shape, (self.output_channels, 12, 12))
def create_and_check_get_image_hidden_states(self, config, pixel_values):
model = SamModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model.vision_encoder(
pixel_values,
output_hidden_states=True,
return_dict=True,
)
        # after computing the convolutional features, hidden states are channels-last:
        # (batch_size, image_size // patch_size, image_size // patch_size, hidden_size)
expected_hidden_states_shape = (self.batch_size, 12, 12, 36)
self.parent.assertEqual(len(result[1]), self.num_hidden_layers + 1)
self.parent.assertEqual(result[1][0].shape, expected_hidden_states_shape)
with torch.no_grad():
result = model.vision_encoder(
pixel_values,
output_hidden_states=True,
return_dict=False,
)
# after computing the convolutional features
expected_hidden_states_shape = (self.batch_size, 12, 12, 36)
self.parent.assertEqual(len(result[1]), self.num_hidden_layers + 1)
self.parent.assertEqual(result[1][0].shape, expected_hidden_states_shape)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SamModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as SAM's vision encoder does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (SamModel,) if is_torch_available() else ()
pipeline_model_mapping = (
{"feature-extraction": SamModel, "mask-generation": SamModel} if is_torch_available() else {}
)
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
test_torchscript = False
# TODO: Fix me @Arthur: `run_batch_test` in `tests/test_pipeline_mixin.py` not working
def is_pipeline_test_to_skip(
self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
):
return True
def setUp(self):
self.model_tester = SamModelTester(self)
self.vision_config_tester = ConfigTester(self, config_class=SamVisionConfig, has_text_modality=False)
self.prompt_encoder_config_tester = ConfigTester(
self,
config_class=SamPromptEncoderConfig,
has_text_modality=False,
num_attention_heads=12,
num_hidden_layers=2,
)
self.mask_decoder_config_tester = ConfigTester(
self, config_class=SamMaskDecoderConfig, has_text_modality=False
)
def test_config(self):
self.vision_config_tester.run_common_tests()
self.prompt_encoder_config_tester.run_common_tests()
self.mask_decoder_config_tester.run_common_tests()
@unittest.skip(reason="SAM's vision encoder does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_common_attributes(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_get_image_features(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_get_image_features(*config_and_inputs)
def test_image_hidden_states(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_get_image_hidden_states(*config_and_inputs)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
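        # with a 24 x 24 image and patch_size=2 the encoder works on a 12 x 12 token grid that is
        # padded up to window_size=14, giving 14 * 14 = 196 tokens per windowed attention; the mask
        # decoder cross-attends over the 12 * 12 = 144 image-embedding tokens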
expected_vision_attention_shape = (
self.model_tester.batch_size * self.model_tester.num_attention_heads,
196,
196,
)
expected_mask_decoder_attention_shape = (self.model_tester.batch_size, 1, 144, 32)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
vision_attentions = outputs.vision_attentions
self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers)
mask_decoder_attentions = outputs.mask_decoder_attentions
self.assertEqual(len(mask_decoder_attentions), self.model_tester.mask_decoder_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
vision_attentions = outputs.vision_attentions
self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers)
mask_decoder_attentions = outputs.mask_decoder_attentions
self.assertEqual(len(mask_decoder_attentions), self.model_tester.mask_decoder_tester.num_hidden_layers)
self.assertListEqual(
list(vision_attentions[0].shape[-4:]),
list(expected_vision_attention_shape),
)
self.assertListEqual(
list(mask_decoder_attentions[0].shape[-4:]),
list(expected_mask_decoder_attention_shape),
)
@unittest.skip(reason="SamModel does not support training")
def test_training(self):
pass
@unittest.skip(reason="SamModel does not support training")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="SamModel has no base class and is not available in MODEL_MAPPING")
def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(reason="SamModel has no base class and is not available in MODEL_MAPPING")
def test_save_load_fast_init_to_base(self):
pass
@unittest.skip(reason="SamModel does not support training")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="Hidden_states is tested in create_and_check_model tests")
def test_hidden_states_output(self):
pass
def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=5e-5, name="outputs", attributes=None):
# Use a slightly higher default tol to make the tests non-flaky
super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol=tol, name=name, attributes=attributes)
@slow
def test_model_from_pretrained(self):
for model_name in SAM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = SamModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def prepare_image():
img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
return raw_image
def prepare_dog_img():
img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/dog-sam.png"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
return raw_image
@slow
class SamModelIntegrationTest(unittest.TestCase):
def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
backend_empty_cache(torch_device)
def test_inference_mask_generation_no_point(self):
model = SamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
model.to(torch_device)
model.eval()
raw_image = prepare_image()
inputs = processor(images=raw_image, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
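        # iou_scores carries one confidence per predicted mask; pred_masks[0, 0, 0, 0, :3] picks the
        # first three logits of the first predicted mask as a cheap regression check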
scores = outputs.iou_scores.squeeze()
masks = outputs.pred_masks[0, 0, 0, 0, :3]
self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.4515), atol=2e-4))
self.assertTrue(torch.allclose(masks, torch.tensor([-4.1800, -3.4948, -3.4481]).to(torch_device), atol=2e-4))
def test_inference_mask_generation_one_point_one_bb(self):
model = SamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
model.to(torch_device)
model.eval()
raw_image = prepare_image()
input_boxes = [[[650, 900, 1000, 1250]]]
input_points = [[[820, 1080]]]
inputs = processor(
images=raw_image, input_boxes=input_boxes, input_points=input_points, return_tensors="pt"
).to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
scores = outputs.iou_scores.squeeze()
masks = outputs.pred_masks[0, 0, 0, 0, :3]
self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9566), atol=2e-4))
self.assertTrue(
torch.allclose(masks, torch.tensor([-12.7729, -12.3665, -12.6061]).to(torch_device), atol=2e-4)
)
def test_inference_mask_generation_batched_points_batched_images(self):
model = SamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
model.to(torch_device)
model.eval()
raw_image = prepare_image()
input_points = [
[[[820, 1080]], [[820, 1080]], [[820, 1080]], [[820, 1080]]],
[[[510, 1080]], [[820, 1080]], [[820, 1080]], [[820, 1080]]],
]
inputs = processor(images=[raw_image, raw_image], input_points=input_points, return_tensors="pt").to(
torch_device
)
with torch.no_grad():
outputs = model(**inputs)
scores = outputs.iou_scores.squeeze().cpu()
masks = outputs.pred_masks[0, 0, 0, 0, :3].cpu()
EXPECTED_SCORES = torch.tensor(
[
[
[0.6765, 0.9379, 0.8803],
[0.6765, 0.9379, 0.8803],
[0.6765, 0.9379, 0.8803],
[0.6765, 0.9379, 0.8803],
],
[
[0.3317, 0.7264, 0.7646],
[0.6765, 0.9379, 0.8803],
[0.6765, 0.9379, 0.8803],
[0.6765, 0.9379, 0.8803],
],
]
)
EXPECTED_MASKS = torch.tensor([-2.8550, -2.7988, -2.9625])
self.assertTrue(torch.allclose(scores, EXPECTED_SCORES, atol=1e-3))
self.assertTrue(torch.allclose(masks, EXPECTED_MASKS, atol=1e-3))
def test_inference_mask_generation_one_point_one_bb_zero(self):
model = SamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
model.to(torch_device)
model.eval()
raw_image = prepare_image()
input_boxes = [[[620, 900, 1000, 1255]]]
input_points = [[[820, 1080]]]
labels = [[0]]
inputs = processor(
images=raw_image,
input_boxes=input_boxes,
input_points=input_points,
input_labels=labels,
return_tensors="pt",
).to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
scores = outputs.iou_scores.squeeze()
self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.7894), atol=1e-4))
def test_inference_mask_generation_one_point(self):
model = SamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
model.to(torch_device)
model.eval()
raw_image = prepare_image()
input_points = [[[400, 650]]]
input_labels = [[1]]
inputs = processor(
images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt"
).to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
scores = outputs.iou_scores.squeeze()
self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9675), atol=1e-4))
# With no label
input_points = [[[400, 650]]]
inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
scores = outputs.iou_scores.squeeze()
self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9675), atol=1e-4))
def test_inference_mask_generation_two_points(self):
model = SamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
model.to(torch_device)
model.eval()
raw_image = prepare_image()
input_points = [[[400, 650], [800, 650]]]
input_labels = [[1, 1]]
inputs = processor(
images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt"
).to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
scores = outputs.iou_scores.squeeze()
self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9762), atol=1e-4))
# no labels
inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
scores = outputs.iou_scores.squeeze()
self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.9762), atol=1e-4))
def test_inference_mask_generation_two_points_batched(self):
model = SamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
model.to(torch_device)
model.eval()
raw_image = prepare_image()
input_points = [[[400, 650], [800, 650]], [[400, 650]]]
input_labels = [[1, 1], [1]]
inputs = processor(
images=[raw_image, raw_image], input_points=input_points, input_labels=input_labels, return_tensors="pt"
).to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
scores = outputs.iou_scores.squeeze()
self.assertTrue(torch.allclose(scores[0][-1], torch.tensor(0.9762), atol=1e-4))
self.assertTrue(torch.allclose(scores[1][-1], torch.tensor(0.9637), atol=1e-4))
def test_inference_mask_generation_one_box(self):
model = SamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
model.to(torch_device)
model.eval()
raw_image = prepare_image()
input_boxes = [[[75, 275, 1725, 850]]]
inputs = processor(images=raw_image, input_boxes=input_boxes, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
scores = outputs.iou_scores.squeeze()
self.assertTrue(torch.allclose(scores[-1], torch.tensor(0.7937), atol=1e-4))
def test_inference_mask_generation_batched_image_one_point(self):
model = SamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
model.to(torch_device)
model.eval()
raw_image = prepare_image()
raw_dog_image = prepare_dog_img()
input_points = [[[820, 1080]], [[220, 470]]]
inputs = processor(images=[raw_image, raw_dog_image], input_points=input_points, return_tensors="pt").to(
torch_device
)
with torch.no_grad():
outputs = model(**inputs)
scores_batched = outputs.iou_scores.squeeze()
input_points = [[[220, 470]]]
inputs = processor(images=raw_dog_image, input_points=input_points, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
scores_single = outputs.iou_scores.squeeze()
self.assertTrue(torch.allclose(scores_batched[1, :], scores_single, atol=1e-4))
def test_inference_mask_generation_two_points_point_batch(self):
model = SamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
model.to(torch_device)
model.eval()
raw_image = prepare_image()
input_points = torch.Tensor([[[400, 650]], [[220, 470]]]).cpu() # fmt: skip
input_points = input_points.unsqueeze(0)
inputs = processor(raw_image, input_points=input_points, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
iou_scores = outputs.iou_scores.cpu()
self.assertTrue(iou_scores.shape == (1, 2, 3))
torch.testing.assert_allclose(
iou_scores, torch.tensor([[[0.9105, 0.9825, 0.9675], [0.7646, 0.7943, 0.7774]]]), atol=1e-4, rtol=1e-4
)
def test_inference_mask_generation_three_boxes_point_batch(self):
model = SamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
model.to(torch_device)
model.eval()
raw_image = prepare_image()
# fmt: off
input_boxes = torch.Tensor([[[620, 900, 1000, 1255]], [[75, 275, 1725, 850]], [[75, 275, 1725, 850]]]).cpu()
EXPECTED_IOU = torch.tensor([[[0.9773, 0.9881, 0.9522],
[0.5996, 0.7661, 0.7937],
[0.5996, 0.7661, 0.7937]]])
# fmt: on
input_boxes = input_boxes.unsqueeze(0)
inputs = processor(raw_image, input_boxes=input_boxes, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
iou_scores = outputs.iou_scores.cpu()
self.assertTrue(iou_scores.shape == (1, 3, 3))
torch.testing.assert_allclose(iou_scores, EXPECTED_IOU, atol=1e-4, rtol=1e-4)
def test_dummy_pipeline_generation(self):
generator = pipeline("mask-generation", model="facebook/sam-vit-base", device=torch_device)
raw_image = prepare_image()
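        # points_per_batch bounds how many prompt points the pipeline runs through the mask decoder per forward pass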
_ = generator(raw_image, points_per_batch=64)
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/sam/test_processor_sam.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
image_processor = SamImageProcessor()
processor = SamProcessor(image_processor)
processor.save_pretrained(self.tmpdirname)
def get_image_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def prepare_image_inputs(self):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
"""
image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def test_save_load_pretrained_additional_features(self):
processor = SamProcessor(image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, SamImageProcessor)
def test_image_processor(self):
image_processor = self.get_image_processor()
processor = SamProcessor(image_processor=image_processor)
image_input = self.prepare_image_inputs()
input_feat_extract = image_processor(image_input, return_tensors="np")
input_processor = processor(images=image_input, return_tensors="np")
input_feat_extract.pop("original_sizes") # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes") # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
@require_torch
def test_post_process_masks(self):
image_processor = self.get_image_processor()
processor = SamProcessor(image_processor=image_processor)
dummy_masks = [torch.ones((1, 3, 5, 5))]
original_sizes = [[1764, 2646]]
reshaped_input_size = [[683, 1024]]
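        # post_process_masks rescales the low-res 5 x 5 mask logits back to the original 1764 x 2646
        # image size, using the reshaped input size to undo the resize/pad applied at preprocessing time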
masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
masks = processor.post_process_masks(
dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
)
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
# should also work with np
dummy_masks = [np.ones((1, 3, 5, 5))]
masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
dummy_masks = [[1, 0], [0, 1]]
with self.assertRaises(ValueError):
masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
image_processor = SamImageProcessor()
processor = SamProcessor(image_processor)
processor.save_pretrained(self.tmpdirname)
def get_image_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def prepare_image_inputs(self):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
"""
image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def test_save_load_pretrained_additional_features(self):
processor = SamProcessor(image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, SamImageProcessor)
def test_image_processor(self):
image_processor = self.get_image_processor()
processor = SamProcessor(image_processor=image_processor)
image_input = self.prepare_image_inputs()
input_feat_extract = image_processor(image_input, return_tensors="np")
input_processor = processor(images=image_input, return_tensors="np")
input_feat_extract.pop("original_sizes") # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes") # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
@require_tf
def test_post_process_masks(self):
image_processor = self.get_image_processor()
processor = SamProcessor(image_processor=image_processor)
dummy_masks = [tf.ones((1, 3, 5, 5))]
original_sizes = [[1764, 2646]]
reshaped_input_size = [[683, 1024]]
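        # post_process_masks rescales the low-res 5 x 5 mask logits back to the original 1764 x 2646
        # image size, using the reshaped input size to undo the resize/pad applied at preprocessing time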
masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
masks = processor.post_process_masks(
dummy_masks,
tf.convert_to_tensor(original_sizes),
tf.convert_to_tensor(reshaped_input_size),
return_tensors="tf",
)
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
# should also work with np
dummy_masks = [np.ones((1, 3, 5, 5))]
masks = processor.post_process_masks(
dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
)
self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
dummy_masks = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError):
masks = processor.post_process_masks(
dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
)
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
image_processor = SamImageProcessor()
processor = SamProcessor(image_processor)
processor.save_pretrained(self.tmpdirname)
def get_image_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def prepare_image_inputs(self):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
"""
image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def test_post_process_masks_equivalence(self):
image_processor = self.get_image_processor()
processor = SamProcessor(image_processor=image_processor)
dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
pt_dummy_masks = [torch.tensor(dummy_masks)]
original_sizes = [[1764, 2646]]
reshaped_input_size = [[683, 1024]]
tf_masks = processor.post_process_masks(
tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
)
pt_masks = processor.post_process_masks(
pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
)
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))
@is_pt_tf_cross_test
def test_image_processor_equivalence(self):
image_processor = self.get_image_processor()
processor = SamProcessor(image_processor=image_processor)
image_input = self.prepare_image_inputs()
pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()
tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()
self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/wav2vec2_phoneme/test_tokenization_wav2vec2_phoneme.py
# coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Wav2Vec2Phoneme tokenizer."""
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
test_rust_tokenizer = False
def setUp(self):
super().setUp()
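        # a small espeak-style phoneme inventory (special tokens first) used to build a local test vocab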
vocab = (
"<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
"ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
"ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
"oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
"pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
"yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
"ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
"ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
"uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
"ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
"ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
"ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
).split(" ")
vocab_tokens = dict(zip(vocab, range(len(vocab))))
self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
# overwrite since phonemes require specific creation
def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
if max_length is not None and len(toks) > max_length:
toks = toks[:max_length]
if min_length is not None and len(toks) < min_length and len(toks) > 0:
while len(toks) < min_length:
toks = toks + toks
# toks_str = [t[1] for t in toks]
toks_ids = [t[0] for t in toks]
# Ensure consistency
output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
if " " not in output_txt and len(toks_ids) > 1:
output_txt = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
+ " "
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
)
if with_prefix_space:
output_txt = " " + output_txt
output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
return output_txt, output_ids
def get_tokenizer(self, **kwargs):
kwargs.update(self.special_tokens_map)
return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def test_tokenizer_add_new_tokens(self):
tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
# check adding a single token
tokenizer.add_tokens("xxx")
token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
self.assertEqual(token_ids, [13, 392, 17]) # xxx should be last token
tokenizer.add_tokens(["aaa", "bbb", "ccc"])
token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa is added right after xxx (id 393) and ccc two ids later (id 395)
token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
self.assertEqual(token_ids, [3, 200]) # mai should be <unk> (=3)
def test_phonemize(self):
tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
input_text = "Hello how are you"
phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")
def test_encode(self):
tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
input_text = "Hello how are you"
phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)
def test_encode_decode(self):
tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
input_text = "Hello how are you"
phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
self.assertEqual(phonemes, phonemes_enc_dec)
def test_decode(self):
tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
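        # CTC-style decoding groups identical consecutive ids and drops the pad token; the pad between
        # the two 15s breaks the group, so both ɾ tokens survive in the first sequence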
tokens = tokenizer.decode(sample_ids[0])
batch_tokens = tokenizer.batch_decode(sample_ids)
self.assertEqual(tokens, batch_tokens[0])
self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
def test_phonemize_with_word_del(self):
tokenizer = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
)
tokenizer.add_tokens("|")
input_text = "Hello how are you"
phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")
def test_encode_with_del(self):
tokenizer = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
)
tokenizer.add_tokens("|")
input_text = "Hello how are you"
phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)
def test_decode_with_del(self):
tokenizer = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
)
tokenizer.add_tokens("|")
# fmt: off
sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
tokens = tokenizer.decode(sample_ids[0])
batch_tokens = tokenizer.batch_decode(sample_ids)
self.assertEqual(tokens, batch_tokens[0])
self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
# decode with no word_del_token filter
tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
self.assertEqual(tokens, batch_tokens[0])
self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])
def test_encode_decode_with_del(self):
tokenizer = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
)
tokenizer.add_tokens("|")
input_text = "Hello how are you"
phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
self.assertEqual(phonemes, phonemes_enc_dec)
def test_encode_decode_with_del_filter(self):
tokenizer = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
)
tokenizer.add_tokens("|")
input_text = "Hello how are you"
phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
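        # rebuild the reference without word delimiters: split on " |", strip each piece, and
        # re-join with single spaces so it matches the delimiter-filtered decode output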
self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)
def test_change_phonemizer_lang(self):
tokenizer = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
)
input_text = "Hello how are you"
input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids
self.assertNotEqual(input_ids_en, input_ids_fr)
text_en = tokenizer.decode(input_ids_en)
text_fr = tokenizer.decode(input_ids_fr)
self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")
def test_case_insensitive(self):
tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
input_text_up = "Hello how Are you"
input_text_low = "hello how are you"
input_ids_up = tokenizer(input_text_up).input_ids
input_ids_low = tokenizer(input_text_low).input_ids
self.assertEqual(input_ids_up, input_ids_low)
def test_tokenizer_decode_added_tokens(self):
tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
tokenizer.add_tokens(["!", "?"])
tokenizer.add_special_tokens({"cls_token": "$$$"})
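        # the base vocab covers ids 0-391, so "!" -> 392, "?" -> 393, and "$$$" -> 394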
# fmt: off
sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
batch_tokens = tokenizer.batch_decode(sample_ids)
self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])
@staticmethod
def get_from_offsets(offsets, key):
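        # collect a single field (e.g. "char" or "start_offset") from a list of offset dicts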
retrieved_list = [d[key] for d in offsets]
return retrieved_list
def test_offsets(self):
tokenizer = self.get_tokenizer(word_delimiter_token="|")
tokenizer.add_tokens("|")
# fmt: off
        # ksssɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2PhonemeCTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys()), 2)
self.assertTrue("text" in outputs)
self.assertTrue("char_offsets" in outputs)
self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))
# check that order of chars is correct and identical for both outputs
self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
)
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
)
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
)
def test_offsets_batch(self):
tokenizer = self.get_tokenizer(word_delimiter_token="|")
def check_list_tuples_equal(outputs_batch, outputs_list):
self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))
# transform list to ModelOutput
outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]}
)
self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])
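            # walk nested lists element by element; non-list leaves fall through to a direct equality check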
def recursive_check(list_or_dict_1, list_or_dict_2):
if isinstance(list_or_dict_1, list):
[recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
self.assertEqual(list_or_dict_1, list_or_dict_2)
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])
# fmt: off
sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
        # We assume that `decode` works as expected. All we check now is that
        # the output type is correct and that the output is identical to `decode`
# char
outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
check_list_tuples_equal(outputs_char_batch, outputs_char)
@unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
def test_added_tokens_do_lower_case(self):
pass
@unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
def test_encode_decode_with_spaces(self):
pass
@unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency")
def test_internal_consistency(self):
pass
@unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing")
def test_pretrained_model_lists(self):
pass
# overwrite common
def test_add_tokens_tokenizer(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
vocab_size = tokenizer.vocab_size
all_size = len(tokenizer)
self.assertNotEqual(vocab_size, 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
added_toks = tokenizer.add_tokens(new_toks)
vocab_size_2 = tokenizer.vocab_size
all_size_2 = len(tokenizer)
self.assertNotEqual(vocab_size_2, 0)
self.assertEqual(vocab_size, vocab_size_2)
self.assertEqual(added_toks, len(new_toks))
self.assertEqual(all_size_2, all_size + len(new_toks))
tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
self.assertGreaterEqual(len(tokens), 4)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
vocab_size_3 = tokenizer.vocab_size
all_size_3 = len(tokenizer)
self.assertNotEqual(vocab_size_3, 0)
self.assertEqual(vocab_size, vocab_size_3)
self.assertEqual(added_toks_2, len(new_toks_2))
self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
tokens = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
)
self.assertGreaterEqual(len(tokens), 6)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[0], tokens[1])
self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3], tokens[-4])
self.assertEqual(tokens[0], tokenizer.eos_token_id)
self.assertEqual(tokens[-3], tokenizer.pad_token_id)
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
def test_tf_encode_plus_sent_to_model(self):
pass
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
def test_torch_encode_plus_sent_to_model(self):
pass
def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests assume that the output of `convert_tokens_to_string` is a string, which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
output = tokenizer.convert_tokens_to_string(tokens)
self.assertIsInstance(output["text"], str)
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/flaubert/test_modeling_flaubert.py
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_accelerator, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_lengths=True,
use_token_type_ids=True,
use_labels=True,
gelu_activation=True,
sinusoidal_embeddings=False,
causal=False,
asm=False,
n_langs=2,
vocab_size=99,
n_special=0,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=12,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
summary_type="last",
use_proj=None,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_lengths = use_input_lengths
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.gelu_activation = gelu_activation
self.sinusoidal_embeddings = sinusoidal_embeddings
self.causal = causal
self.asm = asm
self.n_langs = n_langs
self.vocab_size = vocab_size
self.n_special = n_special
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.summary_type = summary_type
self.use_proj = use_proj
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = random_attention_mask([self.batch_size, self.seq_length])
input_lengths = None
if self.use_input_lengths:
input_lengths = (
ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
sequence_labels = None
token_labels = None
is_impossible_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
is_impossible_labels = ids_tensor([self.batch_size], 2).float()
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def get_config(self):
return FlaubertConfig(
vocab_size=self.vocab_size,
n_special=self.n_special,
emb_dim=self.hidden_size,
n_layers=self.num_hidden_layers,
n_heads=self.num_attention_heads,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
gelu_activation=self.gelu_activation,
sinusoidal_embeddings=self.sinusoidal_embeddings,
asm=self.asm,
causal=self.causal,
n_langs=self.n_langs,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
summary_type=self.summary_type,
use_proj=self.use_proj,
)
def create_and_check_flaubert_model(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
model = FlaubertModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
result = model(input_ids, langs=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_flaubert_lm_head(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
model = FlaubertWithLMHeadModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_flaubert_simple_qa(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
model = FlaubertForQuestionAnsweringSimple(config)
model.to(torch_device)
model.eval()
result = model(input_ids)
result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_flaubert_qa(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
model = FlaubertForQuestionAnswering(config)
model.to(torch_device)
model.eval()
result = model(input_ids)
result_with_labels = model(
input_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
cls_index=sequence_labels,
is_impossible=is_impossible_labels,
p_mask=input_mask,
)
result_with_labels = model(
input_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
cls_index=sequence_labels,
is_impossible=is_impossible_labels,
)
(total_loss,) = result_with_labels.to_tuple()
result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
(total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, ())
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
)
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
)
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
def create_and_check_flaubert_sequence_classif(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
model = FlaubertForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids)
result = model(input_ids, labels=sequence_labels)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def create_and_check_flaubert_token_classif(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
config.num_labels = self.num_labels
model = FlaubertForTokenClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_flaubert_multiple_choice(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
config.num_choices = self.num_choices
model = FlaubertForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
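        # replicate each input along a new choice dimension: (batch, seq) -> (batch, num_choices, seq)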
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
# TODO: Fix the failed tests
def is_pipeline_test_to_skip(
self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
# Flaubert has 2 QA models -> need to manually set the correct labels for one of them here
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
inputs_dict["start_positions"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
inputs_dict["end_positions"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
return inputs_dict
def setUp(self):
self.model_tester = FlaubertModelTester(self)
self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_flaubert_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*config_and_inputs)
def test_flaubert_lm_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)
def test_flaubert_simple_qa(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)
def test_flaubert_qa(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)
def test_flaubert_sequence_classif(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)
def test_flaubert_token_classif(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)
def test_flaubert_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = FlaubertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@slow
@require_torch_accelerator
def test_torchscript_device_change(self):
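        # trace each model on CPU, save the TorchScript program, then reload it directly
        # onto the accelerator to check that the trace is device-portable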
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            # FlaubertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                continue
config.torchscript = True
model = model_class(config=config)
inputs_dict = self._prepare_for_class(inputs_dict, model_class)
traced_model = torch.jit.trace(
model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
)
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_no_head_absolute_embedding(self):
model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
with torch.no_grad():
output = model(input_ids)[0]
expected_shape = torch.Size((1, 11, 768))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/flaubert/test_modeling_tf_flaubert.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
def __init__(
self,
parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_lengths = True
self.use_token_type_ids = True
self.use_labels = True
self.gelu_activation = True
self.sinusoidal_embeddings = False
self.causal = False
self.asm = False
self.n_langs = 2
self.vocab_size = 99
self.n_special = 0
self.hidden_size = 32
self.num_hidden_layers = 2
self.num_attention_heads = 4
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.summary_type = "last"
self.use_proj = True
self.scope = None
self.bos_token_id = 0
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)
input_lengths = None
if self.use_input_lengths:
input_lengths = (
ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
sequence_labels = None
token_labels = None
is_impossible_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = FlaubertConfig(
vocab_size=self.vocab_size,
n_special=self.n_special,
emb_dim=self.hidden_size,
n_layers=self.num_hidden_layers,
n_heads=self.num_attention_heads,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
gelu_activation=self.gelu_activation,
sinusoidal_embeddings=self.sinusoidal_embeddings,
asm=self.asm,
causal=self.causal,
n_langs=self.n_langs,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
summary_type=self.summary_type,
use_proj=self.use_proj,
bos_token_id=self.bos_token_id,
)
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def create_and_check_flaubert_model(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
model = TFFlaubertModel(config=config)
inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
result = model(inputs)
inputs = [input_ids, input_mask]
result = model(inputs)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_flaubert_lm_head(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
model = TFFlaubertWithLMHeadModel(config)
inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_flaubert_qa(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
model = TFFlaubertForQuestionAnsweringSimple(config)
inputs = {"input_ids": input_ids, "lengths": input_lengths}
result = model(inputs)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_flaubert_sequence_classif(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
model = TFFlaubertForSequenceClassification(config)
inputs = {"input_ids": input_ids, "lengths": input_lengths}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def create_and_check_flaubert_for_token_classification(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
config.num_labels = self.num_labels
model = TFFlaubertForTokenClassification(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_flaubert_for_multiple_choice(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
):
config.num_choices = self.num_choices
model = TFFlaubertForMultipleChoice(config=config)
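        # tile each input along a new choice dimension: (batch, seq) -> (batch, num_choices, seq)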
multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
inputs = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"langs": token_type_ids,
"lengths": input_lengths,
}
return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
all_generative_model_classes = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    ) # TODO (PVP): check whether language generation is also applicable to other models
pipeline_model_mapping = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
test_head_masking = False
test_onnx = False
# TODO: Fix the failed tests
def is_pipeline_test_to_skip(
self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def setUp(self):
self.model_tester = TFFlaubertModelTester(self)
self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_flaubert_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*config_and_inputs)
def test_flaubert_lm_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)
def test_flaubert_qa(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)
def test_flaubert_sequence_classif(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFFlaubertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
@slow
def test_output_embeds_base_model(self):
model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
input_ids = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]],
dtype=tf.int32,
) # "J'aime flaubert !"
output = model(input_ids)[0]
expected_shape = tf.TensorShape((1, 8, 512))
self.assertEqual(output.shape, expected_shape)
# compare the actual values for a slice.
expected_slice = tf.convert_to_tensor(
[
[
[-1.8768773, -1.566555, 0.27072418],
[-1.6920038, -0.5873505, 1.9329599],
[-2.9563985, -1.6993835, 1.7972052],
]
],
dtype=tf.float32,
)
self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/bit/test_modeling_bit.py
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Bit model. """
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
def __init__(
self,
parent,
batch_size=3,
image_size=32,
num_channels=3,
embeddings_size=10,
hidden_sizes=[8, 16, 32, 64],
depths=[1, 1, 2, 1],
is_training=True,
use_labels=True,
hidden_act="relu",
num_labels=3,
scope=None,
out_features=["stage2", "stage3", "stage4"],
out_indices=[2, 3, 4],
num_groups=1,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.embeddings_size = embeddings_size
self.hidden_sizes = hidden_sizes
self.depths = depths
self.is_training = is_training
self.use_labels = use_labels
self.hidden_act = hidden_act
self.num_labels = num_labels
self.scope = scope
self.num_stages = len(hidden_sizes)
self.out_features = out_features
self.out_indices = out_indices
self.num_groups = num_groups
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return BitConfig(
num_channels=self.num_channels,
embeddings_size=self.embeddings_size,
hidden_sizes=self.hidden_sizes,
depths=self.depths,
hidden_act=self.hidden_act,
num_labels=self.num_labels,
out_features=self.out_features,
out_indices=self.out_indices,
num_groups=self.num_groups,
)
def create_and_check_model(self, config, pixel_values, labels):
model = BitModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape,
(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
)
def create_and_check_for_image_classification(self, config, pixel_values, labels):
config.num_labels = self.num_labels
model = BitForImageClassification(config)
model.to(torch_device)
model.eval()
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_backbone(self, config, pixel_values, labels):
model = BitBackbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels), len(config.out_features))
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
# verify backbone works with out_features=None
config.out_features = None
model = BitBackbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps), 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels), 1)
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as Bit does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
pipeline_model_mapping = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
has_attentions = False
def setUp(self):
self.model_tester = BitModelTester(self)
self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def create_and_test_config_common_properties(self):
return
@unittest.skip(reason="Bit does not output attentions")
def test_attention_outputs(self):
pass
@unittest.skip(reason="Bit does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Bit does not support input and output embeddings")
def test_model_common_attributes(self):
pass
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_backbone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*config_and_inputs)
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config=config)
for name, module in model.named_modules():
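                # freshly initialized norm layers should act as the identity: unit weight, zero bias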
if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1),
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
self.assertTrue(
torch.all(module.bias == 0),
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_stages = self.model_tester.num_stages
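            # one hidden state for the embedding output plus one per stage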
self.assertEqual(len(hidden_states), expected_num_stages + 1)
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.image_size // 4, self.model_tester.image_size // 4],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
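        # exercise both Bit residual layer variants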
layers_type = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
config.layer_type = layer_type
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip(reason="Bit does not use feedforward chunking")
def test_feed_forward_chunking(self):
pass
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = BitModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
@cached_property
def default_image_processor(self):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
)
@slow
def test_inference_image_classification_head(self):
model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
all_model_classes = (BitBackbone,) if is_torch_available() else ()
config_class = BitConfig
has_attentions = False
def setUp(self):
self.model_tester = BitModelTester(self)
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/phi/test_modeling_phi.py
# coding=utf-8
# Copyright 2023 Microsoft and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Phi model. """
import unittest
from transformers import PhiConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
PhiForCausalLM,
PhiForSequenceClassification,
PhiForTokenClassification,
PhiModel,
)
# Copied from tests.models.llama.test_modeling_llama.LlamaModelTester with Llama->Phi
class PhiModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=False,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
pad_token_id=0,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.pad_token_id = pad_token_id
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return PhiConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = PhiModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_model_as_decoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.add_cross_attention = True
model = PhiModel(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
result = model(
input_ids,
attention_mask=input_mask,
encoder_hidden_states=encoder_hidden_states,
)
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_causal_lm(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
model = PhiForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_decoder_model_past_large_inputs(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.is_decoder = True
config.add_cross_attention = True
model = PhiForCausalLM(config=config)
model.to(torch_device)
model.eval()
# first forward pass
outputs = model(
input_ids,
attention_mask=input_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=True,
)
past_key_values = outputs.past_key_values
        # create hypothetical next tokens and extend next_input_ids with them
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append the new tokens to input_ids and the new mask to the attention mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
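        # run the full extended sequence without cache and only the new tokens with the cache;
        # hidden states for the new positions must match between the two passes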
output_from_no_past = model(
next_input_ids,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_hidden_states=True,
)["hidden_states"][0]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
)["hidden_states"][0]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class PhiModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(PhiModel, PhiForCausalLM, PhiForSequenceClassification, PhiForTokenClassification)
if is_torch_available()
else ()
)
all_generative_model_classes = (PhiForCausalLM,) if is_torch_available() else ()
pipeline_model_mapping = (
{
"feature-extraction": PhiModel,
"text-classification": PhiForSequenceClassification,
"text-generation": PhiForCausalLM,
"token-classification": PhiForTokenClassification,
"zero-shot": PhiForSequenceClassification,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
# TODO (ydshieh): Check this. See https://app.circleci.com/pipelines/github/huggingface/transformers/79292/workflows/fa2ba644-8953-44a6-8f67-ccd69ca6a476/jobs/1012905
def is_pipeline_test_to_skip(
self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
):
return True
# Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.setUp with Llama->Phi
def setUp(self):
self.model_tester = PhiModelTester(self)
self.config_tester = ConfigTester(self, config_class=PhiConfig, hidden_size=37)
# Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_config
def test_config(self):
self.config_tester.run_common_tests()
# Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_model
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
# Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model with Llama->Phi,llama->phi
def test_phi_sequence_classification_model(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
input_ids = input_dict["input_ids"]
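        # treat token id 1 as padding when building the attention mask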
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
model = PhiForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
# Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model_for_single_label with Llama->Phi,llama->phi
def test_phi_sequence_classification_model_for_single_label(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.problem_type = "single_label_classification"
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
model = PhiForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
# Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model_for_multi_label with Llama->Phi,llama->phi
def test_phi_sequence_classification_model_for_multi_label(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.problem_type = "multi_label_classification"
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor(
[self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
).to(torch.float)
model = PhiForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
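# Editor's note: the three sequence-classification tests above differ only in
# `config.problem_type`. A minimal sketch of the loss dispatch they exercise,
# assuming the behaviour documented for HF sequence-classification heads (the
# helper name below is illustrative and not part of this file or the library):
def _example_loss_for_problem_type(problem_type, logits, labels):
    import torch.nn.functional as F

    if problem_type == "single_label_classification":
        # labels: (batch,) integer class indices
        return F.cross_entropy(logits.view(-1, logits.size(-1)), labels.view(-1))
    if problem_type == "multi_label_classification":
        # labels: (batch, num_labels) float multi-hot targets
        return F.binary_cross_entropy_with_logits(logits, labels)
    # "regression": float targets with the same shape as the logits
    return F.mse_loss(logits.squeeze(), labels.squeeze())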
@slow
@require_torch
class PhiIntegrationTest(unittest.TestCase):
def test_model_phi_1_logits(self):
input_ids = {
"input_ids": torch.tensor(
[[1212, 318, 281, 1672, 2643, 290, 428, 318, 257, 1332]], dtype=torch.long, device=torch_device
)
}
model = PhiForCausalLM.from_pretrained("susnato/phi-1_dev").to(torch_device)
model.eval()
output = model(**input_ids).logits
EXPECTED_OUTPUT = torch.tensor([[2.2671, 6.7684, -2.0107, -1.2440, -1.5335, -2.3828, 6.9186, 6.4245, 3.1548, 0.9998, 0.0760, 4.4653, 4.9857, 4.2956, 1.2308, -1.4178, 0.1361, 0.5191, -0.5699, -2.2201, -3.0750, -3.9600, -4.5936, -3.7394, -2.7777, 6.1874, -0.4148, -1.5684, -0.5967, 0.2395], [1.7004, 4.0383, 0.0546, 0.4530, -0.3619, -0.9021, 1.8355, 1.3587, 1.2406, 2.5775, -0.8834, 5.1910, 4.2565, 4.1406, 3.0752, -0.9099, 1.1595, 0.0264, 0.3243, -1.1803, -1.3945, -2.1406, -3.9939, -1.4438, -2.9546, 3.9204, 1.0851, -1.0598, -1.7819, -0.4827]]).to(torch_device) # fmt: skip
self.assertTrue(torch.allclose(EXPECTED_OUTPUT, output[0, :2, :30], atol=1e-4, rtol=1e-4))
def test_model_phi_1_5_logits(self):
input_ids = {
"input_ids": torch.tensor(
[[1212, 318, 281, 1672, 2643, 290, 428, 318, 257, 1332]], dtype=torch.long, device=torch_device
)
}
model = PhiForCausalLM.from_pretrained("susnato/phi-1_5_dev").to(torch_device)
model.eval()
output = model(**input_ids).logits
EXPECTED_OUTPUT = torch.tensor([[12.2922, 13.3507, 8.6963, 9.1355, 9.3502, 9.2667, 14.2027, 13.1363, 13.5446, 11.1337, 9.9279, 16.7195, 13.0768, 14.9141, 11.9965, 8.0233, 10.3129, 10.6118, 10.0204, 9.3827, 8.8344, 8.2806, 8.0153, 8.0540, 7.0964, 16.5743, 11.1256, 9.6987, 11.4770, 10.5440], [12.3323, 14.6050, 8.9986, 8.1580, 9.5654, 6.6728, 12.5966, 12.6662, 12.2784, 11.7522, 8.2039, 16.3102, 11.2203, 13.6088, 12.0125, 9.1021, 9.8216, 10.0987, 9.0926, 8.4260, 8.8009, 7.6547, 6.8075, 7.7881, 7.4501, 15.7451, 10.5053, 8.3129, 10.0027, 9.2612]]).to(torch_device) # fmt: skip
self.assertTrue(torch.allclose(EXPECTED_OUTPUT, output[0, :2, :30], atol=1e-4, rtol=1e-4))
hf_public_repos/transformers/tests/models/unispeech_sat/test_modeling_unispeech_sat.py
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch UniSpeechSat model. """
import math
import unittest
import numpy as np
import pytest
from datasets import load_dataset
from transformers import UniSpeechSatConfig, is_torch_available
from transformers.testing_utils import require_soundfile, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
_config_zero_init,
floats_tensor,
ids_tensor,
random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForCTC,
UniSpeechSatForPreTraining,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
UniSpeechSatModel,
Wav2Vec2FeatureExtractor,
Wav2Vec2Processor,
)
class UniSpeechSatModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=1024, # speech is longer
is_training=False,
hidden_size=16,
feat_extract_norm="group",
feat_extract_dropout=0.0,
feat_extract_activation="gelu",
conv_dim=(32, 32, 32),
conv_stride=(4, 4, 4),
conv_kernel=(8, 8, 8),
conv_bias=False,
num_conv_pos_embeddings=16,
num_conv_pos_embedding_groups=2,
num_hidden_layers=2,
num_attention_heads=2,
hidden_dropout_prob=0.1, # this is most likely not correctly set yet
intermediate_size=20,
layer_norm_eps=1e-5,
hidden_act="gelu",
initializer_range=0.02,
mask_time_prob=0.5,
mask_time_length=2,
vocab_size=32,
do_stable_layer_norm=False,
tdnn_dim=(32, 32),
tdnn_kernel=(3, 3),
tdnn_dilation=(1, 1),
xvector_output_dim=32,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.hidden_size = hidden_size
self.feat_extract_norm = feat_extract_norm
self.feat_extract_dropout = feat_extract_dropout
self.feat_extract_activation = feat_extract_activation
self.conv_dim = conv_dim
self.conv_stride = conv_stride
self.conv_kernel = conv_kernel
self.conv_bias = conv_bias
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_dropout_prob = hidden_dropout_prob
self.intermediate_size = intermediate_size
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.vocab_size = vocab_size
self.do_stable_layer_norm = do_stable_layer_norm
self.mask_time_prob = mask_time_prob
self.mask_time_length = mask_time_length
self.tdnn_dim = tdnn_dim
self.tdnn_kernel = tdnn_kernel
self.tdnn_dilation = tdnn_dilation
self.xvector_output_dim = xvector_output_dim
self.scope = scope
output_seq_length = self.seq_length
for kernel, stride in zip(self.conv_kernel, self.conv_stride):
output_seq_length = (output_seq_length - (kernel - 1)) / stride
self.output_seq_length = int(math.ceil(output_seq_length))
self.encoder_seq_length = self.output_seq_length
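    # Editor's note, a worked example of the length arithmetic above: with the
    # defaults seq_length=1024, conv_kernel=(8, 8, 8) and conv_stride=(4, 4, 4),
    # each layer maps L -> (L - (kernel - 1)) / stride, so
    # 1024 -> 254.25 -> 61.8125 -> ~13.70, and the final ceil gives
    # output_seq_length = 14. The modeling code's `_get_feat_extract_output_lengths`
    # uses the integer form (L - kernel) // stride + 1 per layer, which also
    # yields 14 here.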
def prepare_config_and_inputs(self):
input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0)
attention_mask = random_attention_mask([self.batch_size, self.seq_length])
config = self.get_config()
return config, input_values, attention_mask
def get_config(self):
return UniSpeechSatConfig(
hidden_size=self.hidden_size,
feat_extract_norm=self.feat_extract_norm,
feat_extract_dropout=self.feat_extract_dropout,
feat_extract_activation=self.feat_extract_activation,
conv_dim=self.conv_dim,
conv_stride=self.conv_stride,
conv_kernel=self.conv_kernel,
conv_bias=self.conv_bias,
num_conv_pos_embeddings=self.num_conv_pos_embeddings,
num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,
mask_time_prob=self.mask_time_prob,
mask_time_length=self.mask_time_length,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
hidden_dropout_prob=self.hidden_dropout_prob,
intermediate_size=self.intermediate_size,
layer_norm_eps=self.layer_norm_eps,
hidden_act=self.hidden_act,
initializer_range=self.initializer_range,
vocab_size=self.vocab_size,
tdnn_dim=self.tdnn_dim,
tdnn_kernel=self.tdnn_kernel,
tdnn_dilation=self.tdnn_dilation,
xvector_output_dim=self.xvector_output_dim,
)
def create_and_check_model(self, config, input_values, attention_mask):
model = UniSpeechSatModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_values, attention_mask=attention_mask)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size)
)
def create_and_check_batch_inference(self, config, input_values, *args):
# test does not pass for models making use of `group_norm`
# check: https://github.com/pytorch/fairseq/issues/3227
model = UniSpeechSatModel(config=config)
model.to(torch_device)
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0.0
batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state
for i in range(input_values.shape[0]):
input_slice = input_values[i : i + 1, : input_lengths[i]]
output = model(input_slice).last_hidden_state
batch_output = batch_outputs[i : i + 1, : output.shape[1]]
self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3))
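    # Editor's note: the check above asserts that a padded, batched forward pass
    # matches per-sample forward passes on the unpadded slices. Feature extractors
    # using `group_norm` break this property because nn.GroupNorm normalizes over
    # the (padded) time axis as well, hence the exclusion noted above.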
def check_ctc_loss(self, config, input_values, *args):
model = UniSpeechSatForCTC(config=config)
model.to(torch_device)
# make sure that dropout is disabled
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size)
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0
model.config.ctc_loss_reduction = "sum"
sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
model.config.ctc_loss_reduction = "mean"
mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
self.parent.assertTrue(isinstance(sum_loss, float))
self.parent.assertTrue(isinstance(mean_loss, float))
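    # Editor's note: the labels above are made shorter than the shortest
    # feature-frame sequence (`min(max_length_labels) - 1`) because CTC requires
    # target_length <= input_length for every sample; otherwise the loss is
    # infinite unless `ctc_zero_infinity` is set, as in `check_ctc_training` below.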
def check_seq_classifier_loss(self, config, input_values, *args):
model = UniSpeechSatForSequenceClassification(config=config)
model.to(torch_device)
# make sure that dropout is disabled
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0
masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
unmasked_loss = model(input_values, labels=labels).loss.item()
self.parent.assertTrue(isinstance(masked_loss, float))
self.parent.assertTrue(isinstance(unmasked_loss, float))
self.parent.assertTrue(masked_loss != unmasked_loss)
def check_ctc_training(self, config, input_values, *args):
config.ctc_zero_infinity = True
model = UniSpeechSatForCTC(config=config)
model.to(torch_device)
model.train()
# freeze feature encoder
model.freeze_feature_encoder()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size)
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
if max_length_labels[i] < labels.shape[-1]:
# it's important that we make sure that target lengths are at least
# one shorter than logit lengths to prevent -inf
labels[i, max_length_labels[i] - 1 :] = -100
loss = model(input_values, labels=labels).loss
self.parent.assertFalse(torch.isinf(loss).item())
loss.backward()
def check_seq_classifier_training(self, config, input_values, *args):
config.ctc_zero_infinity = True
model = UniSpeechSatForSequenceClassification(config=config)
model.to(torch_device)
model.train()
# freeze everything but the classification head
model.freeze_base_model()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
loss = model(input_values, labels=labels).loss
self.parent.assertFalse(torch.isinf(loss).item())
loss.backward()
def check_xvector_training(self, config, *args):
config.ctc_zero_infinity = True
model = UniSpeechSatForXVector(config=config)
model.to(torch_device)
model.train()
# freeze everything but the classification head
model.freeze_base_model()
# use a longer sequence length to account for TDNN temporal downsampling
input_values = floats_tensor([self.batch_size, self.seq_length * 2], scale=1.0)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
loss = model(input_values, labels=labels).loss
self.parent.assertFalse(torch.isinf(loss).item())
loss.backward()
def check_labels_out_of_vocab(self, config, input_values, *args):
model = UniSpeechSatForCTC(config)
model.to(torch_device)
model.train()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100)
with pytest.raises(ValueError):
model(input_values, labels=labels)
def prepare_config_and_inputs_for_common(self):
config, input_values, attention_mask = self.prepare_config_and_inputs()
inputs_dict = {"input_values": input_values, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class UniSpeechSatModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
UniSpeechSatForCTC,
UniSpeechSatForPreTraining,
UniSpeechSatModel,
UniSpeechSatForSequenceClassification,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForXVector,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"audio-classification": UniSpeechSatForSequenceClassification,
"automatic-speech-recognition": UniSpeechSatForCTC,
"feature-extraction": UniSpeechSatModel,
}
if is_torch_available()
else {}
)
test_pruning = False
test_headmasking = False
test_torchscript = False
def setUp(self):
self.model_tester = UniSpeechSatModelTester(self)
self.config_tester = ConfigTester(self, config_class=UniSpeechSatConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_ctc_loss_inference(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_ctc_loss(*config_and_inputs)
def test_seq_classifier_loss_inference(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_seq_classifier_loss(*config_and_inputs)
def test_ctc_train(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_ctc_training(*config_and_inputs)
def test_seq_classifier_train(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_seq_classifier_training(*config_and_inputs)
def test_xvector_train(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_xvector_training(*config_and_inputs)
def test_labels_out_of_vocab(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_labels_out_of_vocab(*config_and_inputs)
# UniSpeechSat has no inputs_embeds
def test_inputs_embeds(self):
pass
# `input_ids` is renamed to `input_values`
def test_forward_signature(self):
pass
# UniSpeechSat cannot resize token embeddings
# since it has no tokens embeddings
def test_resize_tokens_embeddings(self):
pass
# UniSpeechSat has no inputs_embeds
# and thus the `get_input_embeddings` fn
# is not implemented
def test_model_common_attributes(self):
pass
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
# set layer drop to 0
model.config.layerdrop = 0.0
input_values = inputs_dict["input_values"]
input_lengths = torch.tensor(
[input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device
)
output_lengths = model._get_feat_extract_output_lengths(input_lengths)
labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size)
inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"])
inputs_dict["labels"] = labels
outputs = model(**inputs_dict)
output = outputs[0]
# Encoder-/Decoder-only models
hidden_states = outputs.hidden_states[0]
attentions = outputs.attentions[0]
hidden_states.retain_grad()
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(hidden_states.grad)
self.assertIsNotNone(attentions.grad)
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
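        # Editor's note: `_config_zero_init` shrinks the config's initializer
        # ranges to (near) zero, so normally initialised parameters should end up
        # with a mean rounding to 0.0 (or 1.0 for ones-initialised weights such as
        # LayerNorm scales), while the uniformly initialised parameters listed in
        # `uniform_init_parms` below may legitimately land anywhere in [-1, 1].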
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
uniform_init_parms = [
"conv.weight",
"conv.parametrizations.weight",
"masked_spec_embed",
"codevectors",
"quantizer.weight_proj.weight",
"project_hid.weight",
"project_hid.bias",
"project_q.weight",
"project_q.bias",
"feature_projection.projection.weight",
"feature_projection.projection.bias",
"label_embeddings_concat",
"objective.weight",
]
if param.requires_grad:
if any(x in name for x in uniform_init_parms):
self.assertTrue(
-1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0,
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
else:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
# overwrite from test_modeling_common
def _mock_init_weights(self, module):
if hasattr(module, "weight") and module.weight is not None:
module.weight.data.fill_(3)
if hasattr(module, "weight_g") and module.weight_g is not None:
module.weight_g.data.fill_(3)
if hasattr(module, "weight_v") and module.weight_v is not None:
module.weight_v.data.fill_(3)
if hasattr(module, "bias") and module.bias is not None:
module.bias.data.fill_(3)
if hasattr(module, "codevectors") and module.codevectors is not None:
module.codevectors.data.fill_(3)
if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None:
module.masked_spec_embed.data.fill_(3)
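    # Editor's note: filling every recognised parameter with the constant 3 gives
    # the common fast-initialization save/load tests a deterministic init that is
    # easy to tell apart from loaded weights; the `codevectors` and
    # `masked_spec_embed` branches extend the common mock to parameters specific
    # to this architecture.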
def test_mask_feature_prob_ctc(self):
model = UniSpeechSatForCTC.from_pretrained(
"hf-internal-testing/tiny-random-unispeech-sat", mask_feature_prob=0.2, mask_feature_length=2
)
model.to(torch_device).train()
processor = Wav2Vec2Processor.from_pretrained(
"hf-internal-testing/tiny-random-unispeech-sat", return_attention_mask=True
)
batch_duration_in_seconds = [1, 3, 2, 6]
input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds]
batch = processor(
input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt"
)
logits = model(
input_values=batch["input_values"].to(torch_device),
attention_mask=batch["attention_mask"].to(torch_device),
).logits
self.assertEqual(logits.shape, (4, 1498, 32))
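        # Editor's note on the expected shape: 4 clips padded to the longest one
        # (6 s * 16_000 Hz = 96_000 samples), which the tiny checkpoint's conv
        # feature extractor downsamples to 1498 frames, over a CTC vocab of 32.
        # The frame count comes from the pretrained config, not from the
        # `mask_feature_*` settings under test.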
def test_mask_time_prob_ctc(self):
model = UniSpeechSatForCTC.from_pretrained(
"hf-internal-testing/tiny-random-unispeech-sat", mask_time_prob=0.2, mask_time_length=2
)
model.to(torch_device).train()
processor = Wav2Vec2Processor.from_pretrained(
"hf-internal-testing/tiny-random-unispeech-sat", return_attention_mask=True
)
batch_duration_in_seconds = [1, 3, 2, 6]
input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds]
batch = processor(
input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt"
)
logits = model(
input_values=batch["input_values"].to(torch_device),
attention_mask=batch["attention_mask"].to(torch_device),
).logits
self.assertEqual(logits.shape, (4, 1498, 32))
@unittest.skip(reason="Feed forward chunking is not implemented")
def test_feed_forward_chunking(self):
pass
@slow
def test_model_from_pretrained(self):
model = UniSpeechSatModel.from_pretrained("microsoft/unispeech-sat-base-plus")
self.assertIsNotNone(model)
@require_torch
class UniSpeechSatRobustModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(UniSpeechSatForCTC, UniSpeechSatForPreTraining, UniSpeechSatModel, UniSpeechSatForSequenceClassification)
if is_torch_available()
else ()
)
test_pruning = False
test_headmasking = False
test_torchscript = False
def setUp(self):
self.model_tester = UniSpeechSatModelTester(
self, conv_stride=(3, 3, 3), feat_extract_norm="layer", do_stable_layer_norm=True
)
self.config_tester = ConfigTester(self, config_class=UniSpeechSatConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_batched_inference(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_batch_inference(*config_and_inputs)
def test_ctc_loss_inference(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_ctc_loss(*config_and_inputs)
def test_seq_classifier_loss_inference(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_seq_classifier_loss(*config_and_inputs)
def test_ctc_train(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_ctc_training(*config_and_inputs)
def test_seq_classifier_train(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_seq_classifier_training(*config_and_inputs)
def test_labels_out_of_vocab(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_labels_out_of_vocab(*config_and_inputs)
# UniSpeechSat has no inputs_embeds
def test_inputs_embeds(self):
pass
# `input_ids` is renamed to `input_values`
def test_forward_signature(self):
pass
# UniSpeechSat cannot resize token embeddings
# since it has no tokens embeddings
def test_resize_tokens_embeddings(self):
pass
# UniSpeechSat has no inputs_embeds
# and thus the `get_input_embeddings` fn
# is not implemented
def test_model_common_attributes(self):
pass
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
# set layer drop to 0
model.config.layerdrop = 0.0
input_values = inputs_dict["input_values"]
input_lengths = torch.tensor(
[input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device
)
output_lengths = model._get_feat_extract_output_lengths(input_lengths)
labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size)
inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"])
inputs_dict["labels"] = labels
outputs = model(**inputs_dict)
output = outputs[0]
# Encoder-/Decoder-only models
hidden_states = outputs.hidden_states[0]
attentions = outputs.attentions[0]
hidden_states.retain_grad()
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(hidden_states.grad)
self.assertIsNotNone(attentions.grad)
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
uniform_init_parms = [
"conv.weight",
"conv.parametrizations.weight",
"masked_spec_embed",
"codevectors",
"quantizer.weight_proj.weight",
"project_hid.weight",
"project_hid.bias",
"project_q.weight",
"project_q.bias",
"feature_projection.projection.weight",
"feature_projection.projection.bias",
"label_embeddings_concat",
"objective.weight",
]
if param.requires_grad:
if any(x in name for x in uniform_init_parms):
self.assertTrue(
-1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0,
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
else:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
# overwrite from test_modeling_common
def _mock_init_weights(self, module):
if hasattr(module, "weight") and module.weight is not None:
module.weight.data.fill_(3)
if hasattr(module, "weight_g") and module.weight_g is not None:
module.weight_g.data.fill_(3)
if hasattr(module, "weight_v") and module.weight_v is not None:
module.weight_v.data.fill_(3)
if hasattr(module, "bias") and module.bias is not None:
module.bias.data.fill_(3)
if hasattr(module, "codevectors") and module.codevectors is not None:
module.codevectors.data.fill_(3)
if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None:
module.masked_spec_embed.data.fill_(3)
def test_mask_feature_prob_ctc(self):
model = UniSpeechSatForCTC.from_pretrained(
"hf-internal-testing/tiny-random-unispeech-sat", mask_feature_prob=0.2, mask_feature_length=2
)
model.to(torch_device).train()
processor = Wav2Vec2Processor.from_pretrained(
"hf-internal-testing/tiny-random-unispeech-sat", return_attention_mask=True
)
batch_duration_in_seconds = [1, 3, 2, 6]
input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds]
batch = processor(
input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt"
)
logits = model(
input_values=batch["input_values"].to(torch_device),
attention_mask=batch["attention_mask"].to(torch_device),
).logits
self.assertEqual(logits.shape, (4, 1498, 32))
def test_mask_time_prob_ctc(self):
model = UniSpeechSatForCTC.from_pretrained(
"hf-internal-testing/tiny-random-unispeech-sat", mask_time_prob=0.2, mask_time_length=2
)
model.to(torch_device).train()
processor = Wav2Vec2Processor.from_pretrained(
"hf-internal-testing/tiny-random-unispeech-sat", return_attention_mask=True
)
batch_duration_in_seconds = [1, 3, 2, 6]
input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds]
batch = processor(
input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt"
)
logits = model(
input_values=batch["input_values"].to(torch_device),
attention_mask=batch["attention_mask"].to(torch_device),
).logits
self.assertEqual(logits.shape, (4, 1498, 32))
def test_mask_time_feature_prob_ctc_single_batch(self):
model = UniSpeechSatForCTC.from_pretrained(
"hf-internal-testing/tiny-random-unispeech-sat",
mask_time_prob=0.2,
mask_feature_prob=0.2,
mask_time_length=2,
mask_feature_length=2,
)
model.to(torch_device).train()
processor = Wav2Vec2Processor.from_pretrained(
"hf-internal-testing/tiny-random-unispeech-sat", return_attention_mask=True
)
batch_duration_in_seconds = [6]
input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds]
batch = processor(
input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt"
)
logits = model(
input_values=batch["input_values"].to(torch_device),
attention_mask=batch["attention_mask"].to(torch_device),
).logits
self.assertEqual(logits.shape, (1, 1498, 32))
@unittest.skip(reason="Feed forward chunking is not implemented")
def test_feed_forward_chunking(self):
pass
@slow
def test_model_from_pretrained(self):
model = UniSpeechSatModel.from_pretrained("microsoft/unispeech-sat-large")
self.assertIsNotNone(model)
@require_torch
@require_soundfile
@slow
class UniSpeechSatModelIntegrationTest(unittest.TestCase):
def _load_datasamples(self, num_samples):
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
# automatic decoding with librispeech
speech_samples = ds.sort("id").filter(
lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)]
)[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def _load_superb(self, task, num_samples):
ds = load_dataset("anton-l/superb_dummy", task, split="test")
return ds[:num_samples]
def test_inference_encoder_base(self):
model = UniSpeechSatModel.from_pretrained("microsoft/unispeech-sat-base-plus")
model.to(torch_device)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
"facebook/wav2vec2-base", return_attention_mask=True
)
input_speech = self._load_datasamples(2)
inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True)
with torch.no_grad():
outputs = model(
inputs_dict.input_values.to(torch_device),
attention_mask=inputs_dict.attention_mask.to(torch_device),
)
# fmt: off
expected_hidden_states_slice = torch.tensor(
[[[-0.0743, 0.1384],
[-0.0845, 0.1704]],
[[-0.0954, 0.1936],
[-0.1123, 0.2095]]],
device=torch_device,
)
# fmt: on
self.assertTrue(torch.allclose(outputs.last_hidden_state[:, :2, -2:], expected_hidden_states_slice, atol=1e-3))
def test_inference_encoder_large(self):
model = UniSpeechSatModel.from_pretrained("microsoft/unispeech-sat-large")
model.to(torch_device)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-large-xlsr-53")
input_speech = self._load_datasamples(2)
inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True)
with torch.no_grad():
outputs = model(
inputs_dict.input_values.to(torch_device),
attention_mask=inputs_dict.attention_mask.to(torch_device),
)
# fmt: off
expected_hidden_states_slice = torch.tensor(
[[[-0.1172, -0.0797],
[-0.0012, 0.0213]],
[[-0.1225, -0.1277],
[-0.0668, -0.0585]]],
device=torch_device,
)
# fmt: on
self.assertTrue(torch.allclose(outputs.last_hidden_state[:, :2, -2:], expected_hidden_states_slice, atol=1e-3))
def test_inference_diarization(self):
model = UniSpeechSatForAudioFrameClassification.from_pretrained("microsoft/unispeech-sat-base-plus-sd").to(
torch_device
)
processor = Wav2Vec2FeatureExtractor.from_pretrained("microsoft/unispeech-sat-base-plus-sd")
input_data = self._load_superb("sd", 4)
inputs = processor(input_data["speech"], return_tensors="pt", padding=True, sampling_rate=16_000)
input_values = inputs.input_values.to(torch_device)
attention_mask = inputs.attention_mask.to(torch_device)
with torch.no_grad():
outputs = model(input_values, attention_mask=attention_mask)
# labels is a one-hot array of shape (num_frames, num_speakers)
labels = (outputs.logits > 0).long()
# s3prl logits for the same batch
expected_logits = torch.tensor(
[
[[-5.6119, -5.5845], [-3.7772, -5.4824], [-3.6914, -5.1619], [-4.7560, -5.0496]],
[[-6.3785, -4.8365], [-5.5863, -5.4149], [-5.5639, -4.8469], [-6.1511, -4.0052]],
[[-6.0355, -3.7414], [-5.5968, -4.8061], [-5.4620, -4.7310], [-5.5864, -4.6078]],
[[-5.9493, -4.8963], [-4.4050, -5.4476], [-4.1755, -5.1395], [-4.0272, -4.3705]],
],
device=torch_device,
)
self.assertEqual(labels[0, :, 0].sum(), 270)
self.assertEqual(labels[0, :, 1].sum(), 647)
# TODO: update the tolerance after the CI moves to torch 1.10
self.assertTrue(torch.allclose(outputs.logits[:, :4], expected_logits, atol=1e-2))
def test_inference_speaker_verification(self):
model = UniSpeechSatForXVector.from_pretrained("microsoft/unispeech-sat-base-plus-sv").to(torch_device)
processor = Wav2Vec2FeatureExtractor.from_pretrained("microsoft/unispeech-sat-base-plus-sv")
input_data = self._load_superb("si", 4)
inputs = processor(input_data["speech"], return_tensors="pt", padding=True)
        labels = torch.tensor([5, 1, 1, 3], device=torch_device)
with torch.no_grad():
input_values = inputs.input_values.to(torch_device)
attention_mask = inputs.attention_mask.to(torch_device)
outputs = model(input_values, attention_mask=attention_mask, labels=labels)
embeddings = torch.nn.functional.normalize(outputs.embeddings, dim=-1)
cosine_sim = torch.nn.CosineSimilarity(dim=-1)
# id10002 vs id10002
self.assertAlmostEqual(cosine_sim(embeddings[1], embeddings[2]).item(), 0.9671, 3)
# id10006 vs id10002
self.assertAlmostEqual(cosine_sim(embeddings[0], embeddings[1]).item(), 0.4941, 3)
# id10002 vs id10004
self.assertAlmostEqual(cosine_sim(embeddings[2], embeddings[3]).item(), 0.5616, 3)
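        # Editor's note: an actual verification decision would threshold these
        # cosine scores, e.g. `same_speaker = cosine_sim(a, b).item() > t` for
        # some tuned t; the threshold is application-dependent and not asserted here.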
# TODO: update the tolerance after the CI moves to torch 1.10
self.assertAlmostEqual(outputs.loss.item(), 18.5925, 2)
hf_public_repos/transformers/tests/models/deberta/test_tokenization_deberta.py
# coding=utf-8
# Copyright 2019 Hugging Face inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = DebertaTokenizer
test_rust_tokenizer = True
rust_tokenizer_class = DebertaTokenizerFast
def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"[UNK]",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
self.special_tokens_map = {"unk_token": "[UNK]"}
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
def get_tokenizer(self, **kwargs):
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
input_text = "lower newer"
output_text = "lower newer"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = self.get_tokenizer()
text = "lower newer"
bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
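        # Editor's note, tracing the BPE above: "lower" splits into l o w e r and
        # only the "e r" merge applies, giving ["l", "o", "w", "er"]; " newer"
        # becomes \u0120 n e w e r (\u0120 encodes the leading space) and again
        # only "e r" merges. The appended unk_token "[UNK]" maps to id 19.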
def test_token_type_ids(self):
tokenizer = self.get_tokenizer()
tokd = tokenizer("Hello", "World")
expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_text_from_decode = tokenizer.encode(
"sequence builders", add_special_tokens=True, add_prefix_space=False
)
encoded_pair_from_decode = tokenizer.encode(
"sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def test_tokenizer_integration(self):
tokenizer_classes = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class)
for tokenizer_class in tokenizer_classes:
tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")
sequences = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
encoding = tokenizer(sequences, padding=True)
decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
# fmt: off
expected_encoding = {
'input_ids': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
expected_decoded_sequence = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
self.assertDictEqual(encoding.data, expected_encoding)
for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
self.assertEqual(expected, decoded)
hf_public_repos/transformers/tests/models/deberta/test_modeling_deberta.py
# coding=utf-8
# Copyright 2018 Microsoft Authors and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
relative_attention=False,
position_biased_input=True,
pos_att_type="None",
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.relative_attention = relative_attention
self.position_biased_input = position_biased_input
self.pos_att_type = pos_att_type
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return DebertaConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
relative_attention=self.relative_attention,
position_biased_input=self.position_biased_input,
pos_att_type=self.pos_att_type,
)
def get_pipeline_config(self):
config = self.get_config()
config.vocab_size = 300
return config
def check_loss_output(self, result):
self.parent.assertListEqual(list(result.loss.size()), [])
def create_and_check_deberta_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = DebertaModel(config=config)
model.to(torch_device)
model.eval()
sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
sequence_output = model(input_ids)[0]
self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
def create_and_check_deberta_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = DebertaForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_deberta_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = DebertaForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
self.check_loss_output(result)
def create_and_check_deberta_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = DebertaForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_deberta_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = DebertaForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
fx_compatible = True
test_torchscript = False
test_pruning = False
test_head_masking = False
is_encoder_decoder = False
def setUp(self):
self.model_tester = DebertaModelTester(self)
self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_deberta_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = DebertaModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
@unittest.skip(reason="Model not available yet")
def test_inference_masked_lm(self):
pass
@slow
def test_inference_no_head(self):
model = DebertaModel.from_pretrained("microsoft/deberta-base")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
output = model(input_ids, attention_mask=attention_mask)[0]
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
)
self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
hf_public_repos/transformers/tests/models/deberta/test_modeling_tf_deberta.py
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import unittest
from transformers import DebertaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
)
class TFDebertaModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        # not exposed as constructor arguments; fixed to the values this test expects
        self.relative_attention = False
        self.max_relative_positions = -1
        self.position_biased_input = True
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
config = DebertaConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
relative_attention=self.relative_attention,
max_relative_positions=self.max_relative_positions,
position_biased_input=self.position_biased_input,
initializer_range=self.initializer_range,
return_dict=True,
)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFDebertaModel(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
inputs = [input_ids, input_mask]
result = model(inputs)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFDebertaForMaskedLM(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = TFDebertaForSequenceClassification(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = TFDebertaForTokenClassification(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFDebertaForQuestionAnswering(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFDebertaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
TFDebertaModel,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
)
if is_tf_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": TFDebertaModel,
"fill-mask": TFDebertaForMaskedLM,
"question-answering": TFDebertaForQuestionAnswering,
"text-classification": TFDebertaForSequenceClassification,
"token-classification": TFDebertaForTokenClassification,
"zero-shot": TFDebertaForSequenceClassification,
}
if is_tf_available()
else {}
)
test_head_masking = False
test_onnx = False
def setUp(self):
self.model_tester = TFDebertaModelTester(self)
self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model = TFDebertaModel.from_pretrained("kamalkraj/deberta-base")
self.assertIsNotNone(model)
@require_tf
class TFDeBERTaModelIntegrationTest(unittest.TestCase):
@unittest.skip(reason="Model not available yet")
def test_inference_masked_lm(self):
pass
@slow
def test_inference_no_head(self):
model = TFDebertaModel.from_pretrained("kamalkraj/deberta-base")
input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
output = model(input_ids, attention_mask=attention_mask)[0]
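        # Sanity-check a 3x3 slice of the hidden states against reference values; atol=1e-4 absorbs
        # small numeric differences across backends and hardware.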
expected_slice = tf.constant(
[
[
[-0.59855896, -0.80552566, -0.8462135],
[1.4484025, -0.93483794, -0.80593085],
[0.3122741, 0.00316059, -1.4131377],
]
]
)
tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
# File: hf_public_repos/transformers/tests/models/megatron_gpt2/test_modeling_megatron_gpt2.py
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
import torch
from transformers import GPT2LMHeadModel
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronGPT2IntegrationTest(unittest.TestCase):
@slow
@unittest.skip("Model is not available.")
def test_inference_no_head(self):
directory = "nvidia/megatron-gpt2-345m/"
if "MYDIR" in os.environ:
directory = os.path.join(os.environ["MYDIR"], directory)
model = GPT2LMHeadModel.from_pretrained(directory)
model.to(torch_device)
model.half()
input_ids = torch.tensor(
[[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]],
device=torch_device,
dtype=torch.long,
)
with torch.no_grad():
output = model(input_ids).logits
expected_shape = torch.Size((1, 9, 50257))
self.assertEqual(output.shape, expected_shape)
expected_diag = torch.tensor(
[
4.9414,
-0.2920,
-1.2148,
-4.0273,
-0.5161,
-5.2109,
-1.2412,
-1.8301,
-1.7734,
-4.7148,
-0.2317,
-1.0811,
-2.1777,
0.4141,
-3.7969,
-4.0586,
-2.5332,
-3.3809,
4.3867,
],
device=torch_device,
dtype=torch.half,
)
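        # Spot-check 19 logits tracing a diagonal through the (9, 50257) grid:
        # the row index walks 0..8 (8 * i // 17) while the column strides by 2792.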
for i in range(19):
r, c = 8 * i // 17, 2792 * i # along the diagonal
computed, expected = output[0, r, c], expected_diag[i]
msg = f"row={r} col={c} computed={computed} expected={expected}"
self.assertAlmostEqual(computed, expected, delta=1e-4, msg=msg)
# File: hf_public_repos/transformers/tests/models/yoso/test_modeling_yoso.py
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch YOSO model. """
import unittest
from transformers import YosoConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
YosoForMaskedLM,
YosoForMultipleChoice,
YosoForQuestionAnswering,
YosoForSequenceClassification,
YosoForTokenClassification,
YosoModel,
)
from transformers.models.yoso.modeling_yoso import YOSO_PRETRAINED_MODEL_ARCHIVE_LIST
class YosoModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return YosoConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
)
def get_pipeline_config(self):
config = self.get_config()
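        # Pipeline tests run real tokenizers against the model, so the toy vocab is enlarged
        # to keep the generated token ids in range.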
config.vocab_size = 300
return config
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = YosoModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_model_as_decoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.add_cross_attention = True
model = YosoModel(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
)
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = YosoForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = YosoForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = YosoForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = YosoForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = YosoForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
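        # Multiple-choice heads expect inputs of shape (batch_size, num_choices, seq_length);
        # replicate each example across the choice axis.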
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class YosoModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
YosoModel,
YosoForMaskedLM,
YosoForMultipleChoice,
YosoForQuestionAnswering,
YosoForSequenceClassification,
YosoForTokenClassification,
)
if is_torch_available()
else ()
)
test_pruning = False
test_headmasking = False
test_torchscript = False
all_generative_model_classes = ()
pipeline_model_mapping = (
{
"feature-extraction": YosoModel,
"fill-mask": YosoForMaskedLM,
"question-answering": YosoForQuestionAnswering,
"text-classification": YosoForSequenceClassification,
"token-classification": YosoForTokenClassification,
"zero-shot": YosoForSequenceClassification,
}
if is_torch_available()
else {}
)
def setUp(self):
self.model_tester = YosoModelTester(self)
self.config_tester = ConfigTester(self, config_class=YosoConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in YOSO_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = YosoModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_attention_outputs(self):
return
@require_torch
class YosoModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_no_head(self):
model = YosoModel.from_pretrained("uw-madison/yoso-4096")
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
with torch.no_grad():
output = model(input_ids)[0]
expected_shape = torch.Size((1, 6, 768))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[-0.0611, 0.1242, 0.0840], [0.0280, -0.0048, 0.1125], [0.0106, 0.0226, 0.0751]]]
)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
def test_inference_masked_lm(self):
model = YosoForMaskedLM.from_pretrained("uw-madison/yoso-4096")
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
with torch.no_grad():
output = model(input_ids)[0]
vocab_size = 50265
expected_shape = torch.Size((1, 6, vocab_size))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[-2.1313, -3.7285, -2.2407], [-2.7047, -3.3314, -2.6408], [0.0629, -2.5166, -0.3356]]]
)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
def test_inference_masked_lm_long_input(self):
model = YosoForMaskedLM.from_pretrained("uw-madison/yoso-4096")
input_ids = torch.arange(4096).unsqueeze(0)
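        # A full 4096-token sequence exercises the long-input attention path the checkpoint is sized for.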
with torch.no_grad():
output = model(input_ids)[0]
vocab_size = 50265
expected_shape = torch.Size((1, 4096, vocab_size))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[-2.3914, -4.3742, -5.0956], [-4.0988, -4.2384, -7.0406], [-3.1427, -3.7192, -6.6800]]]
)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
# File: hf_public_repos/transformers/tests/models/realm/test_tokenization_realm.py
# coding=utf-8
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from transformers import RealmTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.realm.tokenization_realm import RealmTokenizer
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RealmTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = RealmTokenizer
rust_tokenizer_class = RealmTokenizerFast
test_rust_tokenizer = True
space_between_special_tokens = True
from_pretrained_filter = filter_non_english
def setUp(self):
super().setUp()
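        # Minimal WordPiece vocab for these tests; e.g. "unwanted" splits into "un", "##want", "##ed".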
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def get_input_output_texts(self, tokenizer):
input_text = "UNwant\u00E9d,running"
output_text = "unwanted, running"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = self.tokenizer_class(self.vocab_file)
tokens = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = "UNwant\u00E9d,running"
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
# With lower casing
tokenizer = self.get_tokenizer(do_lower_case=True)
rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
sequence = "UNwant\u00E9d,running"
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
def test_chinese(self):
tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
def test_basic_tokenizer_lower(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
def test_basic_tokenizer_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_default(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_no_lower(self):
tokenizer = BasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_respects_never_split_tokens(self):
tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
)
def test_wordpiece_tokenizer(self):
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
vocab = {}
for i, token in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize(""), [])
self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def test_is_control(self):
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
def test_clean_text(self):
tokenizer = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
if self.test_rust_tokenizer:
rust_tokenizer = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
)
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_2 + [102]
def test_offsets_with_special_characters(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
tokens = tokenizer_r.encode_plus(
sentence,
return_attention_mask=False,
return_token_type_ids=False,
return_offsets_mapping=True,
add_special_tokens=True,
)
do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
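                # Offsets index into the original sentence: special tokens map to (0, 0),
                # and "##" subword pieces keep character-level spans.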
self.assertEqual(
[e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
)
self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
@slow
def test_batch_encode_candidates(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
encoded_sentence_r = tokenizer_r.batch_encode_candidates(text, max_length=10, return_tensors="np")
encoded_sentence_p = tokenizer_p.batch_encode_candidates(text, max_length=10, return_tensors="np")
expected_shape = (2, 2, 10)
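                # batch_encode_candidates pads/truncates every candidate text,
                # giving tensors of shape (batch_size, num_candidates, max_length).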
self.assertEqual(encoded_sentence_r["input_ids"].shape, expected_shape)
self.assertEqual(encoded_sentence_r["attention_mask"].shape, expected_shape)
self.assertEqual(encoded_sentence_r["token_type_ids"].shape, expected_shape)
self.assertEqual(encoded_sentence_p["input_ids"].shape, expected_shape)
self.assertEqual(encoded_sentence_p["attention_mask"].shape, expected_shape)
self.assertEqual(encoded_sentence_p["token_type_ids"].shape, expected_shape)
# File: hf_public_repos/transformers/tests/models/realm/test_modeling_realm.py
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch REALM model. """
import copy
import unittest
import numpy as np
from transformers import RealmConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
RealmEmbedder,
RealmForOpenQA,
RealmKnowledgeAugEncoder,
RealmReader,
RealmRetriever,
RealmScorer,
RealmTokenizer,
)
class RealmModelTester:
def __init__(
self,
parent,
batch_size=13,
retriever_proj_size=128,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
span_hidden_size=50,
max_span_width=10,
reader_layer_norm_eps=1e-3,
reader_beam_size=4,
reader_seq_len=288 + 32,
num_block_records=13353718,
searcher_beam_size=8,
searcher_seq_len=64,
num_labels=3,
num_choices=4,
num_candidates=10,
scope=None,
):
# General config
self.parent = parent
self.batch_size = batch_size
self.retriever_proj_size = retriever_proj_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
# Reader config
self.span_hidden_size = span_hidden_size
self.max_span_width = max_span_width
self.reader_layer_norm_eps = reader_layer_norm_eps
self.reader_beam_size = reader_beam_size
self.reader_seq_len = reader_seq_len
# Searcher config
self.num_block_records = num_block_records
self.searcher_beam_size = searcher_beam_size
self.searcher_seq_len = searcher_seq_len
self.num_labels = num_labels
self.num_choices = num_choices
self.num_candidates = num_candidates
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        candidate_input_ids = ids_tensor([self.batch_size, self.num_candidates, self.seq_length], self.vocab_size)
reader_input_ids = ids_tensor([self.reader_beam_size, self.reader_seq_len], self.vocab_size)
input_mask = None
        candidate_input_mask = None
reader_input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
            candidate_input_mask = random_attention_mask([self.batch_size, self.num_candidates, self.seq_length])
reader_input_mask = random_attention_mask([self.reader_beam_size, self.reader_seq_len])
token_type_ids = None
candidate_token_type_ids = None
reader_token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
candidate_token_type_ids = ids_tensor(
[self.batch_size, self.num_candidates, self.seq_length], self.type_vocab_size
)
reader_token_type_ids = ids_tensor([self.reader_beam_size, self.reader_seq_len], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
# inputs with additional num_candidates axis.
        scorer_encoder_inputs = (candidate_input_ids, candidate_input_mask, candidate_token_type_ids)
# reader inputs
reader_inputs = (reader_input_ids, reader_input_mask, reader_token_type_ids)
return (
config,
input_ids,
token_type_ids,
input_mask,
scorer_encoder_inputs,
reader_inputs,
sequence_labels,
token_labels,
choice_labels,
)
def get_config(self):
return RealmConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
retriever_proj_size=self.retriever_proj_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
num_candidates=self.num_candidates,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
)
def create_and_check_embedder(
self,
config,
input_ids,
token_type_ids,
input_mask,
scorer_encoder_inputs,
reader_inputs,
sequence_labels,
token_labels,
choice_labels,
):
model = RealmEmbedder(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.projected_score.shape, (self.batch_size, self.retriever_proj_size))
def create_and_check_encoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
scorer_encoder_inputs,
reader_inputs,
sequence_labels,
token_labels,
choice_labels,
):
model = RealmKnowledgeAugEncoder(config=config)
model.to(torch_device)
model.eval()
relevance_score = floats_tensor([self.batch_size, self.num_candidates])
result = model(
scorer_encoder_inputs[0],
attention_mask=scorer_encoder_inputs[1],
token_type_ids=scorer_encoder_inputs[2],
relevance_score=relevance_score,
labels=token_labels,
)
self.parent.assertEqual(
result.logits.shape, (self.batch_size * self.num_candidates, self.seq_length, self.vocab_size)
)
def create_and_check_reader(
self,
config,
input_ids,
token_type_ids,
input_mask,
scorer_encoder_inputs,
reader_inputs,
sequence_labels,
token_labels,
choice_labels,
):
model = RealmReader(config=config)
model.to(torch_device)
model.eval()
relevance_score = floats_tensor([self.reader_beam_size])
result = model(
reader_inputs[0],
attention_mask=reader_inputs[1],
token_type_ids=reader_inputs[2],
relevance_score=relevance_score,
)
self.parent.assertEqual(result.block_idx.shape, ())
self.parent.assertEqual(result.candidate.shape, ())
self.parent.assertEqual(result.start_pos.shape, ())
self.parent.assertEqual(result.end_pos.shape, ())
def create_and_check_scorer(
self,
config,
input_ids,
token_type_ids,
input_mask,
scorer_encoder_inputs,
reader_inputs,
sequence_labels,
token_labels,
choice_labels,
):
model = RealmScorer(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
candidate_input_ids=scorer_encoder_inputs[0],
candidate_attention_mask=scorer_encoder_inputs[1],
candidate_token_type_ids=scorer_encoder_inputs[2],
)
self.parent.assertEqual(result.relevance_score.shape, (self.batch_size, self.num_candidates))
self.parent.assertEqual(result.query_score.shape, (self.batch_size, self.retriever_proj_size))
self.parent.assertEqual(
result.candidate_score.shape, (self.batch_size, self.num_candidates, self.retriever_proj_size)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
scorer_encoder_inputs,
reader_inputs,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class RealmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
RealmEmbedder,
RealmKnowledgeAugEncoder,
# RealmScorer is excluded from common tests as it is a container model
# consisting of two RealmEmbedders & a simple inner product calculation.
# RealmScorer
)
if is_torch_available()
else ()
)
all_generative_model_classes = ()
pipeline_model_mapping = {} if is_torch_available() else {}
# disable these tests because there is no base_model in Realm
test_save_load_fast_init_from_base = False
test_save_load_fast_init_to_base = False
def setUp(self):
self.test_pruning = False
self.model_tester = RealmModelTester(self)
self.config_tester = ConfigTester(self, config_class=RealmConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_embedder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_embedder(*config_and_inputs)
def test_encoder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_encoder(*config_and_inputs)
def test_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = embedding_type
            self.model_tester.create_and_check_embedder(*config_and_inputs)
            self.model_tester.create_and_check_encoder(*config_and_inputs)
def test_scorer(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_scorer(*config_and_inputs)
def test_training(self):
if not self.model_tester.is_training:
return
config, *inputs = self.model_tester.prepare_config_and_inputs()
input_ids, token_type_ids, input_mask, scorer_encoder_inputs = inputs[0:4]
config.return_dict = True
tokenizer = RealmTokenizer.from_pretrained("google/realm-orqa-nq-openqa")
# RealmKnowledgeAugEncoder training
model = RealmKnowledgeAugEncoder(config)
model.to(torch_device)
model.train()
inputs_dict = {
"input_ids": scorer_encoder_inputs[0].to(torch_device),
"attention_mask": scorer_encoder_inputs[1].to(torch_device),
"token_type_ids": scorer_encoder_inputs[2].to(torch_device),
"relevance_score": floats_tensor([self.model_tester.batch_size, self.model_tester.num_candidates]),
}
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
inputs = inputs_dict
loss = model(**inputs).loss
loss.backward()
# RealmForOpenQA training
openqa_config = copy.deepcopy(config)
        openqa_config.vocab_size = 30522  # retrieved texts inevitably contain tokens outside the tiny 99-token test vocab
openqa_config.num_block_records = 5
openqa_config.searcher_beam_size = 2
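        # Shrink the index and search beam so retrieval runs over the five dummy block records below.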
block_records = np.array(
[
b"This is the first record.",
b"This is the second record.",
b"This is the third record.",
b"This is the fourth record.",
b"This is the fifth record.",
],
dtype=object,
)
retriever = RealmRetriever(block_records, tokenizer)
model = RealmForOpenQA(openqa_config, retriever)
model.to(torch_device)
model.train()
inputs_dict = {
"input_ids": input_ids[:1].to(torch_device),
"attention_mask": input_mask[:1].to(torch_device),
"token_type_ids": token_type_ids[:1].to(torch_device),
"answer_ids": input_ids[:1].tolist(),
}
inputs = self._prepare_for_class(inputs_dict, RealmForOpenQA)
loss = model(**inputs).reader_output.loss
loss.backward()
# Test model.block_embedding_to
device = torch.device("cpu")
model.block_embedding_to(device)
loss = model(**inputs).reader_output.loss
loss.backward()
self.assertEqual(model.block_emb.device.type, device.type)
@slow
def test_embedder_from_pretrained(self):
model = RealmEmbedder.from_pretrained("google/realm-cc-news-pretrained-embedder")
self.assertIsNotNone(model)
@slow
def test_encoder_from_pretrained(self):
model = RealmKnowledgeAugEncoder.from_pretrained("google/realm-cc-news-pretrained-encoder")
self.assertIsNotNone(model)
@slow
def test_open_qa_from_pretrained(self):
model = RealmForOpenQA.from_pretrained("google/realm-orqa-nq-openqa")
self.assertIsNotNone(model)
@slow
def test_reader_from_pretrained(self):
model = RealmReader.from_pretrained("google/realm-orqa-nq-reader")
self.assertIsNotNone(model)
@slow
def test_scorer_from_pretrained(self):
model = RealmScorer.from_pretrained("google/realm-cc-news-pretrained-scorer")
self.assertIsNotNone(model)
@require_torch
class RealmModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_embedder(self):
retriever_projected_size = 128
model = RealmEmbedder.from_pretrained("google/realm-cc-news-pretrained-embedder")
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
output = model(input_ids)[0]
expected_shape = torch.Size((1, retriever_projected_size))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor([[-0.0714, -0.0837, -0.1314]])
self.assertTrue(torch.allclose(output[:, :3], expected_slice, atol=1e-4))
@slow
def test_inference_encoder(self):
num_candidates = 2
vocab_size = 30522
model = RealmKnowledgeAugEncoder.from_pretrained(
"google/realm-cc-news-pretrained-encoder", num_candidates=num_candidates
)
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
relevance_score = torch.tensor([[0.3, 0.7]], dtype=torch.float32)
output = model(input_ids, relevance_score=relevance_score)[0]
expected_shape = torch.Size((2, 6, vocab_size))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor([[[-11.0888, -11.2544], [-10.2170, -10.3874]]])
self.assertTrue(torch.allclose(output[1, :2, :2], expected_slice, atol=1e-4))
@slow
def test_inference_open_qa(self):
from transformers.models.realm.retrieval_realm import RealmRetriever
tokenizer = RealmTokenizer.from_pretrained("google/realm-orqa-nq-openqa")
retriever = RealmRetriever.from_pretrained("google/realm-orqa-nq-openqa")
model = RealmForOpenQA.from_pretrained(
"google/realm-orqa-nq-openqa",
retriever=retriever,
)
question = "Who is the pioneer in modern computer science?"
question = tokenizer(
[question],
padding=True,
truncation=True,
max_length=model.config.searcher_seq_len,
return_tensors="pt",
).to(model.device)
predicted_answer_ids = model(**question).predicted_answer_ids
predicted_answer = tokenizer.decode(predicted_answer_ids)
self.assertEqual(predicted_answer, "alan mathison turing")
@slow
def test_inference_reader(self):
config = RealmConfig(reader_beam_size=2, max_span_width=3)
model = RealmReader.from_pretrained("google/realm-orqa-nq-reader", config=config)
concat_input_ids = torch.arange(10).view((2, 5))
concat_token_type_ids = torch.tensor([[0, 0, 1, 1, 1], [0, 0, 1, 1, 1]], dtype=torch.int64)
concat_block_mask = torch.tensor([[0, 0, 1, 1, 0], [0, 0, 1, 1, 0]], dtype=torch.int64)
relevance_score = torch.tensor([0.3, 0.7], dtype=torch.float32)
output = model(
concat_input_ids,
token_type_ids=concat_token_type_ids,
relevance_score=relevance_score,
block_mask=concat_block_mask,
return_dict=True,
)
block_idx_expected_shape = torch.Size(())
start_pos_expected_shape = torch.Size((1,))
end_pos_expected_shape = torch.Size((1,))
self.assertEqual(output.block_idx.shape, block_idx_expected_shape)
self.assertEqual(output.start_pos.shape, start_pos_expected_shape)
self.assertEqual(output.end_pos.shape, end_pos_expected_shape)
expected_block_idx = torch.tensor(1)
expected_start_pos = torch.tensor(3)
expected_end_pos = torch.tensor(3)
self.assertTrue(torch.allclose(output.block_idx, expected_block_idx, atol=1e-4))
self.assertTrue(torch.allclose(output.start_pos, expected_start_pos, atol=1e-4))
self.assertTrue(torch.allclose(output.end_pos, expected_end_pos, atol=1e-4))
@slow
def test_inference_scorer(self):
num_candidates = 2
model = RealmScorer.from_pretrained("google/realm-cc-news-pretrained-scorer", num_candidates=num_candidates)
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
candidate_input_ids = torch.tensor([[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
output = model(input_ids, candidate_input_ids=candidate_input_ids)[0]
expected_shape = torch.Size((1, 2))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor([[0.7410, 0.7170]])
self.assertTrue(torch.allclose(output, expected_slice, atol=1e-4))
# File: hf_public_repos/transformers/tests/models/realm/test_retrieval_realm.py
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
self.num_block_records = 5
# Realm tok
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
os.makedirs(realm_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
os.makedirs(realm_block_records_path, exist_ok=True)
def get_tokenizer(self) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def get_config(self):
config = RealmConfig(num_block_records=self.num_block_records)
return config
def get_dummy_dataset(self):
dataset = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
}
)
return dataset
def get_dummy_block_records(self):
block_records = np.array(
[
b"This is the first record",
b"This is the second record",
b"This is the third record",
b"This is the fourth record",
b"This is the fifth record",
b"This is a longer longer longer record",
],
dtype=object,
)
return block_records
def get_dummy_retriever(self):
retriever = RealmRetriever(
block_records=self.get_dummy_block_records(),
tokenizer=self.get_tokenizer(),
)
return retriever
def test_retrieve(self):
config = self.get_config()
retriever = self.get_dummy_retriever()
tokenizer = retriever.tokenizer
retrieved_block_ids = np.array([0, 3], dtype="long")
question_input_ids = tokenizer(["Test question"]).input_ids
answer_ids = tokenizer(
["the fourth"],
add_special_tokens=False,
return_token_type_ids=False,
return_attention_mask=False,
).input_ids
max_length = config.reader_seq_len
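        # The retriever concatenates the question with each retrieved block and reports
        # whether (and where) the answer span occurs in that block.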
has_answers, start_pos, end_pos, concat_inputs = retriever(
retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
)
self.assertEqual(len(has_answers), 2)
self.assertEqual(len(start_pos), 2)
self.assertEqual(len(end_pos), 2)
self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
)
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
)
def test_block_has_answer(self):
config = self.get_config()
retriever = self.get_dummy_retriever()
tokenizer = retriever.tokenizer
retrieved_block_ids = np.array([0, 3, 5], dtype="long")
question_input_ids = tokenizer(["Test question"]).input_ids
answer_ids = tokenizer(
["the fourth", "longer longer"],
add_special_tokens=False,
return_token_type_ids=False,
return_attention_mask=False,
).input_ids
max_length = config.reader_seq_len
has_answers, start_pos, end_pos, _ = retriever(
retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
)
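        # Start/end positions are padded with -1 up to the largest number of answer spans found in any block.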
self.assertEqual([False, True, True], has_answers)
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
def test_save_load_pretrained(self):
retriever = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
# Test local path
retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
self.assertEqual(retriever.block_records[0], b"This is the first record")
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
mock_hf_hub_download.return_value = os.path.join(
os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
)
retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")
self.assertEqual(retriever.block_records[0], b"This is the first record")
# File: hf_public_repos/transformers/tests/models/mistral/test_modeling_mistral.py
# coding=utf-8
# Copyright 2023 Mistral AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Mistral model. """
import gc
import tempfile
import unittest
import pytest
from transformers import AutoTokenizer, MistralConfig, is_torch_available
from transformers.testing_utils import (
backend_empty_cache,
require_bitsandbytes,
require_flash_attn,
require_torch,
require_torch_gpu,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MistralForCausalLM,
MistralForSequenceClassification,
MistralModel,
)
class MistralModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=False,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
num_key_value_heads=2,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
pad_token_id=0,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
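        # num_key_value_heads < num_attention_heads exercises Mistral's grouped-query attention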
self.num_key_value_heads = num_key_value_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.pad_token_id = pad_token_id
self.scope = scope
# Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.prepare_config_and_inputs
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return MistralConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
num_key_value_heads=self.num_key_value_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
)
# Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_model with Llama->Mistral
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = MistralModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
# Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_model_as_decoder with Llama->Mistral
def create_and_check_model_as_decoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.add_cross_attention = True
model = MistralModel(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
result = model(
input_ids,
attention_mask=input_mask,
encoder_hidden_states=encoder_hidden_states,
)
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
# Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_for_causal_lm with Llama->Mistral
def create_and_check_for_causal_lm(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
model = MistralForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
# Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.create_and_check_decoder_model_past_large_inputs with Llama->Mistral
def create_and_check_decoder_model_past_large_inputs(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.is_decoder = True
config.add_cross_attention = True
model = MistralForCausalLM(config=config)
model.to(torch_device)
model.eval()
# first forward pass
outputs = model(
input_ids,
attention_mask=input_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=True,
)
past_key_values = outputs.past_key_values
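        # the cached forward pass over only the new tokens below must reproduce, for
        # those positions, the hidden states of a full forward pass over the whole sequence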
        # create hypothetical multiple next tokens and extend next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append the next tokens to input_ids and extend the attention mask accordingly
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
output_from_no_past = model(
next_input_ids,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_hidden_states=True,
)["hidden_states"][0]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
)["hidden_states"][0]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
# Copied from tests.models.llama.test_modeling_llama.LlamaModelTester.prepare_config_and_inputs_for_common
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class MistralModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(MistralModel, MistralForCausalLM, MistralForSequenceClassification) if is_torch_available() else ()
)
all_generative_model_classes = (MistralForCausalLM,) if is_torch_available() else ()
pipeline_model_mapping = (
{
"feature-extraction": MistralModel,
"text-classification": MistralForSequenceClassification,
"text-generation": MistralForCausalLM,
"zero-shot": MistralForSequenceClassification,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
# TODO (ydshieh): Check this. See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146
def is_pipeline_test_to_skip(
self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
):
return True
def setUp(self):
self.model_tester = MistralModelTester(self)
self.config_tester = ConfigTester(self, config_class=MistralConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_Mistral_sequence_classification_model(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
model = MistralForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
def test_Mistral_sequence_classification_model_for_single_label(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.problem_type = "single_label_classification"
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
model = MistralForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
def test_Mistral_sequence_classification_model_for_multi_label(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.problem_type = "multi_label_classification"
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor(
[self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
).to(torch.float)
model = MistralForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("Mistral buffers include complex numbers, which breaks this test")
def test_save_load_fast_init_from_base(self):
pass
@unittest.skip("Mistral uses GQA on all models so the KV cache is a non standard format")
def test_past_key_values_format(self):
pass
@require_flash_attn
@require_torch_gpu
@pytest.mark.flash_attn_test
@slow
def test_flash_attn_2_generate_padding_right(self):
import torch
for model_class in self.all_generative_model_classes:
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model = model_class.from_pretrained(
tmpdirname, torch_dtype=torch.float16, use_flash_attention_2=False, low_cpu_mem_usage=True
).to(torch_device)
dummy_input = torch.LongTensor([[0, 2, 3, 4], [0, 2, 3, 4]]).to(torch_device)
dummy_attention_mask = torch.LongTensor([[1, 1, 1, 1], [1, 1, 1, 0]]).to(torch_device)
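                # the second row is right-padded; the eager attention path tolerates this,
                # but the flash-attention-2 path is expected to raise a ValueError below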
model.generate(dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False)
model = model_class.from_pretrained(
tmpdirname, torch_dtype=torch.float16, use_flash_attention_2=True, low_cpu_mem_usage=True
).to(torch_device)
with self.assertRaises(ValueError):
_ = model.generate(
dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False
)
@require_flash_attn
@require_torch_gpu
@pytest.mark.flash_attn_test
@slow
def test_flash_attn_2_generate_use_cache(self):
import torch
max_new_tokens = 30
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
dummy_input = inputs_dict[model_class.main_input_name]
if dummy_input.dtype in [torch.float32, torch.bfloat16]:
dummy_input = dummy_input.to(torch.float16)
# make sure that all models have enough positions for generation
if hasattr(config, "max_position_embeddings"):
config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input))
# NOTE: Mistral apparently does not support right padding + use_cache with FA2.
dummy_attention_mask[:, -1] = 1
model = model_class.from_pretrained(
tmpdirname,
torch_dtype=torch.float16,
use_flash_attention_2=True,
low_cpu_mem_usage=True,
).to(torch_device)
# Just test that a large cache works as expected
_ = model.generate(
dummy_input,
attention_mask=dummy_attention_mask,
max_new_tokens=max_new_tokens,
do_sample=False,
use_cache=True,
)
@require_flash_attn
@require_torch_gpu
@pytest.mark.flash_attn_test
@slow
def test_flash_attn_2_inference_padding_right(self):
self.skipTest("Mistral flash attention does not support right padding")
@require_torch
class MistralIntegrationTest(unittest.TestCase):
@slow
def test_model_7b_logits(self):
input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", device_map="auto")
input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
with torch.no_grad():
out = model(input_ids).logits.cpu()
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[-2.5548, -2.5737, -3.0600, -2.5906, -2.8478, -2.8118, -2.9325, -2.7694]])
torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
# slicing logits[0, 0, 0:30]
EXPECTED_SLICE = torch.tensor([-5.8781, -5.8616, -0.1052, -4.7200, -5.8781, -5.8774, -5.8773, -5.8777, -5.8781, -5.8780, -5.8781, -5.8779, -1.0787, 1.7583, -5.8779, -5.8780, -5.8783, -5.8778, -5.8776, -5.8781, -5.8784, -5.8778, -5.8778, -5.8777, -5.8779, -5.8778, -5.8776, -5.8780, -5.8779, -5.8781]) # fmt: skip
torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-4, rtol=1e-4)
del model
backend_empty_cache(torch_device)
gc.collect()
@slow
def test_model_7b_generation(self):
EXPECTED_TEXT_COMPLETION = """My favourite condiment is 100% ketchup. I love it on everything. I’m not a big"""
prompt = "My favourite condiment is "
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", use_fast=False)
model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", device_map="auto")
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)
# greedy generation outputs
generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0)
text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
del model
backend_empty_cache(torch_device)
gc.collect()
@require_bitsandbytes
@slow
@require_flash_attn
def test_model_7b_long_prompt(self):
EXPECTED_OUTPUT_TOKEN_IDS = [306, 338]
# An input with 4097 tokens that is above the size of the sliding window
input_ids = [1] + [306, 338] * 2048
model = MistralForCausalLM.from_pretrained(
"mistralai/Mistral-7B-v0.1",
device_map="auto",
load_in_4bit=True,
use_flash_attention_2=True,
)
input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())
# Assisted generation
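        # (the model acts as its own draft model here, which is enough to exercise the assisted decoding path)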
assistant_model = model
assistant_model.generation_config.num_assistant_tokens = 2
assistant_model.generation_config.num_assistant_tokens_schedule = "constant"
        generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0, assistant_model=assistant_model)
self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())
del assistant_model
del model
backend_empty_cache(torch_device)
gc.collect()
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/clap/test_processor_clap.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
def setUp(self):
self.checkpoint = "laion/clap-htsat-unfused"
self.tmpdirname = tempfile.mkdtemp()
def get_tokenizer(self, **kwargs):
return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)
def get_feature_extractor(self, **kwargs):
return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_save_load_pretrained_default(self):
tokenizer = self.get_tokenizer()
feature_extractor = self.get_feature_extractor()
processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
processor.save_pretrained(self.tmpdirname)
processor = ClapProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
def test_save_load_pretrained_additional_features(self):
processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
processor.save_pretrained(self.tmpdirname)
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
processor = ClapProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
def test_feature_extractor(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
raw_speech = floats_list((3, 1000))
input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
input_processor = processor(audios=raw_speech, return_tensors="np")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
def test_tokenizer(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
input_str = "This is a test string"
encoded_processor = processor(text=input_str)
encoded_tok = tokenizer(input_str)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def test_tokenizer_decode(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids)
decoded_tok = tokenizer.batch_decode(predicted_ids)
self.assertListEqual(decoded_tok, decoded_processor)
def test_model_input_names(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
self.assertListEqual(
processor.model_input_names[2:],
feature_extractor.model_input_names,
msg="`processor` and `feature_extractor` model input names do not match",
)
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/clap/test_modeling_clap.py
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch CLAP model. """
import inspect
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import ClapAudioConfig, ClapConfig, ClapProcessor, ClapTextConfig
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import is_torch_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
_config_zero_init,
floats_tensor,
ids_tensor,
random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapTextModel,
ClapTextModelWithProjection,
)
from transformers.models.clap.modeling_clap import CLAP_PRETRAINED_MODEL_ARCHIVE_LIST
class ClapAudioModelTester:
def __init__(
self,
parent,
batch_size=12,
image_size=60,
num_mel_bins=16,
window_size=4,
spec_size=64,
patch_size=2,
patch_stride=2,
seq_length=16,
freq_ratio=2,
num_channels=3,
is_training=True,
hidden_size=32,
patch_embeds_hidden_size=16,
projection_dim=32,
depths=[2, 2],
num_hidden_layers=2,
num_heads=[2, 2],
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_mel_bins = num_mel_bins
self.window_size = window_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.depths = depths
self.num_heads = num_heads
self.num_attention_heads = num_heads[0]
self.seq_length = seq_length
self.spec_size = spec_size
self.freq_ratio = freq_ratio
self.patch_stride = patch_stride
self.patch_embeds_hidden_size = patch_embeds_hidden_size
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs(self):
input_features = floats_tensor([self.batch_size, 1, self.hidden_size, self.num_mel_bins])
config = self.get_config()
return config, input_features
def get_config(self):
return ClapAudioConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_mel_bins=self.num_mel_bins,
window_size=self.window_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
patch_stride=self.patch_stride,
projection_dim=self.projection_dim,
depths=self.depths,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
spec_size=self.spec_size,
freq_ratio=self.freq_ratio,
patch_embeds_hidden_size=self.patch_embeds_hidden_size,
)
def create_and_check_model(self, config, input_features):
model = ClapAudioModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_features)
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_model_with_projection(self, config, input_features):
model = ClapAudioModelWithProjection(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_features)
self.parent.assertEqual(result.audio_embeds.shape, (self.batch_size, self.projection_dim))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_features = config_and_inputs
inputs_dict = {"input_features": input_features}
return config, inputs_dict
@require_torch
class ClapAudioModelTest(ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as CLAP does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (ClapAudioModel, ClapAudioModelWithProjection) if is_torch_available() else ()
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
def setUp(self):
self.model_tester = ClapAudioModelTester(self)
self.config_tester = ConfigTester(self, config_class=ClapAudioConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="ClapAudioModel does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_common_attributes(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[2 * self.model_tester.patch_embeds_hidden_size, 2 * self.model_tester.patch_embeds_hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip(reason="ClapAudioModel does not output any loss term in the forward pass")
def test_retain_grad_hidden_states_attentions(self):
pass
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["input_features"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_with_projection(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_with_projection(*config_and_inputs)
@unittest.skip(reason="ClapAudioModel does not output any loss term in the forward pass")
def test_training(self):
pass
@unittest.skip(reason="ClapAudioModel does not output any loss term in the forward pass")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="ClapAudioModel has no base class and is not available in MODEL_MAPPING")
def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(reason="ClapAudioModel has no base class and is not available in MODEL_MAPPING")
def test_save_load_fast_init_to_base(self):
pass
@slow
def test_model_from_pretrained(self):
for model_name in CLAP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ClapAudioModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@slow
def test_model_with_projection_from_pretrained(self):
for model_name in CLAP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ClapAudioModelWithProjection.from_pretrained(model_name)
self.assertIsNotNone(model)
self.assertTrue(hasattr(model, "audio_projection"))
class ClapTextModelTester:
def __init__(
self,
parent,
batch_size=12,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
projection_dim=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
max_position_embeddings=512,
initializer_range=0.02,
scope=None,
projection_hidden_act="relu",
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = scope
self.projection_hidden_act = projection_hidden_act
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
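        # make each mask contiguous: ones up to a random index, zeros afterwards,
        # so every sample carries a realistic left-aligned attention pattern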
if input_mask is not None:
batch_size, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
config = self.get_config()
return config, input_ids, input_mask
def get_config(self):
return ClapTextConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
projection_hidden_act=self.projection_hidden_act,
)
def create_and_check_model(self, config, input_ids, input_mask):
model = ClapTextModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_model_with_projection(self, config, input_ids, input_mask):
model = ClapTextModelWithProjection(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.text_embeds.shape, (self.batch_size, self.projection_dim))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class ClapTextModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (ClapTextModel, ClapTextModelWithProjection) if is_torch_available() else ()
fx_compatible = False
test_pruning = False
test_head_masking = False
def setUp(self):
self.model_tester = ClapTextModelTester(self)
self.config_tester = ConfigTester(self, config_class=ClapTextConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_with_projection(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_with_projection(*config_and_inputs)
@unittest.skip(reason="ClapTextModel does not output any loss term in the forward pass")
def test_training(self):
pass
@unittest.skip(reason="ClapTextModel does not output any loss term in the forward pass")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="ClapTextModel does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="ClapTextModel has no base class and is not available in MODEL_MAPPING")
def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(reason="ClapTextModel has no base class and is not available in MODEL_MAPPING")
def test_save_load_fast_init_to_base(self):
pass
@slow
def test_model_from_pretrained(self):
for model_name in CLAP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ClapTextModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@slow
def test_model_with_projection_from_pretrained(self):
for model_name in CLAP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ClapTextModelWithProjection.from_pretrained(model_name)
self.assertIsNotNone(model)
self.assertTrue(hasattr(model, "text_projection"))
class ClapModelTester:
def __init__(self, parent, text_kwargs=None, audio_kwargs=None, is_training=True):
if text_kwargs is None:
text_kwargs = {}
if audio_kwargs is None:
audio_kwargs = {}
self.parent = parent
self.text_model_tester = ClapTextModelTester(parent, **text_kwargs)
self.audio_model_tester = ClapAudioModelTester(parent, **audio_kwargs)
self.is_training = is_training
def prepare_config_and_inputs(self):
_, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
_, input_features = self.audio_model_tester.prepare_config_and_inputs()
config = self.get_config()
return config, input_ids, attention_mask, input_features
def get_config(self):
return ClapConfig.from_text_audio_configs(
self.text_model_tester.get_config(), self.audio_model_tester.get_config(), projection_dim=64
)
def create_and_check_model(self, config, input_ids, attention_mask, input_features):
model = ClapModel(config).to(torch_device).eval()
with torch.no_grad():
result = model(input_ids, input_features, attention_mask)
self.parent.assertEqual(
result.logits_per_audio.shape, (self.audio_model_tester.batch_size, self.text_model_tester.batch_size)
)
self.parent.assertEqual(
result.logits_per_text.shape, (self.text_model_tester.batch_size, self.audio_model_tester.batch_size)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask, input_features = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"input_features": input_features,
"return_loss": True,
}
return config, inputs_dict
@require_torch
class ClapModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (ClapModel,) if is_torch_available() else ()
pipeline_model_mapping = {"feature-extraction": ClapModel} if is_torch_available() else {}
fx_compatible = False
test_head_masking = False
test_pruning = False
test_resize_embeddings = False
test_attention_outputs = False
def setUp(self):
self.model_tester = ClapModelTester(self)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="Hidden_states is tested in individual model tests")
def test_hidden_states_output(self):
pass
@unittest.skip(reason="Inputs_embeds is tested in individual model tests")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Retain_grad is tested in individual model tests")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="ClapModel does not have input/output embeddings")
def test_model_common_attributes(self):
pass
    # override as the `logit_scale` parameter initialization is different for CLAP
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
                    # check if `logit_scale` is initialized as per the original implementation, i.e. log(1 / 0.07)
if name == "logit_scale":
self.assertAlmostEqual(
param.data.item(),
np.log(1 / 0.07),
delta=1e-3,
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
else:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
def _create_and_check_torchscript(self, config, inputs_dict):
if not self.test_torchscript:
return
configs_no_init = _config_zero_init(config) # To be sure we have no Nan
configs_no_init.torchscript = True
configs_no_init.return_dict = False
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
model.to(torch_device)
model.eval()
try:
input_ids = inputs_dict["input_ids"]
input_features = inputs_dict["input_features"] # CLAP needs input_features
traced_model = torch.jit.trace(model, (input_ids, input_features))
except RuntimeError:
self.fail("Couldn't trace module.")
with tempfile.TemporaryDirectory() as tmp_dir_name:
pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")
try:
torch.jit.save(traced_model, pt_file_name)
except Exception:
self.fail("Couldn't save module.")
try:
loaded_model = torch.jit.load(pt_file_name)
except Exception:
self.fail("Couldn't load module.")
model.to(torch_device)
model.eval()
loaded_model.to(torch_device)
loaded_model.eval()
model_state_dict = model.state_dict()
loaded_model_state_dict = loaded_model.state_dict()
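            # keys present only in the traced model correspond to non-persistent buffers
            # that tracing materializes; drop them before comparing the state dicts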
non_persistent_buffers = {}
for key in loaded_model_state_dict.keys():
if key not in model_state_dict.keys():
non_persistent_buffers[key] = loaded_model_state_dict[key]
loaded_model_state_dict = {
key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
}
self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))
model_buffers = list(model.buffers())
for non_persistent_buffer in non_persistent_buffers.values():
found_buffer = False
for i, model_buffer in enumerate(model_buffers):
if torch.equal(non_persistent_buffer, model_buffer):
found_buffer = True
break
self.assertTrue(found_buffer)
model_buffers.pop(i)
models_equal = True
for layer_name, p1 in model_state_dict.items():
p2 = loaded_model_state_dict[layer_name]
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
def test_load_audio_text_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Save ClapConfig and check if we can load ClapAudioConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
audio_config = ClapAudioConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.audio_config.to_dict(), audio_config.to_dict())
# Save ClapConfig and check if we can load ClapTextConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
text_config = ClapTextConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())
@slow
def test_model_from_pretrained(self):
for model_name in CLAP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ClapModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@slow
@require_torch
class ClapModelIntegrationTest(unittest.TestCase):
paddings = ["repeatpad", "repeat", "pad"]
def test_integration_unfused(self):
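        # expected mean audio-embedding value per padding strategy, used as a cheap regression check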
EXPECTED_MEANS_UNFUSED = {
"repeatpad": 0.0024,
"pad": 0.0020,
"repeat": 0.0023,
}
librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio_sample = librispeech_dummy[-1]
model_id = "laion/clap-htsat-unfused"
model = ClapModel.from_pretrained(model_id).to(torch_device)
processor = ClapProcessor.from_pretrained(model_id)
for padding in self.paddings:
inputs = processor(audios=audio_sample["audio"]["array"], return_tensors="pt", padding=padding).to(
torch_device
)
audio_embed = model.get_audio_features(**inputs)
expected_mean = EXPECTED_MEANS_UNFUSED[padding]
self.assertTrue(
torch.allclose(audio_embed.cpu().mean(), torch.tensor([expected_mean]), atol=1e-3, rtol=1e-3)
)
def test_integration_fused(self):
EXPECTED_MEANS_FUSED = {
"repeatpad": 0.00069,
"repeat": 0.00196,
"pad": -0.000379,
}
librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio_sample = librispeech_dummy[-1]
model_id = "laion/clap-htsat-fused"
model = ClapModel.from_pretrained(model_id).to(torch_device)
processor = ClapProcessor.from_pretrained(model_id)
for padding in self.paddings:
inputs = processor(
audios=audio_sample["audio"]["array"], return_tensors="pt", padding=padding, truncation="fusion"
).to(torch_device)
audio_embed = model.get_audio_features(**inputs)
expected_mean = EXPECTED_MEANS_FUSED[padding]
self.assertTrue(
torch.allclose(audio_embed.cpu().mean(), torch.tensor([expected_mean]), atol=1e-3, rtol=1e-3)
)
def test_batched_fused(self):
EXPECTED_MEANS_FUSED = {
"repeatpad": 0.0010,
"repeat": 0.0020,
"pad": 0.0006,
}
librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio_samples = [sample["array"] for sample in librispeech_dummy[0:4]["audio"]]
model_id = "laion/clap-htsat-fused"
model = ClapModel.from_pretrained(model_id).to(torch_device)
processor = ClapProcessor.from_pretrained(model_id)
for padding in self.paddings:
inputs = processor(audios=audio_samples, return_tensors="pt", padding=padding, truncation="fusion").to(
torch_device
)
audio_embed = model.get_audio_features(**inputs)
expected_mean = EXPECTED_MEANS_FUSED[padding]
self.assertTrue(
torch.allclose(audio_embed.cpu().mean(), torch.tensor([expected_mean]), atol=1e-3, rtol=1e-3)
)
def test_batched_unfused(self):
        EXPECTED_MEANS_UNFUSED = {
"repeatpad": 0.0016,
"repeat": 0.0019,
"pad": 0.0019,
}
librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio_samples = [sample["array"] for sample in librispeech_dummy[0:4]["audio"]]
model_id = "laion/clap-htsat-unfused"
model = ClapModel.from_pretrained(model_id).to(torch_device)
processor = ClapProcessor.from_pretrained(model_id)
for padding in self.paddings:
inputs = processor(audios=audio_samples, return_tensors="pt", padding=padding).to(torch_device)
audio_embed = model.get_audio_features(**inputs)
            expected_mean = EXPECTED_MEANS_UNFUSED[padding]
self.assertTrue(
torch.allclose(audio_embed.cpu().mean(), torch.tensor([expected_mean]), atol=1e-3, rtol=1e-3)
)
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/clap/test_feature_extraction_clap.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import random
import unittest
import numpy as np
from datasets import load_dataset
from transformers import ClapFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.trainer_utils import set_seed
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
# Copied from tests.models.whisper.test_feature_extraction_whisper.floats_list
def floats_list(shape, scale=1.0, rng=None, name=None):
"""Creates a random float32 tensor"""
if rng is None:
rng = global_rng
values = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
@require_torch
@require_torchaudio
# Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTester with Whisper->Clap
class ClapFeatureExtractionTester(unittest.TestCase):
def __init__(
self,
parent,
batch_size=7,
min_seq_length=400,
max_seq_length=2000,
feature_size=10,
hop_length=160,
chunk_length=8,
padding_value=0.0,
sampling_rate=4_000,
return_attention_mask=False,
do_normalize=True,
):
self.parent = parent
self.batch_size = batch_size
self.min_seq_length = min_seq_length
self.max_seq_length = max_seq_length
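        # step between successive sequence lengths so one batch spans min_seq_length..max_seq_length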
self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
self.padding_value = padding_value
self.sampling_rate = sampling_rate
self.return_attention_mask = return_attention_mask
self.do_normalize = do_normalize
self.feature_size = feature_size
self.chunk_length = chunk_length
self.hop_length = hop_length
def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def prepare_inputs_for_common(self, equal_length=False, numpify=False):
def _flatten(list_of_lists):
return list(itertools.chain(*list_of_lists))
if equal_length:
speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
speech_inputs = [
floats_list((x, self.feature_size))
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
]
if numpify:
speech_inputs = [np.asarray(x) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class ClapFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
feature_extraction_class = ClapFeatureExtractor
# Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.setUp with Whisper->Clap
def setUp(self):
self.feat_extract_tester = ClapFeatureExtractionTester(self)
def test_call(self):
        # Tests that all calls wrap to encode_plus and batch_encode_plus
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
# Test feature size
input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
self.assertTrue(input_features.ndim == 4)
# Test not batched input
encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
# Test batched
encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
# Test 2-D numpy arrays are batched.
speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
np_speech_inputs = np.asarray(speech_inputs)
encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
# Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_double_precision_pad
def test_double_precision_pad(self):
import torch
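        # `pad` is expected to downcast float64 inputs to float32 for both numpy and torch outputs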
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
py_speech_inputs = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
self.assertTrue(np_processed.input_features.dtype == np.float32)
pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
self.assertTrue(pt_processed.input_features.dtype == torch.float32)
# Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest._load_datasamples
def _load_datasamples(self, num_samples):
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
# automatic decoding with librispeech
speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def test_integration_fusion_short_input(self):
# fmt: off
EXPECTED_INPUT_FEATURES = torch.tensor(
[
[
# "repeat"
[
-20.1049, -19.9764, -20.0731, -19.5055, -27.5018, -22.5761, -26.6071,
-29.0091, -26.4659, -26.4236, -28.8808, -31.9190, -32.4848, -34.1186,
-34.0340, -32.8803, -30.9895, -37.6238, -38.0347, -40.6263, -36.3496,
-42.2533, -32.9132, -27.7068, -29.3704, -30.3208, -22.5972, -27.1494,
-30.1975, -31.1005, -29.9372, -27.1917, -25.9806, -30.3489, -33.2380,
-31.9062, -36.5498, -32.8721, -30.5629, -27.4674, -22.2232, -22.5653,
-16.3868, -17.2713, -25.9738, -30.6256, -34.3766, -31.1292, -27.8950,
-27.0588, -25.6206, -23.0712, -26.6050, -28.0112, -32.6847, -34.3396,
-34.9738, -35.8463, -39.2324, -37.1188, -33.3705, -28.9230, -28.9112,
-28.6578
],
[
-36.7233, -30.0587, -24.8431, -18.4611, -16.8149, -23.9319, -32.8580,
-34.2264, -27.4332, -26.8027, -29.2721, -33.9033, -39.3403, -35.3232,
-26.8076, -28.6460, -35.2780, -36.0738, -35.4996, -37.7631, -39.5056,
-34.7112, -36.8741, -34.1066, -32.9474, -33.6604, -27.9937, -30.9594,
-26.2928, -32.0485, -29.2151, -29.2917, -32.7308, -29.6542, -31.1454,
-37.0088, -32.3388, -37.3086, -31.1024, -27.2889, -19.6788, -21.1488,
-19.5144, -14.8889, -21.2006, -24.7488, -27.7940, -31.1058, -27.5068,
-21.5737, -22.3780, -21.5151, -26.3086, -30.9223, -33.5043, -32.0307,
-37.3806, -41.6188, -45.6650, -40.5131, -32.5023, -26.7385, -26.3709,
-26.7761
]
],
[
# "repeatpad"
[
-25.7496, -24.9339, -24.1357, -23.1271, -23.7853, -26.1264, -29.1456,
-33.2060, -37.8179, -42.4833, -41.9386, -41.2164, -42.3566, -44.2575,
-40.0217, -36.6794, -36.6974, -38.7819, -42.0880, -45.5560, -39.9368,
-36.3219, -35.5981, -36.6434, -35.1851, -33.0684, -30.0437, -30.2010,
-34.3476, -42.1373, -38.8039, -37.3355, -40.4576, -41.0485, -40.6377,
-38.2275, -42.7481, -34.6084, -34.7048, -29.5149, -26.3935, -26.8952,
-34.1336, -26.2904, -28.2571, -32.5642, -36.7240, -35.5334, -38.2451,
-34.8177, -28.9754, -25.1096, -27.9768, -32.3184, -37.0269, -40.5136,
-40.8061, -36.4948, -40.3767, -38.9671, -38.3552, -34.1250, -30.9035,
-31.6112
],
[
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100.
]
],
[
# None, same as "repeatpad"
[
-25.7496, -24.9339, -24.1357, -23.1271, -23.7853, -26.1264, -29.1456,
-33.2060, -37.8179, -42.4833, -41.9386, -41.2164, -42.3566, -44.2575,
-40.0217, -36.6794, -36.6974, -38.7819, -42.0880, -45.5560, -39.9368,
-36.3219, -35.5981, -36.6434, -35.1851, -33.0684, -30.0437, -30.2010,
-34.3476, -42.1373, -38.8039, -37.3355, -40.4576, -41.0485, -40.6377,
-38.2275, -42.7481, -34.6084, -34.7048, -29.5149, -26.3935, -26.8952,
-34.1336, -26.2904, -28.2571, -32.5642, -36.7240, -35.5334, -38.2451,
-34.8177, -28.9754, -25.1096, -27.9768, -32.3184, -37.0269, -40.5136,
-40.8061, -36.4948, -40.3767, -38.9671, -38.3552, -34.1250, -30.9035,
-31.6112
],
[
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100.
]
],
[
# "pad"
[
-58.5260, -58.1155, -57.8623, -57.5059, -57.9178, -58.7171, -59.2343,
-59.9833, -60.9764, -62.0722, -63.5723, -65.7111, -67.5153, -68.7088,
-69.8325, -70.2987, -70.1548, -70.6233, -71.5702, -72.5159, -72.3821,
-70.1817, -67.0315, -64.1387, -62.2202, -61.0717, -60.4951, -61.6005,
-63.7358, -67.1400, -67.6185, -65.5635, -64.3593, -63.7138, -63.6209,
-66.4950, -72.6284, -63.3961, -56.8334, -52.7319, -50.6310, -51.3728,
-53.5619, -51.9190, -50.9708, -52.8684, -55.8073, -58.8227, -60.6991,
-57.0547, -52.7611, -51.4388, -54.4892, -60.8950, -66.1024, -72.4352,
-67.8538, -65.1463, -68.7588, -72.3080, -68.4864, -60.4688, -57.1516,
-60.9460
],
[
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100.
]
]
]
)
# fmt: on
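        # per padding mode, the indices of the two spectrogram rows that are compared
        # against the expected slices above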
MEL_BIN = [[976, 977], [976, 977], [976, 977], [196, 197]]
input_speech = self._load_datasamples(1)
feature_extractor = ClapFeatureExtractor()
for padding, EXPECTED_VALUES, idx_in_mel in zip(
["repeat", "repeatpad", None, "pad"], EXPECTED_INPUT_FEATURES, MEL_BIN
):
input_features = feature_extractor(input_speech, return_tensors="pt", padding=padding).input_features
self.assertEqual(input_features.shape, (1, 4, 1001, 64))
self.assertTrue(torch.allclose(input_features[0, 0, idx_in_mel[0]], EXPECTED_VALUES[0], atol=1e-4))
self.assertTrue(torch.allclose(input_features[0, 0, idx_in_mel[1]], EXPECTED_VALUES[1], atol=1e-4))
self.assertTrue(torch.all(input_features[0, 0] == input_features[0, 1]))
self.assertTrue(torch.all(input_features[0, 0] == input_features[0, 2]))
self.assertTrue(torch.all(input_features[0, 0] == input_features[0, 3]))
def test_integration_rand_trunc_short_input(self):
# fmt: off
EXPECTED_INPUT_FEATURES = torch.tensor(
[
[
# "repeat"
[
-35.0483, -35.7865, -38.2884, -40.0220, -42.5349, -44.9489, -43.2228,
-44.6499, -47.6253, -49.6983, -50.2127, -52.5483, -52.2223, -51.9157,
-49.4082, -51.2024, -57.0476, -56.2803, -58.1618, -60.7474, -55.0389,
-60.9514, -59.3080, -50.4419, -47.8172, -48.7570, -55.2552, -44.5036,
-44.1148, -50.8218, -51.0968, -52.9408, -51.1037, -48.9789, -47.5897,
-52.0915, -55.4216, -54.1529, -58.0149, -58.0866, -52.7798, -52.6154,
-45.9144, -46.2008, -40.7603, -41.1703, -50.2250, -55.4112, -59.4818,
-54.5795, -53.5552, -51.3668, -49.8358, -50.3186, -54.0452, -57.6030,
-61.1589, -61.6415, -63.2756, -66.5890, -62.8543, -58.0665, -56.7203,
-56.7632
],
[
-47.1320, -37.9961, -34.0076, -36.7109, -47.9057, -48.4924, -43.8371,
-44.9728, -48.1689, -52.9141, -57.6077, -52.8520, -44.8502, -45.6764,
-51.8389, -56.4284, -54.6972, -53.4889, -55.6077, -58.7149, -60.3760,
-54.0136, -56.0730, -55.9870, -54.4017, -53.1094, -53.5640, -50.3064,
-49.9520, -49.3239, -48.1668, -53.4852, -50.4561, -50.8688, -55.1970,
-51.5538, -53.0260, -59.6933, -54.8183, -59.5895, -55.9589, -50.3761,
-44.1282, -44.1463, -43.8540, -39.1168, -45.3893, -49.5542, -53.1505,
-55.2870, -50.3921, -46.8511, -47.4444, -49.5633, -56.0034, -59.0815,
-59.0018, -63.7589, -69.5745, -71.5789, -64.0498, -56.0558, -54.3475,
-54.7004
]
],
[
# "repeatpad"
[
-40.3184, -39.7186, -39.8807, -41.6508, -45.3613, -50.4785, -57.0297,
-60.4944, -59.1642, -58.9495, -60.4661, -62.5300, -58.4759, -55.2865,
-54.8973, -56.0780, -57.5482, -59.6557, -64.3309, -65.0330, -59.4941,
-56.8552, -55.0519, -55.9817, -56.9739, -55.2827, -54.5312, -51.4141,
-50.4289, -51.9131, -57.5821, -63.9979, -59.9180, -58.9489, -62.3247,
-62.6975, -63.7948, -60.5250, -64.6107, -58.7905, -57.0229, -54.3084,
-49.8445, -50.4459, -57.0172, -50.6425, -52.5992, -57.4207, -61.6358,
-60.6540, -63.1968, -57.4360, -52.3263, -51.7695, -57.1946, -62.9610,
-66.7359, -67.0335, -63.7440, -68.1775, -66.3798, -62.8650, -59.8972,
-59.3139
],
[
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100.
]
],
[
# None, same as "repeatpad"
[
-40.3184, -39.7186, -39.8807, -41.6508, -45.3613, -50.4785, -57.0297,
-60.4944, -59.1642, -58.9495, -60.4661, -62.5300, -58.4759, -55.2865,
-54.8973, -56.0780, -57.5482, -59.6557, -64.3309, -65.0330, -59.4941,
-56.8552, -55.0519, -55.9817, -56.9739, -55.2827, -54.5312, -51.4141,
-50.4289, -51.9131, -57.5821, -63.9979, -59.9180, -58.9489, -62.3247,
-62.6975, -63.7948, -60.5250, -64.6107, -58.7905, -57.0229, -54.3084,
-49.8445, -50.4459, -57.0172, -50.6425, -52.5992, -57.4207, -61.6358,
-60.6540, -63.1968, -57.4360, -52.3263, -51.7695, -57.1946, -62.9610,
-66.7359, -67.0335, -63.7440, -68.1775, -66.3798, -62.8650, -59.8972,
-59.3139
],
[
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100.
]
],
[
# "pad"
[
-73.3190, -73.6349, -74.1451, -74.8539, -75.7476, -76.5438, -78.5540,
-80.1339, -81.8911, -83.7560, -85.5387, -86.7466, -88.2072, -88.6090,
-88.8243, -89.0784, -89.4364, -89.8179, -91.3146, -92.2833, -91.7221,
-90.9440, -88.1315, -86.2425, -84.2281, -82.4893, -81.5993, -81.1328,
-81.5759, -83.1068, -85.6525, -88.9520, -88.9187, -87.2703, -86.3052,
-85.7188, -85.8802, -87.9996, -95.0464, -88.0133, -80.8561, -76.5597,
-74.2816, -74.8109, -77.3615, -76.0719, -75.3426, -77.6428, -80.9663,
-84.5275, -84.9907, -80.5205, -77.2851, -78.6259, -84.7740, -91.4535,
-98.1894, -94.3872, -92.3735, -97.6807, -98.1501, -91.4344, -85.2842,
-88.4338
],
[
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
-100., -100., -100., -100.
]
]
]
)
# fmt: on
MEL_BIN = [[976, 977], [976, 977], [976, 977], [196, 197]]
input_speech = self._load_datasamples(1)
feature_extractor = ClapFeatureExtractor()
for padding, EXPECTED_VALUES, idx_in_mel in zip(
["repeat", "repeatpad", None, "pad"], EXPECTED_INPUT_FEATURES, MEL_BIN
):
input_features = feature_extractor(
input_speech, return_tensors="pt", truncation="rand_trunc", padding=padding
).input_features
self.assertEqual(input_features.shape, (1, 1, 1001, 64))
self.assertTrue(torch.allclose(input_features[0, 0, idx_in_mel[0]], EXPECTED_VALUES[0], atol=1e-4))
self.assertTrue(torch.allclose(input_features[0, 0, idx_in_mel[1]], EXPECTED_VALUES[1], atol=1e-4))
def test_integration_fusion_long_input(self):
# fmt: off
EXPECTED_INPUT_FEATURES = torch.tensor(
[
[
-11.1830, -10.1894, -8.6051, -4.8578, -1.3268, -8.4606, -14.5453,
-9.2017, 0.5781, 16.2129, 14.8289, 3.6326, -3.8794, -6.5544,
-2.4408, 1.9531, 6.0967, 1.7590, -7.6730, -6.1571, 2.0052,
16.6694, 20.6447, 21.2145, 13.4972, 15.9043, 16.8987, 4.1766,
11.9428, 21.2372, 12.3016, 4.8604, 6.7241, 1.8543, 4.9235,
5.3188, -0.9897, -1.2416, -6.5864, 2.9529, 2.9274, 6.4753,
10.2300, 11.2127, 3.4042, -1.0055, -6.0475, -6.7524, -3.9801,
-1.4434, 0.4740, -0.1584, -4.5457, -8.5746, -8.8428, -13.1475,
-9.6079, -8.5798, -4.1143, -3.7966, -7.1651, -6.1517, -8.0258,
-12.1486
],
[
-10.2017, -7.9924, -5.9517, -3.9372, -1.9735, -4.3130, 16.1647,
25.0592, 23.5532, 14.4974, -7.0778, -10.2262, 6.4782, 20.3454,
19.4269, 1.7976, -16.5070, 4.9380, 12.3390, 6.9285, -13.6325,
-8.5298, 1.0839, -5.9629, -8.4812, 3.1331, -2.0963, -16.6046,
-14.0070, -17.5707, -13.2080, -17.2168, -17.7770, -12.1111, -18.6184,
-17.1897, -13.9801, -12.0426, -23.5400, -25.6823, -23.5813, -18.7847,
-20.5473, -25.6458, -19.7585, -27.6007, -28.9276, -24.8948, -25.4458,
-22.2807, -19.6613, -19.2669, -15.7813, -19.6821, -24.3439, -22.2598,
-28.2631, -30.1017, -32.7646, -33.6525, -27.5639, -22.0548, -27.8054,
-29.6947
],
[
-9.2078, -7.2963, -6.2095, -7.9959, -2.9280, -11.1843, -6.1490,
5.0733, 19.2957, 21.4578, 14.6803, -3.3153, -6.3334, -2.3542,
6.9509, 15.2965, 14.6620, 5.2075, -0.0873, 1.1919, 18.1986,
20.8470, 10.8035, 2.2516, 7.6905, 7.7427, -1.2543, -5.0018,
0.9809, -2.1584, -5.4580, -5.4760, -11.8888, -9.0605, -8.4638,
-9.9897, -0.0540, -5.1629, 0.0483, -4.1504, -4.8140, -7.8236,
-9.0622, -10.1742, -8.9597, -11.5380, -16.5603, -17.1858, -17.5032,
-20.9326, -23.9543, -25.2602, -25.3429, -27.4536, -26.8859, -22.7852,
-25.8288, -24.8399, -23.8893, -24.2096, -26.5415, -23.7281, -25.6851,
-22.3629
],
[
1.3448, 2.9883, 4.0366, -0.8019, -10.4191, -10.0883, -4.3812,
0.8136, 2.1579, 0.0832, 1.0949, -0.9759, -5.5319, -4.6009,
-6.5452, -14.9155, -20.1584, -9.3611, -2.4271, 1.4031, 4.9910,
8.6916, 8.6785, 10.1973, 9.9029, 5.3840, 7.5336, 5.2803,
2.8144, -0.3138, 2.2216, 5.7328, 7.5574, 7.7402, 1.0681,
3.1049, 7.0742, 6.5588, 7.3712, 5.7881, 8.6874, 8.7725,
2.8133, -4.5809, -6.1317, -5.1719, -5.0192, -9.0977, -10.9391,
-6.0769, 1.6016, -0.8965, -7.2252, -7.8632, -11.4468, -11.7446,
-10.7447, -7.0601, -2.7748, -4.1798, -2.8433, -3.1352, 0.8097,
6.4212
]
]
)
# fmt: on
MEL_BIN = 963
input_speech = torch.cat([torch.tensor(x) for x in self._load_datasamples(5)])
feature_extractor = ClapFeatureExtractor()
for padding, EXPECTED_VALUES, block_idx in zip(
["repeat", "repeatpad", None, "pad"], EXPECTED_INPUT_FEATURES, [1, 2, 0, 3]
):
set_seed(987654321)
input_features = feature_extractor(input_speech, return_tensors="pt", padding=padding).input_features
self.assertEqual(input_features.shape, (1, 4, 1001, 64))
self.assertTrue(torch.allclose(input_features[0, block_idx, MEL_BIN], EXPECTED_VALUES, atol=1e-3))
def test_integration_rand_trunc_long_input(self):
# fmt: off
EXPECTED_INPUT_FEATURES = torch.tensor(
[
[
-35.4022, -32.7555, -31.2004, -32.7764, -42.5770, -41.6339, -43.1630,
-44.5080, -44.3029, -48.9628, -39.5022, -39.2105, -43.1350, -43.2195,
-48.4894, -52.2344, -57.6891, -52.2228, -45.5155, -44.2893, -43.4697,
-46.6702, -43.7490, -40.4819, -42.7275, -46.3434, -46.8412, -41.2003,
-43.1681, -46.2948, -46.1925, -47.8333, -45.6812, -44.9182, -41.7786,
-43.3809, -44.3199, -42.8814, -45.4771, -46.7114, -46.9746, -42.7090,
-41.6057, -38.3965, -40.1980, -41.0263, -34.1256, -28.3289, -29.0201,
-30.4453, -29.5561, -30.1734, -25.9406, -19.0897, -15.8452, -20.1351,
-23.6515, -23.1194, -17.1845, -19.4399, -23.6527, -22.8768, -20.7279,
-22.7864
],
[
-35.7719, -27.2566, -23.6964, -27.5521, 0.2510, 7.4391, 1.3917,
-13.3417, -28.1758, -17.0856, -5.7723, -0.8000, -7.8832, -15.5548,
-30.5935, -24.7571, -13.7009, -10.3432, -21.2464, -24.8118, -19.4080,
-14.9779, -11.7991, -18.4485, -20.1982, -17.3652, -20.6328, -28.2967,
-25.7819, -21.8962, -28.5083, -29.5719, -30.2120, -35.7033, -31.8218,
-34.0408, -37.7744, -33.9653, -31.3009, -30.9063, -28.6153, -32.2202,
-28.5456, -28.8579, -32.5170, -37.9152, -43.0052, -46.4849, -44.0786,
-39.1933, -33.2757, -31.6313, -42.6386, -52.3679, -53.5785, -55.6444,
-47.0050, -47.6459, -56.6361, -60.6781, -61.5244, -55.8272, -60.4832,
-58.1897
],
[
-38.2686, -36.6285, -32.5835, -35.1693, -37.7938, -37.4035, -35.3132,
-35.6083, -36.3609, -40.9472, -36.7846, -36.1544, -38.9076, -39.3618,
-35.4953, -34.2809, -39.9466, -39.7433, -34.8347, -37.5674, -41.5689,
-38.9161, -34.3947, -30.2924, -30.4841, -34.5831, -28.9261, -24.8849,
-31.2324, -27.1622, -27.2107, -25.9385, -30.1691, -30.9223, -23.9495,
-25.6047, -26.7119, -28.5523, -27.7481, -32.8427, -35.4650, -31.0399,
-31.2073, -30.5163, -22.9819, -20.8892, -19.2510, -24.7905, -28.9426,
-28.1998, -26.7386, -25.0140, -27.9223, -32.9913, -33.1864, -34.9742,
-38.5995, -39.6990, -29.3203, -22.4697, -25.6415, -33.5608, -33.0945,
-27.1716
],
[
-33.2015, -28.7741, -21.9457, -23.4888, -32.1072, -8.6307, 3.2724,
5.9157, -0.9221, -30.1814, -31.0015, -27.4508, -27.0477, -9.5342,
0.3221, 0.6511, -7.1596, -25.9707, -32.8924, -32.2300, -13.8974,
-0.4895, 0.9168, -10.7663, -27.1176, -35.0829, -11.6859, -4.8855,
-11.8898, -26.6167, -5.6192, -3.8443, -19.7947, -14.4101, -8.6236,
-21.2458, -21.0801, -17.9136, -24.4663, -18.6333, -24.8085, -15.5854,
-15.4344, -11.5046, -22.3625, -27.3387, -32.4353, -30.9670, -31.3789,
-35.4044, -34.4591, -25.2433, -28.0773, -33.8736, -33.0224, -33.3155,
-38.5302, -39.2741, -36.6395, -34.7729, -32.4483, -42.4001, -49.2857,
-39.1682
]
]
)
# fmt: on
MEL_BIN = 963
SEEDS = [987654321, 1234, 666, 5555]
input_speech = torch.cat([torch.tensor(x) for x in self._load_datasamples(5)])
feature_extractor = ClapFeatureExtractor()
for padding, EXPECTED_VALUES, seed in zip(
["repeat", "repeatpad", None, "pad"], EXPECTED_INPUT_FEATURES, SEEDS
):
set_seed(seed)
input_features = feature_extractor(
input_speech, return_tensors="pt", truncation="rand_trunc", padding=padding
).input_features
self.assertEqual(input_features.shape, (1, 1, 1001, 64))
self.assertTrue(torch.allclose(input_features[0, 0, MEL_BIN], EXPECTED_VALUES, atol=1e-4))
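
# A minimal, illustrative sketch of the padding strategies exercised above,
# assuming they behave as their names suggest: "repeat" tiles the waveform up
# to the target length, "repeatpad" (and None, treated as its alias in these
# tests) tiles a whole number of times and then zero-pads, while "pad" only
# zero-pads. The canonical logic lives in ClapFeatureExtractor; this helper is
# a hypothetical stand-in for documentation purposes.
def _pad_waveform_sketch(waveform, max_length, padding="repeatpad"):
    import numpy as np

    waveform = np.asarray(waveform, dtype=np.float64)
    if len(waveform) >= max_length:
        return waveform[:max_length]
    if padding == "repeat":
        # tile until the target length is reached, then truncate
        n_repeat = max_length // len(waveform) + 1
        return np.tile(waveform, n_repeat)[:max_length]
    if padding in ("repeatpad", None):
        # tile a whole number of times; the remainder is zero-padded below
        waveform = np.tile(waveform, max_length // len(waveform))
    # zero padding for "pad" and the tail of "repeatpad"
    return np.pad(waveform, (0, max_length - len(waveform)), mode="constant", constant_values=0)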
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/jukebox/test_tokenization_jukebox.py
# coding=utf-8
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
tokenizer_class = JukeboxTokenizer
metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": """I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
def test_1b_lyrics_tokenizer(self):
"""
how to run the same test with openAI
...
"""
import torch
tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
tokens = tokenizer(**self.metas)["input_ids"]
# fmt: off
EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]]),
torch.tensor([[0, 0, 0, 1069, 11]]),
torch.tensor([[0, 0, 0, 1069, 11]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
@require_torch
def test_5b_lyrics_tokenizer(self):
"""
The outputs are similar that open AI but do not have the same format as this one is adapted to the HF integration.
"""
import torch
tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
tokens = tokenizer(**self.metas)["input_ids"]
# fmt: off
EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]]),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]]),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
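
# A hedged sketch of the output structure asserted above: calling the tokenizer
# with `artist`, `genres` and `lyrics` yields one tensor of input ids per prior
# level (three in total), and only the top-level prior receives the full lyric
# tokens; the lower levels keep just the short conditioning prefix. The helper
# name is hypothetical; the shapes follow the EXPECTED_OUTPUT tensors above.
def _jukebox_tokens_per_level_sketch(tokenizer, metas):
    tokens = tokenizer(**metas)["input_ids"]
    assert len(tokens) == 3  # one tensor per prior level
    return [tuple(t.shape) for t in tokens]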
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/jukebox/test_modeling_jukebox.py
# coding=utf-8
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import skip
from transformers import is_torch_available
from transformers.testing_utils import (
require_torch,
require_torch_accelerator,
require_torch_fp16,
slow,
torch_device,
)
from transformers.trainer_utils import set_seed
if is_torch_available():
import torch
from transformers import JukeboxModel, JukeboxPrior, JukeboxTokenizer
@require_torch
class Jukebox1bModelTester(unittest.TestCase):
all_model_classes = (JukeboxModel,) if is_torch_available() else ()
model_id = "openai/jukebox-1b-lyrics"
metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": """I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
# fmt: off
EXPECTED_OUTPUT_2 = [
1864, 1536, 1213, 1870, 1357, 1536, 519, 880, 1323, 789, 1082, 534,
1000, 1445, 1105, 1130, 967, 515, 1434, 1620, 534, 1495, 283, 1445,
333, 1307, 539, 1631, 1528, 375, 1434, 673, 627, 710, 778, 1883,
1405, 1276, 1455, 1228
]
EXPECTED_OUTPUT_2_PT_2 = [
1489, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653,
653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653,
653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653,
653, 653, 653, 653
]
EXPECTED_OUTPUT_1 = [
1125, 1751, 697, 1776, 1141, 1476, 391, 697, 1125, 684, 867, 416,
844, 1372, 1274, 717, 1274, 844, 1299, 1419, 697, 1370, 317, 1125,
191, 1440, 1370, 1440, 1370, 282, 1621, 1370, 368, 349, 867, 1872,
1262, 869, 1728, 747
]
EXPECTED_OUTPUT_1_PT_2 = [
416, 416, 1125, 1125, 416, 416, 416, 416, 416, 416, 416, 416,
416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416,
416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416,
416, 416, 416, 416
]
EXPECTED_OUTPUT_0 = [
1755, 842, 307, 1843, 1022, 1395, 234, 1554, 806, 739, 1022, 442,
616, 556, 268, 1499, 933, 457, 1440, 1837, 755, 985, 308, 902,
293, 1443, 1671, 1141, 1533, 555, 1562, 1061, 287, 417, 1022, 2008,
1186, 1015, 1777, 268
]
EXPECTED_OUTPUT_0_PT_2 = [
854, 842, 1353, 114, 1353, 842, 185, 842, 185, 114, 591, 842,
185, 417, 185, 842, 307, 842, 591, 842, 185, 842, 307, 842,
591, 842, 1353, 842, 185, 842, 591, 842, 591, 114, 591, 842,
185, 842, 591, 89
]
EXPECTED_Y_COND = [1058304, 0, 786432, 7169, 507, 76, 27, 40, 30, 76]
EXPECTED_PRIMED_0 = [
390, 1160, 1002, 1907, 1788, 1788, 1788, 1907, 1002, 1002, 1854, 1002,
1002, 1002, 1002, 1002, 1002, 1160, 1160, 1606, 596, 596, 1160, 1002,
1516, 596, 1002, 1002, 1002, 1907, 1788, 1788, 1788, 1854, 1788, 1907,
1907, 1788, 596, 1626
]
EXPECTED_PRIMED_1 = [
1236, 1668, 1484, 1920, 1848, 1409, 139, 864, 1828, 1272, 1599, 824,
1672, 139, 555, 1484, 824, 1920, 555, 596, 1579, 1599, 1231, 1599,
1637, 1407, 212, 824, 1599, 116, 1433, 824, 258, 1599, 1433, 1895,
1063, 1433, 1433, 1599
]
EXPECTED_PRIMED_2 = [
1684, 1873, 1119, 1189, 395, 611, 1901, 972, 890, 1337, 1392, 1927,
96, 972, 672, 780, 1119, 890, 158, 771, 1073, 1927, 353, 1331,
1269, 1459, 1333, 1645, 812, 1577, 1337, 606, 353, 981, 1466, 619,
197, 391, 302, 1930
]
EXPECTED_VQVAE_ENCODE = [
390, 1160, 1002, 1907, 1788, 1788, 1788, 1907, 1002, 1002, 1854, 1002,
1002, 1002, 1002, 1002, 1002, 1160, 1160, 1606, 596, 596, 1160, 1002,
1516, 596, 1002, 1002, 1002, 1907, 1788, 1788, 1788, 1854, 1788, 1907,
1907, 1788, 596, 1626
]
EXPECTED_VQVAE_DECODE = [
-0.0492, -0.0524, -0.0565, -0.0640, -0.0686, -0.0684, -0.0677, -0.0664,
-0.0605, -0.0490, -0.0330, -0.0168, -0.0083, -0.0075, -0.0051, 0.0025,
0.0136, 0.0261, 0.0386, 0.0497, 0.0580, 0.0599, 0.0583, 0.0614,
0.0740, 0.0889, 0.1023, 0.1162, 0.1211, 0.1212, 0.1251, 0.1336,
0.1502, 0.1686, 0.1883, 0.2148, 0.2363, 0.2458, 0.2507, 0.2531
]
EXPECTED_AUDIO_COND = [
0.0256, -0.0544, 0.1600, -0.0032, 0.1066, 0.0825, -0.0013, 0.3440,
0.0210, 0.0412, -0.1777, -0.0892, -0.0164, 0.0285, -0.0613, -0.0617,
-0.0137, -0.0201, -0.0175, 0.0215, -0.0627, 0.0520, -0.0730, 0.0970,
-0.0100, 0.0442, -0.0586, 0.0207, -0.0015, -0.0082
]
EXPECTED_META_COND = [
0.0415, 0.0877, 0.0022, -0.0055, 0.0751, 0.0334, 0.0324, -0.0068,
0.0011, 0.0017, -0.0676, 0.0655, -0.0143, 0.0399, 0.0303, 0.0743,
-0.0168, -0.0394, -0.1113, 0.0124, 0.0442, 0.0267, -0.0003, -0.1536,
-0.0116, -0.1837, -0.0180, -0.1026, -0.0777, -0.0456
]
EXPECTED_LYRIC_COND = [
76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33,
45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76
]
# fmt: on
def prepare_inputs(self):
tokenizer = JukeboxTokenizer.from_pretrained(self.model_id)
tokens = tokenizer(**self.metas)["input_ids"]
return tokens
@slow
def test_sampling(self):
model = JukeboxModel.from_pretrained(self.model_id, min_duration=0).eval()
labels = self.prepare_inputs()
set_seed(0)
zs = [torch.zeros(1, 0, dtype=torch.long).cpu() for _ in range(3)]
zs = model._sample(zs, labels, [0], sample_length=40 * model.priors[0].raw_to_tokens, save_results=False)
self.assertIn(zs[0][0].detach().cpu().tolist(), [self.EXPECTED_OUTPUT_2, self.EXPECTED_OUTPUT_2_PT_2])
set_seed(0)
zs = model._sample(zs, labels, [1], sample_length=40 * model.priors[1].raw_to_tokens, save_results=False)
self.assertIn(zs[1][0].detach().cpu().tolist(), [self.EXPECTED_OUTPUT_1, self.EXPECTED_OUTPUT_1_PT_2])
set_seed(0)
zs = model._sample(zs, labels, [2], sample_length=40 * model.priors[2].raw_to_tokens, save_results=False)
self.assertIn(zs[2][0].detach().cpu().tolist(), [self.EXPECTED_OUTPUT_0, self.EXPECTED_OUTPUT_0_PT_2])
@slow
def test_conditioning(self):
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
model = JukeboxModel.from_pretrained(self.model_id, min_duration=0).eval()
labels = self.prepare_inputs()
set_seed(0)
zs = [torch.zeros(1, 0, dtype=torch.long) for _ in range(3)]
top_prior = model.priors[0]
start = 0
music_token_conds = top_prior.get_music_tokens_conds(zs, start=start, end=start + top_prior.n_ctx)
metadata = top_prior.get_metadata(labels[0].clone(), start, 1058304, 0)
self.assertIsNone(music_token_conds)
self.assertListEqual(metadata.numpy()[0][:10].tolist(), self.EXPECTED_Y_COND)
audio_conditioning, metadata_conditioning, lyric_tokens = top_prior.get_cond(music_token_conds, metadata)
torch.testing.assert_allclose(
audio_conditioning[0][0][:30].detach(), torch.tensor(self.EXPECTED_AUDIO_COND), atol=1e-4, rtol=1e-4
)
torch.testing.assert_allclose(
metadata_conditioning[0][0][:30].detach(), torch.tensor(self.EXPECTED_META_COND), atol=1e-4, rtol=1e-4
)
torch.testing.assert_allclose(
lyric_tokens[0, :30].detach(), torch.tensor(self.EXPECTED_LYRIC_COND), atol=1e-4, rtol=1e-4
)
@slow
def test_primed_sampling(self):
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
model = JukeboxModel.from_pretrained(self.model_id, min_duration=0).eval()
set_seed(0)
waveform = torch.rand((1, 5120, 1))
tokens = list(self.prepare_inputs())
zs = [model.vqvae.encode(waveform, start_level=2, bs_chunks=waveform.shape[0])[0], None, None]
zs = model._sample(
zs, tokens, sample_levels=[0], save_results=False, sample_length=40 * model.priors[0].raw_to_tokens
)
torch.testing.assert_allclose(zs[0][0][:40], torch.tensor(self.EXPECTED_PRIMED_0))
upper_2 = torch.cat((zs[0], torch.zeros(1, 2048 - zs[0].shape[-1])), dim=-1).long()
zs = [upper_2, model.vqvae.encode(waveform, start_level=1, bs_chunks=waveform.shape[0])[0], None]
zs = model._sample(
zs, tokens, sample_levels=[1], save_results=False, sample_length=40 * model.priors[1].raw_to_tokens
)
torch.testing.assert_allclose(zs[1][0][:40], torch.tensor(self.EXPECTED_PRIMED_1))
upper_1 = torch.cat((zs[1], torch.zeros(1, 2048 - zs[1].shape[-1])), dim=-1).long()
zs = [upper_2, upper_1, model.vqvae.encode(waveform, start_level=0, bs_chunks=waveform.shape[0])[0]]
zs = model._sample(
zs, tokens, sample_levels=[2], save_results=False, sample_length=40 * model.priors[2].raw_to_tokens
)
torch.testing.assert_allclose(zs[2][0][:40].cpu(), torch.tensor(self.EXPECTED_PRIMED_2))
@slow
def test_vqvae(self):
model = JukeboxModel.from_pretrained(self.model_id, min_duration=0).eval()
set_seed(0)
x = torch.rand((1, 5120, 1))
with torch.no_grad():
zs = model.vqvae.encode(x, start_level=2, bs_chunks=x.shape[0])
torch.testing.assert_allclose(zs[0][0], torch.tensor(self.EXPECTED_VQVAE_ENCODE))
with torch.no_grad():
x = model.vqvae.decode(zs, start_level=2, bs_chunks=x.shape[0])
torch.testing.assert_allclose(x[0, :40, 0], torch.tensor(self.EXPECTED_VQVAE_DECODE), atol=1e-4, rtol=1e-4)
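
# A hedged sketch of the VQ-VAE round trip exercised in `test_vqvae` above:
# `encode` turns a raw waveform into discrete music tokens at the requested
# level (start_level=2 selects the coarsest codebook) and `decode` maps the
# tokens back to audio. Illustrative only; it assumes a loaded JukeboxModel
# exactly as used in the tests.
def _vqvae_roundtrip_sketch(model, waveform):
    import torch

    with torch.no_grad():
        music_tokens = model.vqvae.encode(waveform, start_level=2, bs_chunks=waveform.shape[0])
        reconstruction = model.vqvae.decode(music_tokens, start_level=2, bs_chunks=waveform.shape[0])
    return music_tokens, reconstruction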
@require_torch
class Jukebox5bModelTester(unittest.TestCase):
all_model_classes = (JukeboxModel,) if is_torch_available() else ()
model_id = "openai/jukebox-5b-lyrics"
metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": """I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
# fmt: off
EXPECTED_OUTPUT_2 = [
1489, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653,
653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653,
653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653,
653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653,
1489, 1489, 1489, 1489, 1150, 1853, 1509, 1150, 1357, 1509, 6, 1272
]
EXPECTED_OUTPUT_2_PT_2 = [
1489, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653,
653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653,
653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653,
653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653,
653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653
]
EXPECTED_OUTPUT_1 = [
1125, 416, 1125, 1125, 1125, 1125, 1125, 416, 416, 416, 416, 416,
416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416,
416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416,
416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416,
416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416
]
EXPECTED_OUTPUT_1_PT_2 = [
416, 416, 1125, 1125, 416, 416, 416, 416, 416, 416, 416, 416, 416,
416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416,
416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416,
416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416,
416, 416, 416, 416, 416, 416, 416, 416
]
EXPECTED_OUTPUT_0 = [
1755, 1061, 234, 1755, 1061, 1755, 185, 290, 307, 307, 616, 616,
616, 616, 616, 616, 307, 290, 417, 1755, 234, 1755, 185, 290,
290, 290, 307, 616, 616, 616, 616, 616, 290, 234, 234, 1755,
234, 234, 1755, 234, 185, 185, 307, 616, 616, 616, 616, 290,
1755, 1755, 1755, 234, 234, 1755, 1572, 290, 307, 616, 34, 616
]
EXPECTED_OUTPUT_0_PT_2 = [
854, 842, 1353, 114, 1353, 842, 185, 842, 185, 114, 591, 842, 185,
417, 185, 842, 307, 842, 591, 842, 185, 842, 185, 842, 591, 842,
1353, 842, 185, 842, 591, 842, 591, 114, 591, 842, 185, 842, 591,
89, 591, 842, 591, 842, 591, 417, 1372, 842, 1372, 842, 34, 842,
185, 89, 591, 842, 185, 842, 591, 632
]
EXPECTED_GPU_OUTPUTS_2 = [
1489, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653,
653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653,
653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653,
653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653,
653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653
]
EXPECTED_GPU_OUTPUTS_2_PT_2 = [
1489, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653,
653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, 653,
653, 653, 653, 653, 653, 653, 653, 1853, 1177, 1536, 1228,
710, 475, 1489, 1229, 1224, 231, 1224, 252, 1434, 653, 475,
1106, 1877, 1599, 1228, 1600, 1683, 1182, 1853, 475, 1864,
252, 1229, 1434, 2001
]
EXPECTED_GPU_OUTPUTS_1 = [
1125, 1125, 416, 1125, 1125, 416, 1125, 1125, 416, 416, 1125, 416,
416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416,
416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416,
416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416,
416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416, 416
]
EXPECTED_GPU_OUTPUTS_0 = [
491, 1755, 34, 1613, 1755, 417, 992, 1613, 222, 842, 1353, 1613,
844, 632, 185, 1613, 844, 632, 185, 1613, 185, 842, 677, 1613,
185, 114, 1353, 1613, 307, 89, 844, 1613, 307, 1332, 234, 1979,
307, 89, 1353, 616, 34, 842, 185, 842, 34, 842, 185, 842,
307, 114, 185, 89, 34, 1268, 185, 89, 34, 842, 185, 89
]
# fmt: on
def prepare_inputs(self, model_id):
tokenizer = JukeboxTokenizer.from_pretrained(model_id)
tokens = tokenizer(**self.metas)["input_ids"]
return tokens
@slow
def test_sampling(self):
model = JukeboxModel.from_pretrained(self.model_id, min_duration=0).eval()
labels = self.prepare_inputs(self.model_id)
set_seed(0)
zs = [torch.zeros(1, 0, dtype=torch.long).cpu() for _ in range(3)]
zs = model._sample(zs, labels, [0], sample_length=60 * model.priors[0].raw_to_tokens, save_results=False)
self.assertIn(zs[0][0].detach().cpu().tolist(), [self.EXPECTED_OUTPUT_2, self.EXPECTED_OUTPUT_2_PT_2])
set_seed(0)
zs = model._sample(zs, labels, [1], sample_length=60 * model.priors[1].raw_to_tokens, save_results=False)
self.assertIn(zs[1][0].detach().cpu().tolist(), [self.EXPECTED_OUTPUT_1, self.EXPECTED_OUTPUT_1_PT_2])
set_seed(0)
zs = model._sample(zs, labels, [2], sample_length=60 * model.priors[2].raw_to_tokens, save_results=False)
self.assertIn(zs[2][0].detach().cpu().tolist(), [self.EXPECTED_OUTPUT_0, self.EXPECTED_OUTPUT_0_PT_2])
@slow
@require_torch_accelerator
@skip("Not enough GPU memory on CI runners")
def test_slow_sampling(self):
model = JukeboxModel.from_pretrained(self.model_id, min_duration=0).eval()
labels = [i.to(torch_device) for i in self.prepare_inputs(self.model_id)]
set_seed(0)
model.priors[0].to(torch_device)
zs = [torch.zeros(1, 0, dtype=torch.long).to(torch_device) for _ in range(3)]
zs = model._sample(zs, labels, [0], sample_length=60 * model.priors[0].raw_to_tokens, save_results=False)
torch.testing.assert_allclose(zs[0][0].cpu(), torch.tensor(self.EXPECTED_GPU_OUTPUTS_2))
model.priors[0].cpu()
set_seed(0)
model.priors[1].to(torch_device)
zs = model._sample(zs, labels, [1], sample_length=60 * model.priors[1].raw_to_tokens, save_results=False)
torch.testing.assert_allclose(zs[1][0].cpu(), torch.tensor(self.EXPECTED_GPU_OUTPUTS_1))
model.priors[1].cpu()
set_seed(0)
model.priors[2].to(torch_device)
zs = model._sample(zs, labels, [2], sample_length=60 * model.priors[2].raw_to_tokens, save_results=False)
torch.testing.assert_allclose(zs[2][0].cpu(), torch.tensor(self.EXPECTED_GPU_OUTPUTS_0))
@slow
@require_torch_accelerator
@require_torch_fp16
def test_fp16_slow_sampling(self):
prior_id = "ArthurZ/jukebox_prior_0"
model = JukeboxPrior.from_pretrained(prior_id, min_duration=0).eval().half().to(torch_device)
labels = self.prepare_inputs(prior_id)[0].to(torch_device)
metadata = model.get_metadata(labels, 0, 7680, 0)
set_seed(0)
outputs = model.sample(1, metadata=metadata, sample_tokens=60)
self.assertIn(outputs[0].cpu().tolist(), [self.EXPECTED_GPU_OUTPUTS_2, self.EXPECTED_GPU_OUTPUTS_2_PT_2])
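
# A hedged sketch of the coarse-to-fine sampling scheme these tests rely on:
# priors[0] samples the top (coarsest) level first, then each subsequent prior
# refines conditioned on the levels already sampled. `sample_length` is
# expressed in raw audio samples, hence the `n_tokens * raw_to_tokens` factor.
# Illustrative only, mirroring the `_sample` calls used throughout this file.
def _coarse_to_fine_sampling_sketch(model, labels, n_tokens=60):
    import torch

    zs = [torch.zeros(1, 0, dtype=torch.long) for _ in range(3)]
    for level in range(3):
        sample_length = n_tokens * model.priors[level].raw_to_tokens
        zs = model._sample(zs, labels, [level], sample_length=sample_length, save_results=False)
    return zs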
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/clvp/test_processor_clvp.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import shutil
import tempfile
import unittest
from transformers import ClvpFeatureExtractor, ClvpProcessor, ClvpTokenizer
from transformers.testing_utils import require_torch
from .test_feature_extraction_clvp import floats_list
@require_torch
class ClvpProcessorTest(unittest.TestCase):
def setUp(self):
self.checkpoint = "susnato/clvp_dev"
self.tmpdirname = tempfile.mkdtemp()
def tearDown(self):
super().tearDown()
shutil.rmtree(self.tmpdirname)
gc.collect()
# Copied from transformers.tests.models.whisper.test_processor_whisper.WhisperProcessorTest.get_tokenizer with Whisper->Clvp
def get_tokenizer(self, **kwargs):
return ClvpTokenizer.from_pretrained(self.checkpoint, **kwargs)
# Copied from transformers.tests.models.whisper.test_processor_whisper.WhisperProcessorTest.get_feature_extractor with Whisper->Clvp
def get_feature_extractor(self, **kwargs):
return ClvpFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)
# Copied from transformers.tests.models.whisper.test_processor_whisper.WhisperProcessorTest.test_save_load_pretrained_default with Whisper->Clvp
def test_save_load_pretrained_default(self):
tokenizer = self.get_tokenizer()
feature_extractor = self.get_feature_extractor()
processor = ClvpProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
processor.save_pretrained(self.tmpdirname)
processor = ClvpProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer, ClvpTokenizer)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor, ClvpFeatureExtractor)
# Copied from transformers.tests.models.whisper.test_processor_whisper.WhisperProcessorTest.test_feature_extractor with Whisper->Clvp,processor(raw_speech->processor(raw_speech=raw_speech
def test_feature_extractor(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = ClvpProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
raw_speech = floats_list((3, 1000))
input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
input_processor = processor(raw_speech=raw_speech, return_tensors="np")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
# Copied from transformers.tests.models.whisper.test_processor_whisper.WhisperProcessorTest.test_tokenizer with Whisper->Clvp
def test_tokenizer(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = ClvpProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
input_str = "This is a test string"
encoded_processor = processor(text=input_str)
encoded_tok = tokenizer(input_str)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
# Copied from transformers.tests.models.whisper.test_processor_whisper.WhisperProcessorTest.test_tokenizer_decode with Whisper->Clvp
def test_tokenizer_decode(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = ClvpProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids)
decoded_tok = tokenizer.batch_decode(predicted_ids)
self.assertListEqual(decoded_tok, decoded_processor)
def test_save_load_pretrained_additional_features(self):
processor = ClvpProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
processor.save_pretrained(self.tmpdirname)
tokenizer_add_kwargs = self.get_tokenizer(pad_token="(PAD)")
feature_extractor_add_kwargs = self.get_feature_extractor(sampling_rate=16000)
processor = ClvpProcessor.from_pretrained(
self.tmpdirname,
pad_token="(PAD)",
sampling_rate=16000,
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, ClvpTokenizer)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
self.assertIsInstance(processor.feature_extractor, ClvpFeatureExtractor)
def test_model_input_names(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = ClvpProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
self.assertListEqual(
sorted(processor.model_input_names),
sorted(set(feature_extractor.model_input_names + tokenizer.model_input_names)),
msg="`processor` and `feature_extractor` model input names do not match",
)
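
# A hedged usage sketch summarizing the processor behaviour verified above:
# text is routed to the tokenizer, raw speech to the feature extractor, and
# `batch_decode` is forwarded to the tokenizer. The checkpoint is the one used
# throughout this test file; the helper itself is illustrative only.
def _clvp_processor_roundtrip_sketch(raw_speech, text):
    from transformers import ClvpProcessor

    processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
    audio_inputs = processor(raw_speech=raw_speech, return_tensors="np")
    text_inputs = processor(text=text)
    return audio_inputs, text_inputs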
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/clvp/test_modeling_clvp.py
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Clvp model. """
import gc
import tempfile
import unittest
import datasets
import numpy as np
from transformers import ClvpConfig, ClvpDecoderConfig, ClvpEncoderConfig
from transformers.testing_utils import (
require_torch,
slow,
torch_device,
)
from transformers.utils import is_torch_available
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
_config_zero_init,
ids_tensor,
random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ClvpEncoder, ClvpForCausalLM, ClvpModel, ClvpModelForConditionalGeneration
from transformers.models.clvp.modeling_clvp import CLVP_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers import ClvpFeatureExtractor, ClvpTokenizer
class ClvpEncoderTester:
def __init__(
self,
parent,
batch_size=2,
seq_length=7,
is_training=False,
use_input_mask=True,
use_labels=True,
vocab_size=50,
hidden_size=128,
projection_dim=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=32,
dropout=0.1,
attention_dropout=0.1,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
self.bos_token_id = vocab_size - 1
self.eos_token_id = vocab_size - 1
def get_config(self):
encoder_config = ClvpEncoderConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
)
return encoder_config
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
if input_mask is not None:
batch_size, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
encoder_config = self.get_config()
return encoder_config, input_ids, input_mask
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
speech_config, input_ids, input_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids.to(torch_device), "attention_mask": input_mask.to(torch_device)}
return speech_config, inputs_dict
def create_and_check_model(self, speech_config, input_ids, input_mask):
text_config = ClvpEncoderConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
)
text_encoder_model = ClvpEncoder(config=text_config)
text_encoder_model.to(torch_device)
text_encoder_model.eval()
with torch.no_grad():
result = text_encoder_model(input_ids, attention_mask=input_mask)
result = text_encoder_model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result[0].shape, (self.batch_size, self.projection_dim))
# now check with speech config
speech_encoder_model = ClvpEncoder(config=speech_config)
speech_encoder_model.to(torch_device)
speech_encoder_model.eval()
with torch.no_grad():
result = speech_encoder_model(input_ids, attention_mask=input_mask)
result = speech_encoder_model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result[0].shape, (self.batch_size, self.projection_dim))
@require_torch
class ClvpEncoderTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (ClvpEncoder,) if is_torch_available() else ()
test_pruning = False
test_head_masking = False
test_torchscript = False
def setUp(self):
self.model_tester = ClvpEncoderTester(self)
self.encoder_config_tester = ConfigTester(self, config_class=ClvpEncoderConfig, hidden_size=32)
def tearDown(self):
super().tearDown()
        # clean up as much GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
def test_config(self):
self.encoder_config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="ClvpEncoder does not output loss")
def test_training(self):
pass
@unittest.skip(reason="ClvpEncoder does not output loss")
def test_training_gradient_checkpointing(self):
pass
class ClvpDecoderTester:
def __init__(
self,
parent,
batch_size=2,
seq_length=3,
is_training=False,
vocab_size=300,
max_position_embeddings=256,
max_text_tokens=256,
use_input_mask=True,
hidden_size=128,
num_hidden_layers=2,
num_attention_heads=2,
bos_token_id=97,
eos_token_id=98,
relative_attention_num_buckets=4,
relative_attention_max_distance=16,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.max_text_tokens = max_text_tokens
self.use_input_mask = use_input_mask
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.num_hidden_layers = num_hidden_layers
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.relative_attention_num_buckets = relative_attention_num_buckets
self.relative_attention_max_distance = relative_attention_max_distance
def get_config(self):
decoder_config = ClvpDecoderConfig(
vocab_size=self.vocab_size,
max_position_embeddings=self.max_position_embeddings,
max_text_tokens=self.max_text_tokens,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
relative_attention_num_buckets=self.relative_attention_num_buckets,
relative_attention_max_distance=self.relative_attention_max_distance,
)
return decoder_config
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
if input_mask is not None:
batch_size, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
decoder_config = self.get_config()
return decoder_config, input_ids, input_mask
def create_and_check_model(self, config, input_ids, attention_mask):
model = ClvpForCausalLM(config).to(torch_device).eval()
with torch.no_grad():
result = model(input_ids=input_ids, attention_mask=attention_mask)
self.parent.assertEqual(result[0].shape, (self.batch_size, self.seq_length, self.vocab_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask = config_and_inputs
inputs_dict = {
"input_ids": input_ids.to(torch_device),
"attention_mask": attention_mask.to(torch_device),
}
return config, inputs_dict
@require_torch
class ClvpDecoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (ClvpModel, ClvpForCausalLM) if is_torch_available() else ()
all_generative_model_classes = (ClvpForCausalLM,) if is_torch_available() else ()
pipeline_model_mapping = {"feature-extraction": ClvpModelForConditionalGeneration} if is_torch_available() else {}
test_pruning = False
def setUp(self):
self.model_tester = ClvpDecoderTester(self)
self.decoder_config_tester = ConfigTester(self, config_class=ClvpDecoderConfig, hidden_size=32)
def tearDown(self):
super().tearDown()
        # clean up as much GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
if return_labels and model_class == ClvpForCausalLM:
inputs_dict["labels"] = torch.zeros(
[self.model_tester.batch_size, self.model_tester.seq_length], device=torch_device
).long()
return inputs_dict
def test_training(self):
# we will only test the ClvpForCausalLM since it outputs loss
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
model = ClvpForCausalLM(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, ClvpForCausalLM, return_labels=True)
loss = model(**inputs).loss
loss.backward()
def test_training_gradient_checkpointing(self):
# we will only test the ClvpForCausalLM since it outputs loss
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.use_cache = False
config.return_dict = True
model = ClvpForCausalLM(config)
model.to(torch_device)
model.gradient_checkpointing_enable()
model.train()
inputs = self._prepare_for_class(inputs_dict, ClvpForCausalLM, return_labels=True)
loss = model(**inputs).loss
loss.backward()
class ClvpModelForConditionalGenerationTester:
def __init__(self, parent, is_training=False):
self.parent = parent
self.clvp_encoder_tester = ClvpEncoderTester(parent)
self.is_training = is_training
def get_config(self):
decoder_config = ClvpDecoderConfig(
vocab_size=50,
max_position_embeddings=30,
max_text_tokens=30,
hidden_size=128,
num_hidden_layers=1,
num_attention_heads=2,
bos_token_id=97,
eos_token_id=98,
relative_attention_num_buckets=4,
relative_attention_max_distance=16,
)
text_config = self.clvp_encoder_tester.get_config()
speech_config = self.clvp_encoder_tester.get_config()
speech_config.vocab_size = 300
return ClvpConfig.from_sub_model_configs(
text_config,
speech_config,
decoder_config,
projection_dim=16,
)
def prepare_config_and_inputs(self):
_, input_ids, attention_mask = self.clvp_encoder_tester.prepare_config_and_inputs()
ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050))
_, audio, sr = ds.sort("id").select(range(1))[:1]["audio"][0].values()
feature_extractor = ClvpFeatureExtractor()
input_features = feature_extractor(raw_speech=audio, sampling_rate=sr, return_tensors="pt")[
"input_features"
].to(torch_device)
config = self.get_config()
return config, input_ids, attention_mask, input_features
def create_and_check_model(self, config, input_ids, attention_mask, input_features):
model = ClvpModelForConditionalGeneration(config).to(torch_device).eval()
with torch.no_grad():
result = model(input_ids=input_ids, input_features=input_features, attention_mask=attention_mask)
self.parent.assertEqual(result.logits_per_speech.shape, (2, self.clvp_encoder_tester.batch_size))
self.parent.assertEqual(result.logits_per_text.shape, (self.clvp_encoder_tester.batch_size, 2))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask, input_features = config_and_inputs
inputs_dict = {
"input_ids": input_ids.to(torch_device),
"attention_mask": attention_mask.to(torch_device),
"input_features": input_features.to(torch_device),
"return_loss": False,
}
return config, inputs_dict
@require_torch
class ClvpModelForConditionalGenerationTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (ClvpModelForConditionalGeneration,) if is_torch_available() else ()
test_head_masking = False
test_pruning = False
test_resize_embeddings = False
test_attention_outputs = False
test_torchscript = False
def setUp(self):
self.model_tester = ClvpModelForConditionalGenerationTester(self)
self.clvp_config_tester = ConfigTester(self, config_class=ClvpConfig, hidden_size=32)
def tearDown(self):
super().tearDown()
        # clean up as much GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
# check for decoder model, text encoder model and speech encoder model hidden states
decoder_hidden_states = outputs.decoder_hidden_states
text_encoder_hidden_states = outputs.text_encoder_hidden_states
speech_encoder_hidden_states = outputs.speech_encoder_hidden_states
# check length of the hidden states
expected_decoder_num_layers = config.decoder_config.num_hidden_layers + 1
self.assertEqual(len(decoder_hidden_states), expected_decoder_num_layers)
            expected_text_encoder_num_layers = config.text_config.num_hidden_layers + 1
            self.assertEqual(len(text_encoder_hidden_states), expected_text_encoder_num_layers)
            expected_speech_encoder_num_layers = config.speech_config.num_hidden_layers + 1
            self.assertEqual(len(speech_encoder_hidden_states), expected_speech_encoder_num_layers)
# check shapes of each hidden state
# for the decoder model we will only test the dimension because the ClvpConditioningEncoder could increase
# the sequence lengths.
self.assertEqual(decoder_hidden_states[0].shape[-1], config.decoder_config.hidden_size)
# the testing for text encoder stays standard because we just pass the text tokens here.
self.assertListEqual(
list(text_encoder_hidden_states[0].shape[-2:]),
[self.model_tester.clvp_encoder_tester.seq_length, config.text_config.hidden_size],
)
            # for the speech encoder model we will only test the dimension because the fix_decoder_outputs method
            # could increase the sequence lengths by adding `decoder_fixing_codes` tokens at the end.
self.assertEqual(speech_encoder_hidden_states[0].shape[-1], config.speech_config.hidden_size)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip(reason="Retain_grad is tested in individual model tests")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="ClvpModelForConditionalGeneration does not have get_input_embeddings")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="ClvpModelForConditionalGeneration does not have get_input_embeddings")
def test_model_common_attributes(self):
pass
    # override as the `logit_scale` parameter initialization is different for Clvp
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
                    # check if `logit_scale` is initialized as per the original implementation
if name == "logit_scale":
expected_value = np.log(1 / 0.07)
returned_value = param.data.item()
self.assertAlmostEqual(
returned_value,
expected_value,
delta=1e-3,
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
else:
expected_range = [0.0, 1.0]
returned_range = ((param.data.mean() * 1e9).round() / 1e9).item()
self.assertIn(
returned_range,
expected_range,
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
def test_load_speech_text_decoder_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Save ClvpConfig and check if we can load ClvpEncoderConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
encoder_config = ClvpEncoderConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.text_config.to_dict(), encoder_config.to_dict())
# Save ClvpConfig and check if we can load ClvpDecoderConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
decoder_config = ClvpDecoderConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.decoder_config.to_dict(), decoder_config.to_dict())
@slow
def test_model_from_pretrained(self):
for model_name in CLVP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ClvpModelForConditionalGeneration.from_pretrained(model_name)
self.assertIsNotNone(model)
# Since Clvp is composed of several different models connected to each other, it's better to test each of them
# individually along with a test_full_model_integration. If the model breaks in the future, this makes it much
# easier to identify the broken part.
@slow
@require_torch
class ClvpIntegrationTest(unittest.TestCase):
def setUp(self):
self.text = "This is an example text."
ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050))
_, self.speech_samples, self.sr = ds.sort("id").select(range(1))[:1]["audio"][0].values()
self.model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev").to(torch_device)
self.model.eval()
tokenizer = ClvpTokenizer.from_pretrained("susnato/clvp_dev")
feature_extractor = ClvpFeatureExtractor.from_pretrained("susnato/clvp_dev")
tokenizer_output = tokenizer(self.text, return_tensors="pt")
self.text_tokens = tokenizer_output["input_ids"].to(torch_device)
self.input_features = feature_extractor(
raw_speech=self.speech_samples, sampling_rate=self.sr, return_tensors="pt"
)["input_features"].to(torch_device)
def tearDown(self):
super().tearDown()
        # clean up as much GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
def test_conditional_encoder(self):
with torch.no_grad():
conditioning_encoder_outputs = self.model.conditioning_encoder(
input_features=self.input_features, input_ids=self.text_tokens
).to("cpu")
self.assertEqual(
conditioning_encoder_outputs.shape,
torch.Size((self.input_features.shape[0], 18, self.model.config.decoder_config.hidden_size)),
)
EXPECTED_OUTPUTS = torch.tensor(
[[-0.8582, 0.5228, 1.9944], [-0.0465, -1.1017, -0.0093], [-0.0466, -0.6030, -0.1280]]
)
self.assertTrue(torch.allclose(conditioning_encoder_outputs[0, :3, :3], EXPECTED_OUTPUTS, atol=1e-4))
def test_decoder_model_generate(self):
autoregressive_model_output = self.model.speech_decoder_model.generate(input_ids=self.text_tokens).cpu()
EXPECTED_OUTPUTS = torch.tensor([[147, 2, 54, 2, 43, 2, 169, 122, 29, 64, 2, 136, 37, 33, 9, 8193]])
self.assertTrue(torch.allclose(autoregressive_model_output, EXPECTED_OUTPUTS))
def test_text_and_speech_encoder_models(self):
# check for text embeds
text_embeds = self.model.text_encoder_model(input_ids=self.text_tokens, return_dict=True)[0].cpu()
# fmt: off
EXPECTED_TEXT_EMBEDS = torch.tensor([1.4798, -2.0005, 2.3902, -0.5042, 1.6401, -2.4135, -1.4800, 3.0118, -2.4422, 1.3266, 2.2339, 1.4761, -4.8983, -1.3592, 6.0251, 6.7364, 2.2576, 3.7229, -10.0436, 4.6676])
# fmt: on
self.assertTrue(torch.allclose(text_embeds[0, :20], EXPECTED_TEXT_EMBEDS, atol=1e-4))
# check for speech embeds
speech_embeds = self.model.speech_encoder_model(input_ids=self.text_tokens, return_dict=True)[0].cpu()
# fmt: off
EXPECTED_SPEECH_EMBEDS = torch.tensor([3.1202, -3.1183, -1.4264, -6.1339, 1.8885, -0.1983, 0.9461, -1.7414, 0.3320, -3.8400, -1.5715, 1.5096, -1.7576, 0.2387, 4.9758, 5.8450, -6.2534, 2.8587, -5.5816, 4.7821])
# fmt: on
self.assertTrue(torch.allclose(speech_embeds[0, :20], EXPECTED_SPEECH_EMBEDS, atol=1e-4))
def test_full_model_integration(self):
full_model_output = self.model.generate(
input_ids=self.text_tokens,
input_features=self.input_features,
do_sample=False,
num_beams=4,
num_return_sequences=4,
max_new_tokens=10,
)
EXPECTED_SPEECH_IDS = torch.tensor([[1953, 1080, 612], [1953, 612, 493], [1953, 612, 716]])
EXPECTED_SIMILARITY_SCORES = torch.tensor([[14.7660, 14.4569, 13.6472, 13.5683]])
self.assertTrue(torch.allclose(full_model_output.speech_ids.cpu()[-3:, -3:], EXPECTED_SPEECH_IDS))
self.assertTrue(torch.allclose(full_model_output.logits_per_text.cpu(), EXPECTED_SIMILARITY_SCORES))
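# A minimal end-to-end sketch of the pipeline exercised by ClvpIntegrationTest above
# (tokenizer + feature extractor -> ClvpModelForConditionalGeneration.generate). It is
# illustrative only and not part of the test suite; the `_example_` name is hypothetical.
def _example_clvp_generate(text, audio_array, sampling_rate):
    tokenizer = ClvpTokenizer.from_pretrained("susnato/clvp_dev")
    feature_extractor = ClvpFeatureExtractor.from_pretrained("susnato/clvp_dev")
    model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev")
    input_ids = tokenizer(text, return_tensors="pt")["input_ids"]
    input_features = feature_extractor(
        raw_speech=audio_array, sampling_rate=sampling_rate, return_tensors="pt"
    )["input_features"]
    with torch.no_grad():
        # `generate` returns candidate speech token ids together with the CLIP-style
        # text/speech similarity logits used to rank the candidates
        outputs = model.generate(
            input_ids=input_ids,
            input_features=input_features,
            num_beams=4,
            num_return_sequences=4,
            max_new_tokens=10,
        )
    return outputs.speech_ids, outputs.logits_per_text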
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/clvp/test_feature_extraction_clvp.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import Audio, load_dataset
from transformers import ClvpFeatureExtractor
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, slow
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
# Copied from transformers.tests.models.whisper.test_feature_extraction_whisper.floats_list
def floats_list(shape, scale=1.0, rng=None, name=None):
"""Creates a random float32 tensor"""
if rng is None:
rng = global_rng
values = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
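# Hedged usage sketch for `floats_list` above (not part of the original helpers): the
# return value is a plain nested Python list, not a numpy array or torch tensor.
def _example_floats_list_shape():
    values = floats_list((2, 3), scale=0.5)
    # shape contract: outer length is shape[0], each inner row has length shape[1]
    assert len(values) == 2 and all(len(row) == 3 for row in values)
    # rng.random() lies in [0.0, 1.0), so scaled values lie in [0.0, scale)
    assert all(0.0 <= v < 0.5 for row in values for v in row)
    return values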
@require_torch
class ClvpFeatureExtractionTester(unittest.TestCase):
def __init__(
self,
parent,
batch_size=7,
min_seq_length=400,
max_seq_length=2000,
feature_size=10,
hop_length=160,
chunk_length=8,
padding_value=0.0,
sampling_rate=4_000,
return_attention_mask=False,
):
self.parent = parent
self.batch_size = batch_size
self.min_seq_length = min_seq_length
self.max_seq_length = max_seq_length
self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
self.padding_value = padding_value
self.sampling_rate = sampling_rate
self.return_attention_mask = return_attention_mask
self.feature_size = feature_size
self.chunk_length = chunk_length
self.hop_length = hop_length
def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
}
# Copied from transformers.tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTester.prepare_inputs_for_common
def prepare_inputs_for_common(self, equal_length=False, numpify=False):
def _flatten(list_of_lists):
return list(itertools.chain(*list_of_lists))
if equal_length:
speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
speech_inputs = [
floats_list((x, self.feature_size))
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
]
if numpify:
speech_inputs = [np.asarray(x) for x in speech_inputs]
return speech_inputs
@require_torch
class ClvpFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
feature_extraction_class = ClvpFeatureExtractor
def setUp(self):
self.feat_extract_tester = ClvpFeatureExtractionTester(self)
def tearDown(self):
super().tearDown()
        # clean up as much GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
# Copied from transformers.tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_feat_extract_from_and_save_pretrained
def test_feat_extract_from_and_save_pretrained(self):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
check_json_file_has_correct_format(saved_file)
feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
dict_first = feat_extract_first.to_dict()
dict_second = feat_extract_second.to_dict()
mel_1 = feat_extract_first.mel_filters
mel_2 = feat_extract_second.mel_filters
self.assertTrue(np.allclose(mel_1, mel_2))
self.assertEqual(dict_first, dict_second)
# Copied from transformers.tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_feat_extract_to_json_file
def test_feat_extract_to_json_file(self):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
json_file_path = os.path.join(tmpdirname, "feat_extract.json")
feat_extract_first.to_json_file(json_file_path)
feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
dict_first = feat_extract_first.to_dict()
dict_second = feat_extract_second.to_dict()
mel_1 = feat_extract_first.mel_filters
mel_2 = feat_extract_second.mel_filters
self.assertTrue(np.allclose(mel_1, mel_2))
self.assertEqual(dict_first, dict_second)
def test_call(self):
        # Tests that all calls wrap to encode_plus and batch_encode_plus
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
# Test feature size
input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
self.assertTrue(input_features.ndim == 3)
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)
# Test not batched input
encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
# Test batched
encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
# Test 2-D numpy arrays are batched.
speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
np_speech_inputs = np.asarray(speech_inputs)
encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
# Test truncation required
speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]
encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
# Copied from transformers.tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_double_precision_pad
def test_double_precision_pad(self):
import torch
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
py_speech_inputs = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
self.assertTrue(np_processed.input_features.dtype == np.float32)
pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
self.assertTrue(pt_processed.input_features.dtype == torch.float32)
def _load_datasamples(self, num_samples):
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
ds = ds.cast_column("audio", Audio(sampling_rate=22050))
# automatic decoding with librispeech
speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
return [x["array"] for x in speech_samples], [x["sampling_rate"] for x in speech_samples]
@slow
def test_integration(self):
# fmt: off
EXPECTED_INPUT_FEATURES = torch.tensor(
[
0.9271, 1.1405, 1.4419, 1.2470, 1.2438, 1.1787, 1.0595, 1.0570, 1.1070,
1.2205, 1.2376, 1.2997, 1.1131, 1.0843, 1.0459, 1.1858, 1.2323, 1.3582,
1.3401, 1.3770, 1.4173, 1.3381, 1.2291, 1.0854, 1.2116, 1.1873, 1.2178,
1.2137, 1.3001, 1.4274
]
)
# fmt: on
input_speech, sr = self._load_datasamples(1)
feature_extractor = ClvpFeatureExtractor.from_pretrained("susnato/clvp_dev")
input_features = feature_extractor(input_speech, sampling_rate=sr[0], return_tensors="pt").input_features
self.assertEqual(input_features.shape, (1, 80, 517))
self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
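# Hedged sketch of the (batch, feature_size, frames) layout asserted in test_integration
# above, using silence instead of the LibriSpeech sample; assumes the same
# "susnato/clvp_dev" checkpoint and its default 22050 Hz sampling rate.
def _example_clvp_feature_shape():
    feature_extractor = ClvpFeatureExtractor.from_pretrained("susnato/clvp_dev")
    audio = np.zeros(22050, dtype=np.float32)  # one second of silence
    input_features = feature_extractor(audio, sampling_rate=22050, return_tensors="np")["input_features"]
    # rows are mel bins (feature_size), columns are frames
    return input_features.shape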
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/clvp/test_tokenization_clvp.py
|
# coding=utf-8
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
from typing import List
from transformers import ClvpTokenizer
from ...test_tokenization_common import TokenizerTesterMixin, slow
class ClvpTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = ClvpTokenizer
test_rust_tokenizer = False
from_pretrained_kwargs = {"add_prefix_space": True}
test_seq2seq = False
test_sentencepiece_ignore_case = True
def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
"[SPACE]",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
self.vocab_file = os.path.join(self.tmpdirname, "vocab.json")
self.merges_file = os.path.join(self.tmpdirname, "merges.txt")
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
# Copied from transformers.tests.models.gpt2.test_tokenization_gpt2.GPT2TokenizationTest.get_tokenizer with GPT2->Clvp
def get_tokenizer(self, **kwargs):
kwargs.update(self.special_tokens_map)
return ClvpTokenizer.from_pretrained(self.tmpdirname, **kwargs)
# Copied from transformers.tests.models.gpt2.test_tokenization_gpt2.GPT2TokenizationTest.get_input_output_texts
def get_input_output_texts(self, tokenizer):
input_text = "lower newer"
output_text = "lower newer"
return input_text, output_text
# Copied from transformers.tests.models.layoutxlm.test_tokenization_layoutxlm.LayoutXLMTokenizationTest.test_add_special_tokens
def test_add_special_tokens(self):
tokenizers: List[ClvpTokenizer] = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
special_token = "[SPECIAL_TOKEN]"
special_token_box = [1000, 1000, 1000, 1000]
tokenizer.add_special_tokens({"cls_token": special_token})
encoded_special_token = tokenizer.encode(
[special_token], boxes=[special_token_box], add_special_tokens=False
)
self.assertEqual(len(encoded_special_token), 1)
decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
self.assertTrue(special_token not in decoded)
# Copied from transformers.tests.models.gpt2.test_tokenization_gpt2.GPT2TokenizationTest.test_rust_and_python_full_tokenizers
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
sequence = "lower newer"
# Testing tokenization
tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
# Testing conversion to ids without special tokens
ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
# Testing conversion to ids with special tokens
rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
ids = tokenizer.encode(sequence, add_prefix_space=True)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
# Testing the unknown token
input_tokens = tokens + [rust_tokenizer.unk_token]
input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
# Copied from transformers.tests.models.gpt2.test_tokenization_gpt2.GPT2TokenizationTest.test_padding
def test_padding(self, max_length=15):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
# Simple input
s = "This is a simple input"
s2 = ["This is a simple input 1", "This is a simple input 2"]
p = ("This is a simple input", "This is a pair")
p2 = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
# Simple input
self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
# Simple input
self.assertRaises(
ValueError,
tokenizer_r.batch_encode_plus,
s2,
max_length=max_length,
padding="max_length",
)
# Pair input
self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
# Pair input
self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
# Pair input
self.assertRaises(
ValueError,
tokenizer_r.batch_encode_plus,
p2,
max_length=max_length,
padding="max_length",
)
# Copied from transformers.tests.models.gpt2.test_tokenization_gpt2.GPT2TokenizationTest.test_padding_if_pad_token_set_slow
def test_padding_if_pad_token_set_slow(self):
tokenizer = ClvpTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")
# Simple input
s = "This is a simple input"
s2 = ["This is a simple input looooooooong", "This is a simple input"]
p = ("This is a simple input", "This is a pair")
p2 = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
pad_token_id = tokenizer.pad_token_id
out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncation=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncation=True, return_tensors="np")
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1], 30)
self.assertTrue(pad_token_id in out_s["input_ids"])
self.assertTrue(0 in out_s["attention_mask"])
# s2
# test automatic padding
self.assertEqual(out_s2["input_ids"].shape[-1], 33)
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_s2["input_ids"][0])
self.assertFalse(0 in out_s2["attention_mask"][0])
# short slice does have padding
self.assertTrue(pad_token_id in out_s2["input_ids"][1])
self.assertTrue(0 in out_s2["attention_mask"][1])
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1], 60)
self.assertTrue(pad_token_id in out_p["input_ids"])
self.assertTrue(0 in out_p["attention_mask"])
# p2
# test automatic padding pair
self.assertEqual(out_p2["input_ids"].shape[-1], 52)
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_p2["input_ids"][0])
self.assertFalse(0 in out_p2["attention_mask"][0])
# short slice pair does have padding
self.assertTrue(pad_token_id in out_p2["input_ids"][1])
self.assertTrue(0 in out_p2["attention_mask"][1])
# Copied from transformers.tests.models.gpt2.test_tokenization_gpt2.GPT2TokenizationTest.test_special_tokens_mask_input_pairs_and_bos_token
def test_special_tokens_mask_input_pairs_and_bos_token(self):
# TODO: change to self.get_tokenizers() when the fast version is implemented
tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequence_0 = "Encode this."
sequence_1 = "This one too please."
encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
encoded_sequence_dict = tokenizer.encode_plus(
sequence_0,
sequence_1,
add_special_tokens=True,
return_special_tokens_mask=True,
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
]
filtered_sequence = [x for x in filtered_sequence if x is not None]
self.assertEqual(encoded_sequence, filtered_sequence)
def test_token_type_ids(self):
tokenizer = self.get_tokenizer()
seq_0 = "Test this method."
        # We want sequence 0 and sequence 1 to be tagged
        # respectively with 0 and 1 token_ids
        # (regardless of whether the model uses token type ids)
        # We rely on this assumption in the QA pipeline, among other places
output = tokenizer(seq_0, return_token_type_ids=True, add_special_tokens=True)
self.assertIn(0, output["token_type_ids"])
def test_full_tokenizer(self):
tokenizer = ClvpTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
text = "lower newer"
bpe_tokens = ["l", "o", "w", "er", "[SPACE]", "n", "e", "w", "er"]
tokens = tokenizer.tokenize(text, add_prefix_space=False)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [0, 1, 2, 15, 21, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
def test_outputs_with_numbers(self):
text = "hello and this is an example text and I have $1000. my lucky number is 12345."
tokenizer = ClvpTokenizer.from_pretrained("susnato/clvp_dev")
# fmt: off
        EXPECTED_OUTPUT = [62, 84, 28, 2, 53, 2, 147, 2, 54, 2, 43, 2, 169, 122, 29, 64, 2, 136, 37, 33, 2, 53, 2, 22,
2, 148, 2, 110, 2, 40, 206, 53, 2, 134, 84, 59, 32, 9, 2, 125, 2, 25, 34, 197, 38, 2, 27,
231, 15, 44, 2, 54, 2, 33, 100, 25, 76, 2, 40, 206, 53, 7, 2, 40, 46, 18, 2, 21, 97, 17,
219, 2, 87, 210, 8, 19, 22, 76, 9,
]
# fmt: on
self.assertListEqual(tokenizer.encode(text, add_special_tokens=False), EXPECTED_OUTPUT)
@slow
def test_tokenizer_integration(self):
sequences = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over multiple pretrained "
"models and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
expected_encoding = {'input_ids': [[144, 43, 32, 87, 26, 173, 2, 5, 87, 26, 44, 70, 2, 209, 27, 2, 55, 2, 29, 38, 51, 31, 71, 8, 144, 43, 32, 87, 26, 173, 2, 53, 2, 29, 38, 51, 31, 71, 8, 29, 46, 144, 137, 49, 8, 15, 44, 33, 6, 2, 187, 35, 83, 61, 2, 20, 50, 44, 56, 8, 29, 121, 139, 66, 2, 59, 71, 60, 18, 16, 33, 34, 175, 2, 5, 15, 44, 33, 7, 2, 89, 15, 44, 33, 14, 7, 2, 37, 25, 26, 7, 2, 17, 54, 78, 25, 15, 44, 33, 7, 2, 37, 25, 111, 33, 9, 9, 9, 6, 2, 87, 2, 27, 48, 121, 56, 2, 25, 43, 20, 34, 14, 112, 2, 97, 234, 63, 53, 52, 2, 5, 27, 25, 34, 6, 2, 53, 2, 27, 48, 121, 56, 2, 25, 43, 20, 34, 14, 112, 2, 20, 50, 44, 158, 2, 5, 27, 25, 20, 6, 2, 103, 2, 253, 2, 26, 167, 78, 29, 64, 2, 29, 46, 144, 137, 49, 2, 115, 126, 25, 32, 2, 53, 2, 126, 18, 29, 2, 41, 114, 161, 44, 109, 151, 240, 2, 67, 33, 100, 50, 2, 23, 14, 37, 7, 2, 29, 38, 51, 31, 71, 2, 53, 2, 33, 50, 32, 57, 19, 25, 69, 9], [ 15, 44, 33, 2, 54, 2, 17, 61, 22, 20, 27, 49, 2, 51, 2, 29, 46, 8, 144, 137, 2, 126, 18, 29, 2, 15, 83, 22, 46, 16, 181, 56, 2, 46, 29, 175, 86, 158, 32, 2, 154, 2, 97, 25, 14, 67, 25, 49, 2, 136, 37, 33, 2, 185, 2, 23, 28, 41, 33, 70, 2, 135, 17, 60, 107, 52, 2, 47, 2, 165, 40, 2, 64, 19, 33, 2, 53, 2, 101, 104, 2, 135, 136, 37, 33, 2, 41, 2, 108, 2, 25, 88, 173, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 42, 2, 194, 91, 24, 2, 243, 190, 2, 182, 37, 2, 23, 231, 29, 32, 2, 253, 2, 42, 2, 25, 14, 39, 38, 2, 134, 20, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], # noqa: E501
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], # noqa: E501
}
# fmt: on
self.tokenizer_integration_test_util(
sequences=sequences, expected_encoding=expected_encoding, model_name="susnato/clvp_dev", padding=True
)
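# Hedged sketch of the toy BPE setup from setUp above: building a ClvpTokenizer from an
# on-disk vocab/merges pair and exercising the "[SPACE]" convention (ClvpTokenizer maps
# spaces to the "[SPACE]" token, as asserted in test_full_tokenizer). Hypothetical
# helper, not used by the tests.
def _example_toy_clvp_bpe(vocab_file, merges_file):
    tokenizer = ClvpTokenizer(vocab_file, merges_file, unk_token="<unk>")
    tokens = tokenizer.tokenize("lower newer", add_prefix_space=False)
    # with the toy vocab this yields ["l", "o", "w", "er", "[SPACE]", "n", "e", "w", "er"]
    return tokens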
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/vivit/test_modeling_vivit.py
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch ViViT model. """
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VivitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VivitForVideoClassification, VivitModel
from transformers.models.vivit.modeling_vivit import VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VivitImageProcessor
class VivitModelTester:
def __init__(
self,
parent,
batch_size=2,
is_training=True,
use_labels=True,
num_labels=10,
image_size=10,
        num_frames=8,  # decreased because the default 32 takes too much RAM at inference
tubelet_size=[2, 4, 4],
num_channels=3,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu_fast",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
layer_norm_eps=1e-06,
qkv_bias=True,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.is_training = is_training
self.use_labels = use_labels
self.num_labels = num_labels
self.image_size = image_size
self.num_frames = num_frames
self.tubelet_size = tubelet_size
self.num_channels = num_channels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.qkv_bias = qkv_bias
self.scope = scope
self.seq_length = (
(self.image_size // self.tubelet_size[2])
* (self.image_size // self.tubelet_size[1])
* (self.num_frames // self.tubelet_size[0])
) + 1 # CLS token
def prepare_config_and_inputs(self):
pixel_values = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
)
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
config = VivitConfig(
num_frames=self.num_frames,
image_size=self.image_size,
tubelet_size=self.tubelet_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
initializer_range=self.initializer_range,
layer_norm_eps=self.layer_norm_eps,
qkv_bias=self.qkv_bias,
)
config.num_labels = self.num_labels
return config
def create_and_check_model(self, config, pixel_values, labels):
model = VivitModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_video_classification(self, config, pixel_values, labels):
model = VivitForVideoClassification(config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify the logits shape
expected_shape = torch.Size((self.batch_size, self.num_labels))
self.parent.assertEqual(result.logits.shape, expected_shape)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
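# Hedged sketch of the sequence-length formula used by VivitModelTester above: ViViT
# splits the video into non-overlapping tubelets of size (t, h, w), yielding
# (num_frames // t) * (image_size // h) * (image_size // w) patch tokens plus one CLS token.
def _example_vivit_seq_length(num_frames=8, image_size=10, tubelet_size=(2, 4, 4)):
    t, h, w = tubelet_size
    # with the tester defaults this gives 4 * 2 * 2 + 1 = 17
    return (num_frames // t) * (image_size // h) * (image_size // w) + 1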
@require_torch
class VivitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as Vivit does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (VivitModel, VivitForVideoClassification) if is_torch_available() else ()
pipeline_model_mapping = (
{"feature-extraction": VivitModel, "video-classification": VivitForVideoClassification}
if is_torch_available()
else {}
)
test_pruning = False
test_torchscript = False
test_resize_embeddings = False
test_head_masking = False
def setUp(self):
self.model_tester = VivitModelTester(self)
self.config_tester = ConfigTester(self, config_class=VivitConfig, has_text_modality=False, hidden_size=37)
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = copy.deepcopy(inputs_dict)
if return_labels:
if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
inputs_dict["labels"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
return inputs_dict
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="Vivit does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_common_attributes(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values", "head_mask"]
self.assertListEqual(arg_names[:2], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_video_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = VivitModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
seq_len = self.model_tester.seq_length
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + 1, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(hidden_states), expected_num_layers)
seq_length = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
# We will verify our results on a video of eating spaghetti
# Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227]
def prepare_video():
file = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti_32_frames.npy", repo_type="dataset"
)
video = np.load(file)
return list(video)
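# Hedged sketch of how the clip from `prepare_video` is fed to the model below; mirrors
# the integration test and assumes the vision dependencies are installed. With the
# processor's assumed default resize/crop settings the output is (1, 32, 3, 224, 224).
def _example_preprocess_video():
    video = prepare_video()  # list of 32 height x width x channel uint8 frames
    image_processor = VivitImageProcessor()
    inputs = image_processor(video, return_tensors="pt")
    # pixel_values layout: (batch, num_frames, channels, height, width)
    return inputs["pixel_values"].shape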
@require_torch
@require_vision
class VivitModelIntegrationTest(unittest.TestCase):
@cached_property
def default_image_processor(self):
return VivitImageProcessor() if is_vision_available() else None
@slow
def test_inference_for_video_classification(self):
model = VivitForVideoClassification.from_pretrained("google/vivit-b-16x2-kinetics400").to(torch_device)
image_processor = self.default_image_processor
video = prepare_video()
inputs = image_processor(video, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 400))
self.assertEqual(outputs.logits.shape, expected_shape)
# taken from original model
expected_slice = torch.tensor([-0.9498, 2.7971, -1.4049, 0.1024, -1.8353]).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, :5], expected_slice, atol=1e-4))
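# Hedged follow-up to the integration test above (hypothetical helper): mapping the
# (1, 400) Kinetics-400 logits to a human-readable label via the config's id2label table.
def _example_vivit_top1_label(model, outputs):
    predicted_idx = outputs.logits.argmax(-1).item()
    return model.config.id2label[predicted_idx]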
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/vivit/test_image_processing_vivit.py
|
# coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
num_frames=10,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
crop_size=None,
):
size = size if size is not None else {"shortest_edge": 18}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.num_frames = num_frames
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.crop_size = crop_size
def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
def expected_output_image_shape(self, images):
return self.num_frames, self.num_channels, self.crop_size["height"], self.crop_size["width"]
def prepare_video_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_video_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
num_frames=self.num_frames,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = VivitImageProcessor if is_vision_available() else None
def setUp(self):
self.image_processor_tester = VivitImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
image_processing = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "do_center_crop"))
self.assertTrue(hasattr(image_processing, "size"))
def test_image_processor_from_dict_with_kwargs(self):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"shortest_edge": 18})
self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
self.assertEqual(image_processor.size, {"shortest_edge": 42})
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
def test_rescale(self):
# ViVit optionally rescales between -1 and 1 instead of the usual 0 and 1
image = np.arange(0, 256, 1, dtype=np.uint8).reshape(1, 8, 32)
image_processor = self.image_processing_class(**self.image_processor_dict)
rescaled_image = image_processor.rescale(image, scale=1 / 127.5)
expected_image = (image * (1 / 127.5)).astype(np.float32) - 1
self.assertTrue(np.allclose(rescaled_image, expected_image))
rescaled_image = image_processor.rescale(image, scale=1 / 255, offset=False)
expected_image = (image / 255.0).astype(np.float32)
self.assertTrue(np.allclose(rescaled_image, expected_image))
def test_call_pil(self):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict)
# create random PIL videos
video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False)
for video in video_inputs:
self.assertIsInstance(video, list)
self.assertIsInstance(video[0], Image.Image)
# Test not batched input
encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]])
self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))
# Test batched
encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos)
self.assertEqual(
tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape)
)
def test_call_numpy(self):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True)
for video in video_inputs:
self.assertIsInstance(video, list)
self.assertIsInstance(video[0], np.ndarray)
# Test not batched input
encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]])
self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))
# Test batched
encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos)
self.assertEqual(
tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape)
)
def test_call_numpy_4_channels(self):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
self.image_processor_tester.num_channels = 4
video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True)
for video in video_inputs:
self.assertIsInstance(video, list)
self.assertIsInstance(video[0], np.ndarray)
# Test not batched input
encoded_videos = image_processing(
video_inputs[0], return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first"
).pixel_values
expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]])
self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))
# Test batched
encoded_videos = image_processing(
video_inputs, return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first"
).pixel_values
expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos)
self.assertEqual(
tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape)
)
self.image_processor_tester.num_channels = 3
def test_call_pytorch(self):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, torchify=True)
for video in video_inputs:
self.assertIsInstance(video, list)
self.assertIsInstance(video[0], torch.Tensor)
# Test not batched input
encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]])
self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))
# Test batched
encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos)
self.assertEqual(
tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape)
)
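# Hedged numeric sketch of the offset rescaling checked in test_rescale above: with
# scale=1/127.5 and the offset enabled (the default), a uint8 pixel p maps to
# p / 127.5 - 1, so 0 -> -1.0 and 255 -> 1.0 (up to float32 rounding).
def _example_offset_rescale():
    image = np.array([[0, 127, 255]], dtype=np.uint8)
    rescaled = (image * (1 / 127.5)).astype(np.float32) - 1
    return rescaled  # approximately [[-1.0, -0.0039, 1.0]]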
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/speech_to_text_2/test_modeling_speech_to_text_2.py
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Speech2Text model. """
import unittest
from transformers import Speech2Text2Config
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.speech_to_text_2.modeling_speech_to_text_2 import (
Speech2Text2Decoder,
Speech2Text2ForCausalLM,
)
@require_torch
class Speech2Text2StandaloneDecoderModelTester:
def __init__(
self,
parent,
vocab_size=99,
batch_size=13,
d_model=16,
decoder_seq_length=7,
is_training=True,
is_decoder=True,
use_attention_mask=True,
use_cache=False,
use_labels=True,
decoder_start_token_id=2,
decoder_ffn_dim=32,
decoder_layers=2,
decoder_attention_heads=4,
max_position_embeddings=30,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.d_model = d_model
self.hidden_size = d_model
self.num_hidden_layers = decoder_layers
self.decoder_layers = decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_attention_heads = decoder_attention_heads
self.num_attention_heads = decoder_attention_heads
self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.use_cache = use_cache
self.max_position_embeddings = max_position_embeddings
self.scope = None
self.decoder_key_length = decoder_seq_length
self.base_model_out_len = 2
self.decoder_attention_idx = 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
config = Speech2Text2Config(
vocab_size=self.vocab_size,
d_model=self.d_model,
decoder_layers=self.decoder_layers,
decoder_ffn_dim=self.decoder_ffn_dim,
decoder_attention_heads=self.decoder_attention_heads,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
use_cache=self.use_cache,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
max_position_embeddings=self.max_position_embeddings,
)
return (
config,
input_ids,
attention_mask,
lm_labels,
)
def create_and_check_decoder_model_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
config.use_cache = True
model = Speech2Text2Decoder(config=config).to(torch_device).eval()
input_ids = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
outputs = model(input_ids, use_cache=True)
outputs_use_cache_conf = model(input_ids)
outputs_no_past = model(input_ids, use_cache=False)
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
past_key_values = outputs["past_key_values"]
        # create a hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1
        # append the next token to input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
attention_mask,
lm_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
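# Hedged sketch of the KV-cache equivalence property exercised in
# create_and_check_decoder_model_past above: decoding new tokens with past_key_values
# must reproduce the corresponding positions of a full no-cache forward pass.
def _example_cache_equivalence(model, input_ids, next_tokens):
    with torch.no_grad():
        past_key_values = model(input_ids, use_cache=True)["past_key_values"]
        full = model(torch.cat([input_ids, next_tokens], dim=-1))["last_hidden_state"]
        cached = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
    # the cached step reproduces the trailing positions of the full forward pass
    return torch.allclose(full[:, -next_tokens.shape[-1] :], cached, atol=1e-3)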
@require_torch
class Speech2Text2StandaloneDecoderModelTest(
ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase
):
all_model_classes = (Speech2Text2Decoder, Speech2Text2ForCausalLM) if is_torch_available() else ()
all_generative_model_classes = (Speech2Text2ForCausalLM,) if is_torch_available() else ()
pipeline_model_mapping = {"text-generation": Speech2Text2ForCausalLM} if is_torch_available() else {}
fx_compatible = True
test_pruning = False
def setUp(
self,
):
self.model_tester = Speech2Text2StandaloneDecoderModelTester(self, is_training=False)
self.config_tester = ConfigTester(self, config_class=Speech2Text2Config)
# not implemented currently
def test_inputs_embeds(self):
pass
@unittest.skip("This test is currently broken because of safetensors.")
def test_tf_from_pt_safetensors(self):
pass
# speech2text2 has no base model
def test_save_load_fast_init_from_base(self):
pass
# speech2text2 has no base model
def test_save_load_fast_init_to_base(self):
pass
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
# decoder cannot keep gradients
def test_retain_grad_hidden_states_attentions(self):
return
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/speech_to_text_2/test_tokenization_speech_to_text_2.py
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
import os
import tempfile
import unittest
from transformers.models.speech_to_text_2 import Speech2Text2Tokenizer
from transformers.models.speech_to_text_2.tokenization_speech_to_text_2 import VOCAB_FILES_NAMES
from ...test_tokenization_common import TokenizerTesterMixin
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = Speech2Text2Tokenizer
test_rust_tokenizer = False
def setUp(self):
super().setUp()
vocab = "<s> <pad> </s> <unk> here@@ a couple of@@ words for the he@@ re@@ vocab".split(" ")
merges = ["he re</w> 123", "here a 1456"]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
self.tmpdirname = tempfile.mkdtemp()
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file, "w") as fp:
fp.write("\n".join(merges))
def test_get_vocab(self):
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "<s>")
self.assertEqual(vocab_keys[1], "<pad>")
self.assertEqual(vocab_keys[-1], "vocab")
self.assertEqual(len(vocab_keys), 14)
def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size, 14)
def test_tokenizer_decode(self):
tokenizer = Speech2Text2Tokenizer.from_pretrained(self.tmpdirname)
# make sure @@ is correctly concatenated
token_ids = [4, 6, 8, 7, 10] # ["here@@", "couple", "words", "of@@", "the"]
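        # "here@@" and "of@@" end with the BPE continuation marker, so decoding glues them onto the
        # following token: "here@@ couple" -> "herecouple" and "of@@ the" -> "ofthe"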
output_string = tokenizer.decode(token_ids)
        self.assertEqual(output_string, "herecouple words ofthe")
def test_load_no_merges_file(self):
tokenizer = Speech2Text2Tokenizer.from_pretrained(self.tmpdirname)
with tempfile.TemporaryDirectory() as tmp_dirname:
tokenizer.save_pretrained(tmp_dirname)
os.remove(os.path.join(tmp_dirname, "merges.txt"))
# load tokenizer without merges file should not throw an error
tokenizer = Speech2Text2Tokenizer.from_pretrained(tmp_dirname)
with tempfile.TemporaryDirectory() as tmp_dirname:
# save tokenizer and load again
tokenizer.save_pretrained(tmp_dirname)
tokenizer = Speech2Text2Tokenizer.from_pretrained(tmp_dirname)
self.assertIsNotNone(tokenizer)
# overwrite since merges_file is optional
def test_tokenizer_slow_store_full_signature(self):
if not self.test_slow_tokenizer:
return
signature = inspect.signature(self.tokenizer_class.__init__)
tokenizer = self.get_tokenizer()
for parameter_name, parameter in signature.parameters.items():
if parameter.default != inspect.Parameter.empty and parameter_name != "merges_file":
self.assertIn(parameter_name, tokenizer.init_kwargs)
hf_public_repos/transformers/tests/models/layoutlmv2/test_processor_layoutlmv2.py
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import tempfile
import unittest
from typing import List
import numpy as np
from transformers import PreTrainedTokenizer, PreTrainedTokenizerBase, PreTrainedTokenizerFast
from transformers.models.layoutlmv2 import LayoutLMv2Tokenizer, LayoutLMv2TokenizerFast
from transformers.models.layoutlmv2.tokenization_layoutlmv2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pytesseract, require_tokenizers, require_torch, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, cached_property, is_pytesseract_available
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMv2ImageProcessor, LayoutLMv2Processor
@require_pytesseract
@require_tokenizers
class LayoutLMv2ProcessorTest(unittest.TestCase):
tokenizer_class = LayoutLMv2Tokenizer
rust_tokenizer_class = LayoutLMv2TokenizerFast
def setUp(self):
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
image_processor_map = {
"do_resize": True,
"size": 224,
"apply_ocr": True,
}
self.tmpdirname = tempfile.mkdtemp()
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
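        # the image processor settings are saved under FEATURE_EXTRACTOR_NAME ("preprocessor_config.json"),
        # which is the file from_pretrained looks for when reloading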
self.image_processing_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
with open(self.image_processing_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(image_processor_map) + "\n")
def get_tokenizer(self, **kwargs) -> PreTrainedTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def get_rust_tokenizer(self, **kwargs) -> PreTrainedTokenizerFast:
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def get_tokenizers(self, **kwargs) -> List[PreTrainedTokenizerBase]:
return [self.get_tokenizer(**kwargs), self.get_rust_tokenizer(**kwargs)]
def get_image_processor(self, **kwargs):
return LayoutLMv2ImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def prepare_image_inputs(self):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
"""
image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
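        # np.moveaxis turns the channel-first (C, H, W) array into the channel-last (H, W, C)
        # layout that PIL's Image.fromarray expects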
image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def test_save_load_pretrained_default(self):
image_processor = self.get_image_processor()
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)
processor.save_pretrained(self.tmpdirname)
processor = LayoutLMv2Processor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer, (LayoutLMv2Tokenizer, LayoutLMv2TokenizerFast))
self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor.image_processor, LayoutLMv2ImageProcessor)
def test_save_load_pretrained_additional_features(self):
processor = LayoutLMv2Processor(image_processor=self.get_image_processor(), tokenizer=self.get_tokenizer())
processor.save_pretrained(self.tmpdirname)
# slow tokenizer
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
image_processor_add_kwargs = self.get_image_processor(do_resize=False, size=30)
processor = LayoutLMv2Processor.from_pretrained(
self.tmpdirname, use_fast=False, bos_token="(BOS)", eos_token="(EOS)", do_resize=False, size=30
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, LayoutLMv2Tokenizer)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, LayoutLMv2ImageProcessor)
# fast tokenizer
tokenizer_add_kwargs = self.get_rust_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
image_processor_add_kwargs = self.get_image_processor(do_resize=False, size=30)
processor = LayoutLMv2Processor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_resize=False, size=30
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, LayoutLMv2TokenizerFast)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, LayoutLMv2ImageProcessor)
def test_model_input_names(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = LayoutLMv2Processor(tokenizer=tokenizer, image_processor=image_processor)
input_str = "lower newer"
image_input = self.prepare_image_inputs()
# add extra args
inputs = processor(text=input_str, images=image_input, return_codebook_pixels=False, return_image_mask=False)
self.assertListEqual(list(inputs.keys()), processor.model_input_names)
@slow
def test_overflowing_tokens(self):
# In the case of overflowing tokens, test that we still have 1-to-1 mapping between the images and input_ids (sequences that are too long are broken down into multiple sequences).
from datasets import load_dataset
# set up
datasets = load_dataset("nielsr/funsd")
processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr")
def preprocess_data(examples):
images = [Image.open(path).convert("RGB") for path in examples["image_path"]]
words = examples["words"]
boxes = examples["bboxes"]
word_labels = examples["ner_tags"]
encoded_inputs = processor(
images,
words,
boxes=boxes,
word_labels=word_labels,
padding="max_length",
truncation=True,
return_overflowing_tokens=True,
stride=50,
return_offsets_mapping=True,
return_tensors="pt",
)
return encoded_inputs
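        # return_overflowing_tokens=True splits long documents into several chunks (overlapping by the
        # 50-token stride); the processor then repeats each image once per chunk (via
        # overflow_to_sample_mapping), which is why images stay aligned with input_ids below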
train_data = preprocess_data(datasets["train"])
self.assertEqual(len(train_data["image"]), len(train_data["input_ids"]))
# different use cases tests
@require_torch
@require_pytesseract
class LayoutLMv2ProcessorIntegrationTests(unittest.TestCase):
@cached_property
def get_images(self):
# we verify our implementation on 2 document images from the DocVQA dataset
from datasets import load_dataset
ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
image_1 = Image.open(ds[0]["file"]).convert("RGB")
image_2 = Image.open(ds[1]["file"]).convert("RGB")
return image_1, image_2
@cached_property
def get_tokenizers(self):
slow_tokenizer = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
fast_tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
return [slow_tokenizer, fast_tokenizer]
@slow
def test_processor_case_1(self):
# case 1: document image classification (training, inference) + token classification (inference), apply_ocr = True
image_processor = LayoutLMv2ImageProcessor()
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)
# not batched
input_image_proc = image_processor(images[0], return_tensors="pt")
input_processor = processor(images[0], return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify image
self.assertAlmostEqual(input_image_proc["pixel_values"].sum(), input_processor["image"].sum(), delta=1e-2)
# verify input_ids
# this was obtained with Tesseract 4.1.1
expected_decoding = "[CLS] 11 : 14 to 11 : 39 a. m 11 : 39 to 11 : 44 a. m. 11 : 44 a. m. to 12 : 25 p. m. 12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president “ introductory remarks ” lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. plus questions from the floor. dr. emil m. mrak, university of cal - ifornia, chairman, trrf board ; sam r. cecil, university of georgia college of agriculture ; dr. stanley charm, tufts university school of medicine ; dr. robert h. cotton, itt continental baking company ; dr. owen fennema, university of wis - consin ; dr. robert e. hardenburg, usda. questions and answers exhibits open capt. jack stoney room trrf scientific advisory council meeting ballroom foyer [SEP]" # fmt: skip
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
input_image_proc = image_processor(images, return_tensors="pt")
input_processor = processor(images, padding=True, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify images
self.assertAlmostEqual(input_image_proc["pixel_values"].sum(), input_processor["image"].sum(), delta=1e-2)
# verify input_ids
# this was obtained with Tesseract 4.1.1
expected_decoding = "[CLS] 7 itc limited report and accounts 2013 itc ’ s brands : an asset for the nation the consumer needs and aspirations they fulfil, the benefit they generate for millions across itc ’ s value chains, the future - ready capabilities that support them, and the value that they create for the country, have made itc ’ s brands national assets, adding to india ’ s competitiveness. it is itc ’ s aspiration to be the no 1 fmcg player in the country, driven by its new fmcg businesses. a recent nielsen report has highlighted that itc's new fmcg businesses are the fastest growing among the top consumer goods companies operating in india. itc takes justifiable pride that, along with generating economic value, these celebrated indian brands also drive the creation of larger societal capital through the virtuous cycle of sustainable and inclusive growth. di wills * ; love delightfully soft skin? aia ans source : https : / / www. industrydocuments. ucsf. edu / docs / snbx0223 [SEP] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD]" # fmt: skip
decoding = processor.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
@slow
def test_processor_case_2(self):
# case 2: document image classification (training, inference) + token classification (inference), apply_ocr=False
image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)
# not batched
words = ["hello", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
input_processor = processor(images[0], words, boxes=boxes, return_tensors="pt")
# verify keys
expected_keys = ["input_ids", "bbox", "token_type_ids", "attention_mask", "image"]
actual_keys = list(input_processor.keys())
for key in expected_keys:
self.assertIn(key, actual_keys)
# verify input_ids
expected_decoding = "[CLS] hello world [SEP]"
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
words = [["hello", "world"], ["my", "name", "is", "niels"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
input_processor = processor(images, words, boxes=boxes, padding=True, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "[CLS] hello world [SEP] [PAD] [PAD] [PAD]"
decoding = processor.decode(input_processor.input_ids[0].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
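            # [CLS] is assigned the [0, 0, 0, 0] box and [SEP] the full-page [1000, 1000, 1000, 1000]
            # box; "niels" is split into two wordpieces, which both inherit the word-level box [1, 1, 2, 3]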
expected_bbox = [
[0, 0, 0, 0],
[3, 2, 5, 1],
[6, 7, 4, 2],
[3, 9, 2, 4],
[1, 1, 2, 3],
[1, 1, 2, 3],
[1000, 1000, 1000, 1000],
]
self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)
@slow
def test_processor_case_3(self):
# case 3: token classification (training), apply_ocr=False
image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)
# not batched
words = ["weirdly", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
word_labels = [1, 2]
input_processor = processor(images[0], words, boxes=boxes, word_labels=word_labels, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "labels", "token_type_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "[CLS] weirdly world [SEP]"
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify labels
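            # -100 is the index ignored by PyTorch's CrossEntropyLoss; it is assigned to special
            # tokens and to every wordpiece after the first one of each word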
expected_labels = [-100, 1, -100, 2, -100]
self.assertListEqual(input_processor.labels.squeeze().tolist(), expected_labels)
# batched
words = [["hello", "world"], ["my", "name", "is", "niels"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
word_labels = [[1, 2], [6, 3, 10, 2]]
input_processor = processor(
images, words, boxes=boxes, word_labels=word_labels, padding=True, return_tensors="pt"
)
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "labels", "token_type_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "[CLS] my name is niels [SEP]"
decoding = processor.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
expected_bbox = [
[0, 0, 0, 0],
[3, 2, 5, 1],
[6, 7, 4, 2],
[3, 9, 2, 4],
[1, 1, 2, 3],
[1, 1, 2, 3],
[1000, 1000, 1000, 1000],
]
self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)
# verify labels
expected_labels = [-100, 6, 3, 10, 2, -100, -100]
self.assertListEqual(input_processor.labels[1].tolist(), expected_labels)
@slow
def test_processor_case_4(self):
# case 4: visual question answering (inference), apply_ocr=True
image_processor = LayoutLMv2ImageProcessor()
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)
# not batched
question = "What's his name?"
input_processor = processor(images[0], question, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
# this was obtained with Tesseract 4.1.1
expected_decoding = "[CLS] what's his name? [SEP] 11 : 14 to 11 : 39 a. m 11 : 39 to 11 : 44 a. m. 11 : 44 a. m. to 12 : 25 p. m. 12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president “ introductory remarks ” lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. plus questions from the floor. dr. emil m. mrak, university of cal - ifornia, chairman, trrf board ; sam r. cecil, university of georgia college of agriculture ; dr. stanley charm, tufts university school of medicine ; dr. robert h. cotton, itt continental baking company ; dr. owen fennema, university of wis - consin ; dr. robert e. hardenburg, usda. questions and answers exhibits open capt. jack stoney room trrf scientific advisory council meeting ballroom foyer [SEP]" # fmt: skip
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
questions = ["How old is he?", "what's the time"]
input_processor = processor(
images, questions, padding="max_length", max_length=20, truncation=True, return_tensors="pt"
)
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
# this was obtained with Tesseract 4.1.1
expected_decoding = "[CLS] what's the time [SEP] 7 itc limited report and accounts 2013 itc ’ s [SEP]"
decoding = processor.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
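            # question tokens are given the zero box, both [SEP] tokens the full-page box, and the
            # remaining slots (up to max_length=20) hold the OCR'd word boxes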
expected_bbox = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [0, 45, 67, 80], [72, 56, 109, 67], [72, 56, 109, 67], [116, 56, 189, 67], [198, 59, 253, 66], [257, 59, 285, 66], [289, 59, 365, 66], [372, 59, 407, 66], [74, 136, 161, 158], [74, 136, 161, 158], [74, 136, 161, 158], [74, 136, 161, 158], [1000, 1000, 1000, 1000]] # fmt: skip
self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)
@slow
def test_processor_case_5(self):
# case 5: visual question answering (inference), apply_ocr=False
image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)
# not batched
question = "What's his name?"
words = ["hello", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
input_processor = processor(images[0], question, words, boxes, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "[CLS] what's his name? [SEP] hello world [SEP]"
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
questions = ["How old is he?", "what's the time"]
words = [["hello", "world"], ["my", "name", "is", "niels"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
input_processor = processor(images, questions, words, boxes, padding=True, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "[CLS] how old is he? [SEP] hello world [SEP] [PAD] [PAD] [PAD]"
decoding = processor.decode(input_processor.input_ids[0].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
expected_decoding = "[CLS] what's the time [SEP] my name is niels [SEP]"
decoding = processor.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
expected_bbox = [[6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3], [1, 1, 2, 3], [1000, 1000, 1000, 1000]]
self.assertListEqual(input_processor.bbox[1].tolist()[-5:], expected_bbox)
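# For quick reference (not executed by the test suite): a minimal end-to-end sketch of the processor
# exercised above, assuming pytesseract is installed and a local "document.png" exists:
#
#     from PIL import Image
#     from transformers import LayoutLMv2Processor
#
#     processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased")
#     image = Image.open("document.png").convert("RGB")
#     encoding = processor(image, return_tensors="pt")  # OCR runs automatically (apply_ocr=True)
#     print(sorted(encoding.keys()))  # attention_mask, bbox, image, input_ids, token_type_ids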
hf_public_repos/transformers/tests/models/layoutlmv2/test_modeling_layoutlmv2.py
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch LayoutLMv2 model. """
import unittest
from transformers.testing_utils import require_detectron2, require_torch, require_torch_multi_gpu, slow, torch_device
from transformers.utils import is_detectron2_available, is_torch_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LayoutLMv2Config,
LayoutLMv2ForQuestionAnswering,
LayoutLMv2ForSequenceClassification,
LayoutLMv2ForTokenClassification,
LayoutLMv2Model,
)
from transformers.models.layoutlmv2.modeling_layoutlmv2 import LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_detectron2_available():
from detectron2.structures.image_list import ImageList
class LayoutLMv2ModelTester:
def __init__(
self,
parent,
batch_size=2,
num_channels=3,
image_size=4,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=36,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
image_feature_pool_shape=[7, 7, 256],
coordinate_size=6,
shape_size=6,
num_labels=3,
num_choices=4,
scope=None,
range_bbox=1000,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.image_feature_pool_shape = image_feature_pool_shape
self.coordinate_size = coordinate_size
self.shape_size = shape_size
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.range_bbox = range_bbox
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
# Ensure that bbox is legal
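        # boxes are (x0, y0, x1, y1); swap coordinates whenever x1 < x0 or y1 < y0 so every box is valid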
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
t = bbox[i, j, 3]
bbox[i, j, 3] = bbox[i, j, 1]
bbox[i, j, 1] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
t = bbox[i, j, 2]
bbox[i, j, 2] = bbox[i, j, 0]
bbox[i, j, 0] = t
image = ImageList(
torch.zeros(self.batch_size, self.num_channels, self.image_size, self.image_size, device=torch_device),
self.image_size,
)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
config = LayoutLMv2Config(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
image_feature_pool_shape=self.image_feature_pool_shape,
coordinate_size=self.coordinate_size,
shape_size=self.shape_size,
)
# use smaller resnet backbone to make tests faster
config.detectron2_config_args["MODEL.RESNETS.DEPTH"] = 18
config.detectron2_config_args["MODEL.RESNETS.RES2_OUT_CHANNELS"] = 64
config.detectron2_config_args["MODEL.RESNETS.NUM_GROUPS"] = 1
return config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
def create_and_check_model(
self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
):
model = LayoutLMv2Model(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, bbox=bbox, image=image, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, bbox=bbox, image=image, token_type_ids=token_type_ids)
result = model(input_ids, bbox=bbox, image=image)
# LayoutLMv2 has a different expected sequence length, namely also visual tokens are added
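        # here: 7 text tokens + 7 * 7 = 49 visual tokens from the pooled feature map, i.e. 56 positions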
expected_seq_len = self.seq_length + self.image_feature_pool_shape[0] * self.image_feature_pool_shape[1]
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_sequence_classification(
self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
):
config.num_labels = self.num_labels
model = LayoutLMv2ForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
bbox=bbox,
image=image,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=sequence_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
):
config.num_labels = self.num_labels
model = LayoutLMv2ForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
bbox=bbox,
image=image,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=token_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_question_answering(
self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
):
model = LayoutLMv2ForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
bbox=bbox,
image=image,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
image,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"image": image,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
@require_detectron2
class LayoutLMv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
test_pruning = False
test_torchscript = True
test_mismatched_shapes = False
all_model_classes = (
(
LayoutLMv2Model,
LayoutLMv2ForSequenceClassification,
LayoutLMv2ForTokenClassification,
LayoutLMv2ForQuestionAnswering,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{"document-question-answering": LayoutLMv2ForQuestionAnswering, "feature-extraction": LayoutLMv2Model}
if is_torch_available()
else {}
)
def setUp(self):
self.model_tester = LayoutLMv2ModelTester(self)
self.config_tester = ConfigTester(self, config_class=LayoutLMv2Config, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"LayoutLMV2 and its dependency `detectron2` have some layers using `add_module` which doesn't work well"
" with `nn.DataParallel`"
)
)
def test_multi_gpu_data_parallel_forward(self):
pass
def test_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = embedding_type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
# LayoutLMv2 has a different expected sequence length
expected_seq_len = (
self.model_tester.seq_length
+ self.model_tester.image_feature_pool_shape[0] * self.model_tester.image_feature_pool_shape[1]
)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, expected_seq_len, expected_seq_len],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, expected_seq_len, expected_seq_len],
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
# LayoutLMv2 has a different expected sequence length
expected_seq_len = (
self.model_tester.seq_length
+ self.model_tester.image_feature_pool_shape[0] * self.model_tester.image_feature_pool_shape[1]
)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[expected_seq_len, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip("We cannot configure detectron2 to output a smaller backbone")
def test_model_is_small(self):
pass
@slow
def test_model_from_pretrained(self):
for model_name in LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = LayoutLMv2Model.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if "backbone" in name or "visual_segment_embedding" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
def prepare_layoutlmv2_batch_inputs():
# Here we prepare a batch of 2 sequences to test a LayoutLMv2 forward pass on:
# fmt: off
input_ids = torch.tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]]) # noqa: E231
bbox = torch.tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]]) # noqa: E231
image = ImageList(torch.randn((2,3,224,224)), image_sizes=[(224,224), (224,224)]) # noqa: E231
attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],]) # noqa: E231
token_type_ids = torch.tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]) # noqa: E231
# fmt: on
return input_ids, bbox, image, attention_mask, token_type_ids
@require_torch
@require_detectron2
class LayoutLMv2ModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_no_head(self):
model = LayoutLMv2Model.from_pretrained("microsoft/layoutlmv2-base-uncased").to(torch_device)
(
input_ids,
bbox,
image,
attention_mask,
token_type_ids,
) = prepare_layoutlmv2_batch_inputs()
# forward pass
outputs = model(
input_ids=input_ids.to(torch_device),
bbox=bbox.to(torch_device),
image=image.to(torch_device),
attention_mask=attention_mask.to(torch_device),
token_type_ids=token_type_ids.to(torch_device),
)
# verify the sequence output
expected_shape = torch.Size(
(
2,
input_ids.shape[1]
+ model.config.image_feature_pool_shape[0] * model.config.image_feature_pool_shape[1],
model.config.hidden_size,
)
)
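        # i.e. (2, 25 + 49, 768) for the base checkpoint: 25 text tokens plus a 7 x 7 pooled visual feature map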
self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor(
[[-0.1087, 0.0727, -0.3075], [0.0799, -0.0427, -0.0751], [-0.0367, 0.0480, -0.1358]], device=torch_device
)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
# verify the pooled output
expected_shape = torch.Size((2, model.config.hidden_size))
self.assertEqual(outputs.pooler_output.shape, expected_shape)
hf_public_repos/transformers/tests/models/layoutlmv2/test_tokenization_layoutlmv2.py
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import re
import shutil
import tempfile
import unittest
from typing import List
from transformers import (
AddedToken,
LayoutLMv2TokenizerFast,
SpecialTokensMixin,
is_tf_available,
is_torch_available,
logging,
)
from transformers.models.layoutlmv2.tokenization_layoutlmv2 import (
VOCAB_FILES_NAMES,
BasicTokenizer,
LayoutLMv2Tokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_detectron2,
require_pandas,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import (
SMALL_TRAINING_CORPUS,
TokenizerTesterMixin,
filter_non_english,
merge_model_tokenizer_mappings,
)
logger = logging.get_logger(__name__)
@require_tokenizers
@require_pandas
class LayoutLMv2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = LayoutLMv2Tokenizer
rust_tokenizer_class = LayoutLMv2TokenizerFast
test_rust_tokenizer = True
space_between_special_tokens = True
from_pretrained_filter = filter_non_english
test_seq2seq = False
def get_words_and_boxes(self):
words = ["a", "weirdly", "test"]
boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]]
return words, boxes
def get_words_and_boxes_batch(self):
words = [["a", "weirdly", "test"], ["hello", "my", "name", "is", "bob"]]
boxes = [
[[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]],
[[961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69]],
]
return words, boxes
def get_question_words_and_boxes(self):
question = "what's his name?"
words = ["a", "weirdly", "test"]
boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]]
return question, words, boxes
def get_question_words_and_boxes_batch(self):
questions = ["what's his name?", "how is he called?"]
words = [["a", "weirdly", "test"], ["what", "a", "laif", "gastn"]]
boxes = [
[[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]],
[[256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69]],
]
return questions, words, boxes
def setUp(self):
super().setUp()
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"what",
"s",
"his",
"name",
"?",
"a",
"weird",
"##ly",
"test",
"lowest",
]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def get_input_output_texts(self, tokenizer):
input_text = "UNwant\u00E9d,running"
output_text = "unwanted, running"
return input_text, output_text
def test_chinese(self):
tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
def test_basic_tokenizer_lower(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
def test_basic_tokenizer_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_default(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_no_lower(self):
tokenizer = BasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_respects_never_split_tokens(self):
tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
)
def test_wordpiece_tokenizer(self):
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
vocab = {}
for i, token in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
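        # WordPiece greedily matches the longest prefix found in the vocab; continuation pieces carry
        # the "##" prefix, and a word containing any unmatchable piece maps to [UNK] as a whole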
self.assertListEqual(tokenizer.tokenize(""), [])
self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def test_is_control(self):
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
def test_clean_text(self):
tokenizer = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t) for t in ["Hello", "\xad", "hello"]], [["[UNK]"], [], ["[UNK]"]])
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_class.from_pretrained("microsoft/layoutlmv2-base-uncased")
question, words, boxes = self.get_question_words_and_boxes()
text = tokenizer.encode(
question.split(),
boxes=[tokenizer.pad_token_box for _ in range(len(question.split()))],
add_special_tokens=False,
)
text_2 = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
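        # 101 and 102 are the [CLS] and [SEP] ids in the BERT-style vocab; a pair is encoded as
        # [CLS] question [SEP] context [SEP]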
assert encoded_pair == [101] + text + [102] + text_2 + [102]
def test_offsets_with_special_characters(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
words, boxes = self.get_words_and_boxes()
words[1] = tokenizer_r.mask_token
tokens = tokenizer_r.encode_plus(
words,
boxes=boxes,
return_attention_mask=False,
return_token_type_ids=False,
return_offsets_mapping=True,
add_special_tokens=True,
)
expected_results = [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((0, 6), tokenizer_r.mask_token),
((0, 4), "test"),
((0, 0), tokenizer_r.sep_token),
]
self.assertEqual(
[e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
)
self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
def test_add_special_tokens(self):
tokenizers: List[LayoutLMv2Tokenizer] = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
special_token = "[SPECIAL_TOKEN]"
special_token_box = [1000, 1000, 1000, 1000]
tokenizer.add_special_tokens({"cls_token": special_token})
encoded_special_token = tokenizer.encode(
[special_token], boxes=[special_token_box], add_special_tokens=False
)
self.assertEqual(len(encoded_special_token), 1)
decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
self.assertTrue(special_token not in decoded)
def test_add_tokens_tokenizer(self):
tokenizers: List[LayoutLMv2Tokenizer] = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
vocab_size = tokenizer.vocab_size
all_size = len(tokenizer)
self.assertNotEqual(vocab_size, 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
new_toks = ["aaaaa", "bbbbbb", "cccccccccdddddddd"]
added_toks = tokenizer.add_tokens(new_toks)
vocab_size_2 = tokenizer.vocab_size
all_size_2 = len(tokenizer)
self.assertNotEqual(vocab_size_2, 0)
self.assertEqual(vocab_size, vocab_size_2)
self.assertEqual(added_toks, len(new_toks))
self.assertEqual(all_size_2, all_size + len(new_toks))
words = "aaaaa bbbbbb low cccccccccdddddddd l".split()
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
tokens = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
self.assertGreaterEqual(len(tokens), 4)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
vocab_size_3 = tokenizer.vocab_size
all_size_3 = len(tokenizer)
self.assertNotEqual(vocab_size_3, 0)
self.assertEqual(vocab_size, vocab_size_3)
self.assertEqual(added_toks_2, len(new_toks_2))
self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
words = ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l".split()
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
tokens = tokenizer.encode(
words,
boxes=boxes,
add_special_tokens=False,
)
self.assertGreaterEqual(len(tokens), 6)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[0], tokens[1])
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokens[-3])
self.assertEqual(tokens[0], tokenizer.eos_token_id)
self.assertEqual(tokens[-2], tokenizer.pad_token_id)
@require_tokenizers
def test_encode_decode_with_spaces(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
new_toks = [AddedToken("[ABC]", normalized=False), AddedToken("[DEF]", normalized=False)]
tokenizer.add_tokens(new_toks)
input = "[ABC][DEF][ABC][DEF]"
if self.space_between_special_tokens:
output = "[ABC] [DEF] [ABC] [DEF]"
else:
output = input
encoded = tokenizer.encode(input.split(), boxes=boxes, add_special_tokens=False)
decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
self.assertIn(decoded, [output, output.lower()])
@unittest.skip("Not implemented")
def test_right_and_left_truncation(self):
pass
@unittest.skip("Not implemented")
def test_split_special_tokens(self):
pass
def test_encode_plus_with_padding(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
                # check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, words)
padding_size = 10
padding_idx = tokenizer.pad_token_id
encoded_sequence = tokenizer.encode_plus(words, boxes=boxes, return_special_tokens_mask=True)
input_ids = encoded_sequence["input_ids"]
special_tokens_mask = encoded_sequence["special_tokens_mask"]
sequence_length = len(input_ids)
                # Test that 'longest' (padding=True) and no padding (padding=False) leave a single sequence unchanged
tokenizer.padding_side = "right"
not_padded_sequence = tokenizer.encode_plus(
words,
boxes=boxes,
                    padding=True,
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
self.assertTrue(sequence_length == not_padded_sequence_length)
self.assertTrue(input_ids == not_padded_input_ids)
self.assertTrue(special_tokens_mask == not_padded_special_tokens_mask)
not_padded_sequence = tokenizer.encode_plus(
words,
boxes=boxes,
padding=False,
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
self.assertTrue(sequence_length == not_padded_sequence_length)
self.assertTrue(input_ids == not_padded_input_ids)
self.assertTrue(special_tokens_mask == not_padded_special_tokens_mask)
# Test right padding
tokenizer.padding_side = "right"
right_padded_sequence = tokenizer.encode_plus(
words,
boxes=boxes,
max_length=sequence_length + padding_size,
padding="max_length",
return_special_tokens_mask=True,
)
right_padded_input_ids = right_padded_sequence["input_ids"]
right_padded_special_tokens_mask = right_padded_sequence["special_tokens_mask"]
right_padded_sequence_length = len(right_padded_input_ids)
self.assertTrue(sequence_length + padding_size == right_padded_sequence_length)
self.assertTrue(input_ids + [padding_idx] * padding_size == right_padded_input_ids)
self.assertTrue(special_tokens_mask + [1] * padding_size == right_padded_special_tokens_mask)
# Test left padding
tokenizer.padding_side = "left"
left_padded_sequence = tokenizer.encode_plus(
words,
boxes=boxes,
max_length=sequence_length + padding_size,
padding="max_length",
return_special_tokens_mask=True,
)
left_padded_input_ids = left_padded_sequence["input_ids"]
left_padded_special_tokens_mask = left_padded_sequence["special_tokens_mask"]
left_padded_sequence_length = len(left_padded_input_ids)
self.assertTrue(sequence_length + padding_size == left_padded_sequence_length)
self.assertTrue([padding_idx] * padding_size + input_ids == left_padded_input_ids)
self.assertTrue([1] * padding_size + special_tokens_mask == left_padded_special_tokens_mask)
if "token_type_ids" in tokenizer.model_input_names:
token_type_ids = encoded_sequence["token_type_ids"]
left_padded_token_type_ids = left_padded_sequence["token_type_ids"]
right_padded_token_type_ids = right_padded_sequence["token_type_ids"]
assert token_type_ids + [0] * padding_size == right_padded_token_type_ids
assert [0] * padding_size + token_type_ids == left_padded_token_type_ids
if "attention_mask" in tokenizer.model_input_names:
attention_mask = encoded_sequence["attention_mask"]
right_padded_attention_mask = right_padded_sequence["attention_mask"]
left_padded_attention_mask = left_padded_sequence["attention_mask"]
self.assertTrue(attention_mask + [0] * padding_size == right_padded_attention_mask)
self.assertTrue([0] * padding_size + attention_mask == left_padded_attention_mask)
def test_internal_consistency(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
tokens = []
for word in words:
tokens.extend(tokenizer.tokenize(word))
ids = tokenizer.convert_tokens_to_ids(tokens)
ids_2 = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
self.assertListEqual(ids, ids_2)
tokens_2 = tokenizer.convert_ids_to_tokens(ids)
self.assertNotEqual(len(tokens_2), 0)
text_2 = tokenizer.decode(ids)
self.assertIsInstance(text_2, str)
output_text = "a weirdly test"
self.assertEqual(text_2, output_text)
def test_mask_output(self):
tokenizers = self.get_tokenizers(fast=False, do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
if (
tokenizer.build_inputs_with_special_tokens.__qualname__.split(".")[0] != "PreTrainedTokenizer"
and "token_type_ids" in tokenizer.model_input_names
):
information = tokenizer.encode_plus(words, boxes=boxes, add_special_tokens=True)
sequences, mask = information["input_ids"], information["token_type_ids"]
self.assertEqual(len(sequences), len(mask))
def test_number_of_added_tokens(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# test 1: single sequence
words, boxes = self.get_words_and_boxes()
sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
attached_sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=True)
# Method is implemented (e.g. not GPT-2)
if len(attached_sequences) != 2:
self.assertEqual(
tokenizer.num_special_tokens_to_add(pair=False), len(attached_sequences) - len(sequences)
)
# test 2: two sequences
question, words, boxes = self.get_question_words_and_boxes()
sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=False)
attached_sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=True)
# Method is implemented (e.g. not GPT-2)
if len(attached_sequences) != 2:
self.assertEqual(
tokenizer.num_special_tokens_to_add(pair=True), len(attached_sequences) - len(sequences)
)
def test_padding_to_max_length(self):
"""We keep this test for backward compatibility but it should be removed when `pad_to_max_length` will be deprecated"""
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
padding_size = 10
                # Check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, words)
padding_idx = tokenizer.pad_token_id
# Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "right"
encoded_sequence = tokenizer.encode(words, boxes=boxes)
sequence_length = len(encoded_sequence)
                # FIXME: the next line should use padding="max_length" to avoid the deprecation warning
padded_sequence = tokenizer.encode(
words, boxes=boxes, max_length=sequence_length + padding_size, pad_to_max_length=True
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert encoded_sequence + [padding_idx] * padding_size == padded_sequence
# Check that nothing is done when a maximum length is not specified
encoded_sequence = tokenizer.encode(words, boxes=boxes)
sequence_length = len(encoded_sequence)
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(words, boxes=boxes, pad_to_max_length=True)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
def test_padding(self, max_length=50):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
pad_token_id = tokenizer_p.pad_token_id
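                # Throughout this test, `tokenizer_r` is the Rust-backed (fast) tokenizer and
                # `tokenizer_p` the pure-Python (slow) one; both are expected to pad identically.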
# Encode - Simple input
words, boxes = self.get_words_and_boxes()
input_r = tokenizer_r.encode(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
input_p = tokenizer_p.encode(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode(words, boxes=boxes, max_length=max_length, padding="max_length")
input_p = tokenizer_p.encode(words, boxes=boxes, max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode(words, boxes=boxes, padding="longest")
input_p = tokenizer_p.encode(words, boxes=boxes, padding=True)
self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
# Encode - Pair input
question, words, boxes = self.get_question_words_and_boxes()
input_r = tokenizer_r.encode(
question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
)
input_p = tokenizer_p.encode(
question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
)
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode(question, words, boxes=boxes, max_length=max_length, padding="max_length")
input_p = tokenizer_p.encode(question, words, boxes=boxes, max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode(question, words, boxes=boxes, padding=True)
input_p = tokenizer_p.encode(question, words, boxes=boxes, padding="longest")
self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
# Encode_plus - Simple input
words, boxes = self.get_words_and_boxes()
input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=max_length, padding="max_length")
input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus(words, boxes=boxes, padding="longest")
input_p = tokenizer_p.encode_plus(words, boxes=boxes, padding=True)
self.assert_padded_input_match(
input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
# Encode_plus - Pair input
question, words, boxes = self.get_question_words_and_boxes()
input_r = tokenizer_r.encode_plus(
question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
)
input_p = tokenizer_p.encode_plus(
question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
)
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus(
question, words, boxes=boxes, max_length=max_length, padding="max_length"
)
input_p = tokenizer_p.encode_plus(
question, words, boxes=boxes, max_length=max_length, padding="max_length"
)
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus(question, words, boxes=boxes, padding="longest")
input_p = tokenizer_p.encode_plus(question, words, boxes=boxes, padding=True)
self.assert_padded_input_match(
input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
# Batch_encode_plus - Simple input
words, boxes = self.get_words_and_boxes_batch()
input_r = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
pad_to_max_length=True,
)
input_p = tokenizer_p.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
pad_to_max_length=True,
)
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
padding="max_length",
)
input_p = tokenizer_p.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
padding="max_length",
)
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
padding="longest",
)
input_p = tokenizer_p.batch_encode_plus(
words,
boxes=boxes,
max_length=max_length,
padding=True,
)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes, padding="longest")
input_p = tokenizer_p.batch_encode_plus(words, boxes=boxes, padding=True)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
# Batch_encode_plus - Pair input
questions, words, boxes = self.get_question_words_and_boxes_batch()
input_r = tokenizer_r.batch_encode_plus(
list(zip(questions, words)),
is_pair=True,
boxes=boxes,
max_length=max_length,
truncation=True,
padding="max_length",
)
input_p = tokenizer_p.batch_encode_plus(
list(zip(questions, words)),
is_pair=True,
boxes=boxes,
max_length=max_length,
truncation=True,
padding="max_length",
)
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.batch_encode_plus(
list(zip(questions, words)),
is_pair=True,
boxes=boxes,
padding=True,
)
input_p = tokenizer_p.batch_encode_plus(
list(zip(questions, words)),
is_pair=True,
boxes=boxes,
padding="longest",
)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
# Using pad on single examples after tokenization
words, boxes = self.get_words_and_boxes()
input_r = tokenizer_r.encode_plus(words, boxes=boxes)
input_r = tokenizer_r.pad(input_r)
input_p = tokenizer_r.encode_plus(words, boxes=boxes)
input_p = tokenizer_r.pad(input_p)
self.assert_padded_input_match(
input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
)
# Using pad on single examples after tokenization
input_r = tokenizer_r.encode_plus(words, boxes=boxes)
input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")
input_p = tokenizer_r.encode_plus(words, boxes=boxes)
input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
# Using pad after tokenization
words, boxes = self.get_words_and_boxes_batch()
input_r = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
)
input_r = tokenizer_r.pad(input_r)
input_p = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
)
input_p = tokenizer_r.pad(input_p)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
# Using pad after tokenization
words, boxes = self.get_words_and_boxes_batch()
input_r = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
)
input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")
input_p = tokenizer_r.batch_encode_plus(
words,
boxes=boxes,
)
input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length")
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
def test_padding_warning_message_fast_tokenizer(self):
if not self.test_rust_tokenizer:
return
words, boxes = self.get_words_and_boxes_batch()
tokenizer_fast = self.get_rust_tokenizer()
encoding_fast = tokenizer_fast(
words,
boxes=boxes,
)
with self.assertLogs("transformers", level="WARNING") as cm:
tokenizer_fast.pad(encoding_fast)
self.assertEqual(len(cm.records), 1)
self.assertIn(
"Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to"
" encode the text followed by a call to the `pad` method to get a padded encoding.",
cm.records[0].message,
)
if not self.test_slow_tokenizer:
return
tokenizer_slow = self.get_tokenizer()
encoding_slow = tokenizer_slow(
words,
boxes=boxes,
)
with self.assertLogs(level="WARNING") as cm:
# We want to assert there are no warnings, but the 'assertLogs' method does not support that.
# Therefore, we are adding a dummy warning, and then we will assert it is the only warning.
logger.warning("Dummy warning")
tokenizer_slow.pad(encoding_slow)
self.assertEqual(len(cm.records), 1)
self.assertIn(
"Dummy warning",
cm.records[0].message,
)
def test_call(self):
        # Tests that __call__ wraps encode_plus and batch_encode_plus
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Test not batched
words, boxes = self.get_words_and_boxes()
encoded_sequences_1 = tokenizer.encode_plus(words, boxes=boxes)
encoded_sequences_2 = tokenizer(words, boxes=boxes)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
# Test not batched pairs
question, words, boxes = self.get_question_words_and_boxes()
                encoded_sequences_1 = tokenizer.encode_plus(question, words, boxes=boxes)
                encoded_sequences_2 = tokenizer(question, words, boxes=boxes)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
# Test batched
words, boxes = self.get_words_and_boxes_batch()
encoded_sequences_1 = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes)
encoded_sequences_2 = tokenizer(words, boxes=boxes)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
def test_batch_encode_plus_batch_sequence_length(self):
# Tests that all encoded values have the correct size
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes_batch()
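                # Encoding each example individually with `encode_plus` must match the
                # corresponding entries of `batch_encode_plus` when no padding is applied.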
encoded_sequences = [
tokenizer.encode_plus(words_example, boxes=boxes_example)
for words_example, boxes_example in zip(words, boxes)
]
encoded_sequences_batch = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes, padding=False)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
maximum_length = len(
max([encoded_sequence["input_ids"] for encoded_sequence in encoded_sequences], key=len)
)
                # Check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, words)
encoded_sequences_padded = [
tokenizer.encode_plus(
words_example, boxes=boxes_example, max_length=maximum_length, padding="max_length"
)
for words_example, boxes_example in zip(words, boxes)
]
encoded_sequences_batch_padded = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, padding=True
)
self.assertListEqual(
encoded_sequences_padded,
self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded),
)
                # Check that 'longest' padding is insensitive to a max_length
encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, padding=True
)
encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, max_length=maximum_length + 10, padding="longest"
)
for key in encoded_sequences_batch_padded_1.keys():
self.assertListEqual(
encoded_sequences_batch_padded_1[key],
encoded_sequences_batch_padded_2[key],
)
                # Check that 'no_padding' is insensitive to a max_length
encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, padding=False
)
encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, max_length=maximum_length + 10, padding=False
)
for key in encoded_sequences_batch_padded_1.keys():
self.assertListEqual(
encoded_sequences_batch_padded_1[key],
encoded_sequences_batch_padded_2[key],
)
@unittest.skip("batch_encode_plus does not handle overflowing tokens.")
def test_batch_encode_plus_overflowing_tokens(self):
pass
def test_batch_encode_plus_padding(self):
# Test that padded sequences are equivalent between batch_encode_plus and encode_plus
# Right padding tests
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes_batch()
max_length = 100
                # Check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, words)
encoded_sequences = [
tokenizer.encode_plus(
words_example, boxes=boxes_example, max_length=max_length, padding="max_length"
)
for words_example, boxes_example in zip(words, boxes)
]
encoded_sequences_batch = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, max_length=max_length, padding="max_length"
)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
# Left padding tests
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
tokenizer.padding_side = "left"
words, boxes = self.get_words_and_boxes_batch()
max_length = 100
                # Check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, words)
encoded_sequences = [
tokenizer.encode_plus(
words_example, boxes=boxes_example, max_length=max_length, padding="max_length"
)
for words_example, boxes_example in zip(words, boxes)
]
encoded_sequences_batch = tokenizer.batch_encode_plus(
words, is_pair=False, boxes=boxes, max_length=max_length, padding="max_length"
)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
def test_padding_to_multiple_of(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if tokenizer.pad_token is None:
self.skipTest("No padding token.")
else:
words, boxes = self.get_words_and_boxes()
# empty_tokens = tokenizer([""], [[]], padding=True, pad_to_multiple_of=8)
normal_tokens = tokenizer(words, boxes=boxes, padding=True, pad_to_multiple_of=8)
# for key, value in empty_tokens.items():
# self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
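                    # Note: the next check relies on the unpadded test sequence not already
                    # being a multiple of 8 tokens long.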
normal_tokens = tokenizer(words, boxes=boxes, pad_to_multiple_of=8)
for key, value in normal_tokens.items():
                        self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is a multiple of 8 although no padding was requested")
# Should also work with truncation
normal_tokens = tokenizer(words, boxes=boxes, padding=True, truncation=True, pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
                    # Truncating to a length that is not a multiple of pad_to_multiple_of raises an error
self.assertRaises(
ValueError,
tokenizer.__call__,
words,
boxes=boxes,
padding=True,
truncation=True,
max_length=12,
pad_to_multiple_of=8,
)
def test_tokenizer_slow_store_full_signature(self):
signature = inspect.signature(self.tokenizer_class.__init__)
tokenizer = self.get_tokenizer()
for parameter_name, parameter in signature.parameters.items():
if parameter.default != inspect.Parameter.empty:
self.assertIn(parameter_name, tokenizer.init_kwargs)
def test_build_inputs_with_special_tokens(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Input token ids
words, boxes = self.get_words_and_boxes()
input_simple = tokenizer_p.encode(words, boxes=boxes, add_special_tokens=False)
input_pair = tokenizer_p.encode(words, boxes=boxes, add_special_tokens=False)
# Generate output
output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple)
output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple)
self.assertEqual(output_p, output_r)
# Generate pair output
output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair)
output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair)
self.assertEqual(output_p, output_r)
def test_special_tokens_mask_input_pairs(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
encoded_sequence = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
encoded_sequence_dict = tokenizer.encode_plus(
words,
boxes=boxes,
add_special_tokens=True,
return_special_tokens_mask=True,
# add_prefix_space=False,
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
]
filtered_sequence = [x for x in filtered_sequence if x is not None]
self.assertEqual(encoded_sequence, filtered_sequence)
def test_special_tokens_mask(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
# Testing single inputs
encoded_sequence = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
encoded_sequence_dict = tokenizer.encode_plus(
words, boxes=boxes, add_special_tokens=True, return_special_tokens_mask=True
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [x for i, x in enumerate(encoded_sequence_w_special) if not special_tokens_mask[i]]
self.assertEqual(encoded_sequence, filtered_sequence)
def test_save_and_load_tokenizer(self):
# safety check on max_len default value so we are sure the test works
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
self.assertNotEqual(tokenizer.model_max_length, 42)
# Now let's start the test
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
words, boxes = self.get_words_and_boxes()
tmpdirname = tempfile.mkdtemp()
before_tokens = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
before_vocab = tokenizer.get_vocab()
tokenizer.save_pretrained(tmpdirname)
after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
after_tokens = after_tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
after_vocab = after_tokenizer.get_vocab()
self.assertListEqual(before_tokens, after_tokens)
self.assertDictEqual(before_vocab, after_vocab)
shutil.rmtree(tmpdirname)
def test_right_and_left_padding(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
sequence = "Sequence"
padding_size = 10
                # Check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, sequence)
padding_idx = tokenizer.pad_token_id
# RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "right"
encoded_sequence = tokenizer.encode(words, boxes=boxes)
sequence_length = len(encoded_sequence)
padded_sequence = tokenizer.encode(
words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length"
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert encoded_sequence + [padding_idx] * padding_size == padded_sequence
# LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "left"
encoded_sequence = tokenizer.encode(words, boxes=boxes)
sequence_length = len(encoded_sequence)
padded_sequence = tokenizer.encode(
words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length"
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert [padding_idx] * padding_size + encoded_sequence == padded_sequence
# RIGHT & LEFT PADDING - Check that nothing is done for 'longest' and 'no_padding'
encoded_sequence = tokenizer.encode(words, boxes=boxes)
sequence_length = len(encoded_sequence)
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(words, boxes=boxes, padding=True)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
tokenizer.padding_side = "left"
padded_sequence_left = tokenizer.encode(words, boxes=boxes, padding="longest")
padded_sequence_left_length = len(padded_sequence_left)
assert sequence_length == padded_sequence_left_length
assert encoded_sequence == padded_sequence_left
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(words, boxes=boxes)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
tokenizer.padding_side = "left"
padded_sequence_left = tokenizer.encode(words, boxes=boxes, padding=False)
padded_sequence_left_length = len(padded_sequence_left)
assert sequence_length == padded_sequence_left_length
assert encoded_sequence == padded_sequence_left
def test_token_type_ids(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# test 1: single sequence
words, boxes = self.get_words_and_boxes()
output = tokenizer(words, boxes=boxes, return_token_type_ids=True)
# Assert that the token type IDs have the same length as the input IDs
self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"]))
# Assert that the token type IDs have the same length as the attention mask
self.assertEqual(len(output["token_type_ids"]), len(output["attention_mask"]))
self.assertIn(0, output["token_type_ids"])
self.assertNotIn(1, output["token_type_ids"])
# test 2: two sequences (question + words)
question, words, boxes = self.get_question_words_and_boxes()
output = tokenizer(question, words, boxes, return_token_type_ids=True)
# Assert that the token type IDs have the same length as the input IDs
self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"]))
# Assert that the token type IDs have the same length as the attention mask
self.assertEqual(len(output["token_type_ids"]), len(output["attention_mask"]))
self.assertIn(0, output["token_type_ids"])
self.assertIn(1, output["token_type_ids"])
def test_offsets_mapping(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
text = ["a", "wonderful", "test"]
boxes = [[1, 8, 12, 20] for _ in range(len(text))]
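                # `return_offsets_mapping` is only supported by fast tokenizers; it returns one
                # (char_start, char_end) span per token.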
# No pair
tokens_with_offsets = tokenizer_r.encode_plus(
text,
boxes=boxes,
return_special_tokens_mask=True,
return_offsets_mapping=True,
add_special_tokens=True,
)
added_tokens = tokenizer_r.num_special_tokens_to_add(False)
offsets = tokens_with_offsets["offset_mapping"]
# Assert there is the same number of tokens and offsets
self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))
                # Assert that the number of special tokens matches `added_tokens`
self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)
# Pairs
text = "what's his name"
pair = ["a", "wonderful", "test"]
boxes = [[1, 8, 12, 20] for _ in range(len(pair))]
tokens_with_offsets = tokenizer_r.encode_plus(
text,
pair,
boxes=boxes,
return_special_tokens_mask=True,
return_offsets_mapping=True,
add_special_tokens=True,
)
added_tokens = tokenizer_r.num_special_tokens_to_add(True)
offsets = tokens_with_offsets["offset_mapping"]
# Assert there is the same number of tokens and offsets
self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))
                # Assert that the number of special tokens matches `added_tokens`
self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)
@require_torch
@require_detectron2
@slow
def test_torch_encode_plus_sent_to_model(self):
import torch
from transformers import MODEL_MAPPING, TOKENIZER_MAPPING
MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING)
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING:
return
config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
config = config_class()
if config.is_encoder_decoder or config.pad_token_id is None:
return
model = model_class(config)
# Make sure the model contains at least the full vocabulary size in its embedding matrix
                if hasattr(model.get_input_embeddings(), "weight"):
                    assert model.get_input_embeddings().weight.shape[0] >= len(tokenizer)
# Build sequence
words, boxes = self.get_words_and_boxes()
encoded_sequence = tokenizer.encode_plus(words, boxes=boxes, return_tensors="pt")
batch_encoded_sequence = tokenizer.batch_encode_plus(
[words, words], boxes=[boxes, boxes], return_tensors="pt"
)
# We add dummy image keys (as LayoutLMv2 actually also requires a feature extractor
# to prepare the image input)
encoded_sequence["image"] = torch.randn(1, 3, 224, 224)
batch_encoded_sequence["image"] = torch.randn(2, 3, 224, 224)
# This should not fail
with torch.no_grad(): # saves some time
model(**encoded_sequence)
model(**batch_encoded_sequence)
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
words, boxes = self.get_words_and_boxes()
ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=True)
rust_ids = rust_tokenizer.encode(words, boxes=boxes, add_special_tokens=True)
self.assertListEqual(ids, rust_ids)
def test_tokenization_python_rust_equals(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
words, boxes = self.get_words_and_boxes()
# Ensure basic input match
input_p = tokenizer_p.encode_plus(words, boxes=boxes)
input_r = tokenizer_r.encode_plus(words, boxes=boxes)
for key in filter(
lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
):
self.assertSequenceEqual(input_p[key], input_r[key])
input_pairs_p = tokenizer_p.encode_plus(words, boxes=boxes)
input_pairs_r = tokenizer_r.encode_plus(words, boxes=boxes)
for key in filter(
lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
):
self.assertSequenceEqual(input_pairs_p[key], input_pairs_r[key])
words = ["hello" for _ in range(1000)]
boxes = [[1000, 1000, 1000, 1000] for _ in range(1000)]
# Ensure truncation match
input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=512, truncation=True)
input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=512, truncation=True)
for key in filter(
lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
):
self.assertSequenceEqual(input_p[key], input_r[key])
# Ensure truncation with stride match
input_p = tokenizer_p.encode_plus(
words, boxes=boxes, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True
)
input_r = tokenizer_r.encode_plus(
words, boxes=boxes, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True
)
for key in filter(
lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
):
self.assertSequenceEqual(input_p[key], input_r[key][0])
def test_embeded_special_tokens(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
words, boxes = self.get_words_and_boxes()
tokens_r = tokenizer_r.encode_plus(
words,
boxes=boxes,
add_special_tokens=True,
)
tokens_p = tokenizer_p.encode_plus(
words,
boxes=boxes,
add_special_tokens=True,
)
for key in tokens_p.keys():
self.assertEqual(tokens_r[key], tokens_p[key])
if "token_type_ids" in tokens_r:
self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
tokens_r = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
tokens_p = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
self.assertSequenceEqual(tokens_r, tokens_p)
def test_compare_add_special_tokens(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
simple_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=False)
words, boxes = self.get_words_and_boxes()
# tokenize()
no_special_tokens = tokenizer_r.tokenize(" ".join(words), add_special_tokens=False)
with_special_tokens = tokenizer_r.tokenize(" ".join(words), add_special_tokens=True)
self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)
# encode()
no_special_tokens = tokenizer_r.encode(words, boxes=boxes, add_special_tokens=False)
with_special_tokens = tokenizer_r.encode(words, boxes=boxes, add_special_tokens=True)
self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)
# encode_plus()
no_special_tokens = tokenizer_r.encode_plus(words, boxes=boxes, add_special_tokens=False)
with_special_tokens = tokenizer_r.encode_plus(words, boxes=boxes, add_special_tokens=True)
for key in no_special_tokens.keys():
self.assertEqual(
len(no_special_tokens[key]),
len(with_special_tokens[key]) - simple_num_special_tokens_to_add,
)
                # batch_encode_plus
words, boxes = self.get_words_and_boxes_batch()
no_special_tokens = tokenizer_r.batch_encode_plus(words, boxes=boxes, add_special_tokens=False)
with_special_tokens = tokenizer_r.batch_encode_plus(words, boxes=boxes, add_special_tokens=True)
for key in no_special_tokens.keys():
for i_no, i_with in zip(no_special_tokens[key], with_special_tokens[key]):
self.assertEqual(len(i_no), len(i_with) - simple_num_special_tokens_to_add)
@slow
def test_layoutlmv2_truncation_integration_test(self):
words, boxes = self.get_words_and_boxes()
tokenizer = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased", model_max_length=512)
for i in range(12, 512):
new_encoded_inputs = tokenizer.encode(words, boxes=boxes, max_length=i, truncation=True)
# Ensure that the input IDs are less than the max length defined.
self.assertLessEqual(len(new_encoded_inputs), i)
tokenizer.model_max_length = 20
new_encoded_inputs = tokenizer.encode(words, boxes=boxes, truncation=True)
dropped_encoded_inputs = tokenizer.encode(words, boxes=boxes, truncation=True)
# Ensure that the input IDs are still truncated when no max_length is specified
self.assertListEqual(new_encoded_inputs, dropped_encoded_inputs)
self.assertLessEqual(len(new_encoded_inputs), 20)
@is_pt_tf_cross_test
def test_batch_encode_plus_tensors(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes_batch()
                # A tensor cannot be built from sequences that are not all the same size
self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors="pt")
self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors="tf")
if tokenizer.pad_token_id is None:
self.assertRaises(
ValueError,
tokenizer.batch_encode_plus,
words,
boxes=boxes,
padding=True,
return_tensors="pt",
)
self.assertRaises(
ValueError,
tokenizer.batch_encode_plus,
words,
boxes=boxes,
padding="longest",
return_tensors="tf",
)
else:
pytorch_tensor = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True, return_tensors="pt")
tensorflow_tensor = tokenizer.batch_encode_plus(
words, boxes=boxes, padding="longest", return_tensors="tf"
)
encoded_sequences = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True)
for key in encoded_sequences.keys():
pytorch_value = pytorch_tensor[key].tolist()
tensorflow_value = tensorflow_tensor[key].numpy().tolist()
encoded_value = encoded_sequences[key]
self.assertEqual(pytorch_value, tensorflow_value, encoded_value)
def test_sequence_ids(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
if not tokenizer.is_fast:
continue
with self.subTest(f"{tokenizer.__class__.__name__}"):
seq_0 = "Test this method."
seq_1 = ["With", "these", "inputs."]
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(seq_1))]
                # We want sequence 0 and sequence 1 to be tagged with sequence ids 0 and 1
                # respectively (regardless of whether the model uses token type ids).
                # We rely on this assumption in the QA pipeline, among other places.
output = tokenizer(seq_0.split(), boxes=boxes)
self.assertIn(0, output.sequence_ids())
output = tokenizer(seq_0, seq_1, boxes=boxes)
self.assertIn(0, output.sequence_ids())
self.assertIn(1, output.sequence_ids())
if tokenizer.num_special_tokens_to_add(pair=True):
self.assertIn(None, output.sequence_ids())
def test_special_tokens_initialization(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
added_tokens = [AddedToken("<special>", lstrip=True)]
tokenizer_r = self.rust_tokenizer_class.from_pretrained(
pretrained_name, additional_special_tokens=added_tokens, **kwargs
)
words = "Hey this is a <special> token".split()
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
r_output = tokenizer_r.encode(words, boxes=boxes)
special_token_id = tokenizer_r.encode(
["<special>"], boxes=[1000, 1000, 1000, 1000], add_special_tokens=False
)[0]
self.assertTrue(special_token_id in r_output)
if self.test_slow_tokenizer:
tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True
)
tokenizer_p = self.tokenizer_class.from_pretrained(
pretrained_name, additional_special_tokens=added_tokens, **kwargs
)
words = "Hey this is a <special> token".split()
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
p_output = tokenizer_p.encode(words, boxes=boxes)
cr_output = tokenizer_cr.encode(words, boxes=boxes)
self.assertEqual(p_output, r_output)
self.assertEqual(cr_output, r_output)
self.assertTrue(special_token_id in p_output)
self.assertTrue(special_token_id in cr_output)
def test_training_new_tokenizer(self):
# This feature only exists for fast tokenizers
if not self.test_rust_tokenizer:
return
tokenizer = self.get_rust_tokenizer()
new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100)
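        # `train_new_from_iterator` retrains the underlying tokenizers-library model on the
        # small corpus with a target vocab size of 100, while keeping the original
        # special-token and post-processing configuration.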
# Test we can use the new tokenizer with something not seen during training
text = [["this", "is", "the"], ["how", "are", "you"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8], [1, 3, 4, 8]], [[5, 6, 7, 8], [4, 5, 6, 7], [3, 9, 2, 7]]]
inputs = new_tokenizer(text, boxes=boxes)
self.assertEqual(len(inputs["input_ids"]), 2)
decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True)
expected_result = "this is the"
if tokenizer.backend_tokenizer.normalizer is not None:
expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
self.assertEqual(expected_result, decoded_input)
# We check that the parameters of the tokenizer remained the same
# Check we have the same number of added_tokens for both pair and non-pair inputs.
self.assertEqual(tokenizer.num_special_tokens_to_add(False), new_tokenizer.num_special_tokens_to_add(False))
self.assertEqual(tokenizer.num_special_tokens_to_add(True), new_tokenizer.num_special_tokens_to_add(True))
# Check we have the correct max_length for both pair and non-pair inputs.
self.assertEqual(tokenizer.max_len_single_sentence, new_tokenizer.max_len_single_sentence)
self.assertEqual(tokenizer.max_len_sentences_pair, new_tokenizer.max_len_sentences_pair)
# Assert the set of special tokens match as we didn't ask to change them
self.assertSequenceEqual(
tokenizer.all_special_tokens_extended,
new_tokenizer.all_special_tokens_extended,
)
self.assertDictEqual(tokenizer.special_tokens_map, new_tokenizer.special_tokens_map)
def test_training_new_tokenizer_with_special_tokens_change(self):
# This feature only exists for fast tokenizers
if not self.test_rust_tokenizer:
return
tokenizer = self.get_rust_tokenizer()
# Test with a special tokens map
class_signature = inspect.signature(tokenizer.__class__)
if "cls_token" in class_signature.parameters:
new_tokenizer = tokenizer.train_new_from_iterator(
SMALL_TRAINING_CORPUS, 100, special_tokens_map={tokenizer.cls_token: "<cls>"}
)
cls_id = new_tokenizer.get_vocab()["<cls>"]
self.assertEqual(new_tokenizer.cls_token, "<cls>")
self.assertEqual(new_tokenizer.cls_token_id, cls_id)
# Create a new mapping from the special tokens defined in the original tokenizer
special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy()
special_tokens_list.remove("additional_special_tokens")
special_tokens_map = {}
for token in special_tokens_list:
# Get the private one to avoid unnecessary warnings.
if getattr(tokenizer, f"_{token}") is not None:
special_token = getattr(tokenizer, token)
special_tokens_map[special_token] = f"{special_token}a"
# Train new tokenizer
new_tokenizer = tokenizer.train_new_from_iterator(
SMALL_TRAINING_CORPUS, 100, special_tokens_map=special_tokens_map
)
# Check the changes
for token in special_tokens_list:
# Get the private one to avoid unnecessary warnings.
if getattr(tokenizer, f"_{token}") is None:
continue
special_token = getattr(tokenizer, token)
if special_token in special_tokens_map:
new_special_token = getattr(new_tokenizer, token)
self.assertEqual(special_tokens_map[special_token], new_special_token)
new_id = new_tokenizer.get_vocab()[new_special_token]
self.assertEqual(getattr(new_tokenizer, f"{token}_id"), new_id)
# Check if the AddedToken / string format has been kept
for special_token in tokenizer.all_special_tokens_extended:
if isinstance(special_token, AddedToken) and special_token.content not in special_tokens_map:
# The special token must appear identically in the list of the new tokenizer.
self.assertTrue(
special_token in new_tokenizer.all_special_tokens_extended,
f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}",
)
elif isinstance(special_token, AddedToken):
# The special token must appear in the list of the new tokenizer as an object of type AddedToken with
# the same parameters as the old AddedToken except the content that the user has requested to change.
special_token_str = special_token.content
new_special_token_str = special_tokens_map[special_token_str]
find = False
for candidate in new_tokenizer.all_special_tokens_extended:
if (
isinstance(candidate, AddedToken)
and candidate.content == new_special_token_str
and candidate.lstrip == special_token.lstrip
and candidate.rstrip == special_token.rstrip
and candidate.normalized == special_token.normalized
and candidate.single_word == special_token.single_word
):
find = True
break
self.assertTrue(
find,
f"'{new_special_token_str}' doesn't appear in the list "
f"'{new_tokenizer.all_special_tokens_extended}' as an AddedToken with the same parameters as "
f"'{special_token}' in the list {tokenizer.all_special_tokens_extended}",
)
elif special_token not in special_tokens_map:
# The special token must appear identically in the list of the new tokenizer.
self.assertTrue(
special_token in new_tokenizer.all_special_tokens_extended,
f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}",
)
else:
# The special token must appear in the list of the new tokenizer as an object of type string.
self.assertTrue(special_tokens_map[special_token] in new_tokenizer.all_special_tokens_extended)
# Test we can use the new tokenizer with something not seen during training
words = [["this", "is"], ["hello", "🤗"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[1, 2, 3, 4], [5, 6, 7, 8]]]
inputs = new_tokenizer(words, boxes=boxes)
self.assertEqual(len(inputs["input_ids"]), 2)
decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True)
expected_result = "this is"
if tokenizer.backend_tokenizer.normalizer is not None:
expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
self.assertEqual(expected_result, decoded_input)
def test_prepare_for_model(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
# only test prepare_for_model for the slow tokenizer
if tokenizer.__class__.__name__ == "LayoutLMv2TokenizerFast":
continue
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
prepared_input_dict = tokenizer.prepare_for_model(words, boxes=boxes, add_special_tokens=True)
input_dict = tokenizer.encode_plus(words, boxes=boxes, add_special_tokens=True)
self.assertEqual(input_dict, prepared_input_dict)
def test_padding_different_model_input_name(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
pad_token_id = tokenizer_p.pad_token_id
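                # Check that `pad` keys off `model_input_names[0]` rather than a hard-coded
                # "input_ids": the encoded batches are renamed to "inputs" below and padding
                # must still work.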
words, boxes = self.get_words_and_boxes_batch()
input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes)
input_p = tokenizer_r.batch_encode_plus(words, boxes=boxes)
# rename encoded batch to "inputs"
input_r["inputs"] = input_r[tokenizer_r.model_input_names[0]]
del input_r[tokenizer_r.model_input_names[0]]
input_p["inputs"] = input_p[tokenizer_p.model_input_names[0]]
del input_p[tokenizer_p.model_input_names[0]]
# Renaming `input_ids` to `inputs`
tokenizer_r.model_input_names = ["inputs"] + tokenizer_r.model_input_names[1:]
tokenizer_p.model_input_names = ["inputs"] + tokenizer_p.model_input_names[1:]
input_r = tokenizer_r.pad(input_r, padding="longest")
input_p = tokenizer_r.pad(input_p, padding="longest")
max_length = len(input_p["inputs"][0])
self.assert_batch_padded_input_match(
input_r, input_p, max_length, pad_token_id, model_main_input_name="inputs"
)
def test_batch_encode_dynamic_overflowing(self):
"""
        When calling `batch_encode_plus` with multiple sequences, it can return a different
        number of overflowing encodings for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
]
        This needs to be padded so that it can be represented as a tensor
"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"):
if is_torch_available():
returned_tensor = "pt"
elif is_tf_available():
returned_tensor = "tf"
else:
returned_tensor = "jax"
# Single example
words, boxes = self.get_words_and_boxes()
tokens = tokenizer.encode_plus(
words,
boxes=boxes,
max_length=6,
padding=True,
truncation=True,
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
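                # Every returned tensor is (num_overflowing_windows, seq_len); `bbox` carries an
                # extra trailing dimension of 4 box coordinates, hence rank 3.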
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
if key != "bbox":
self.assertEqual(len(tokens[key].shape), 2)
else:
self.assertEqual(len(tokens[key].shape), 3)
# Batch of examples
# For these 2 examples, 3 training examples will be created
words, boxes = self.get_words_and_boxes_batch()
tokens = tokenizer.batch_encode_plus(
words,
boxes=boxes,
max_length=6,
padding=True,
truncation="only_first",
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
if key != "bbox":
self.assertEqual(len(tokens[key].shape), 2)
self.assertEqual(tokens[key].shape[-1], 6)
else:
self.assertEqual(len(tokens[key].shape), 3)
self.assertEqual(tokens[key].shape[-1], 4)
@unittest.skip("TO DO: overwrite this very extensive test.")
def test_alignement_methods(self):
pass
def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
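        """
        Build a short list of words (with dummy boxes) from the tokenizer's own vocabulary,
        keeping only tokens that decode to plain ASCII words and round-trip to a single id.
        Returns the words, their boxes and the corresponding input ids.
        """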
toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
toks = list(
filter(
lambda t: [t[0]]
== tokenizer.encode(t[1].split(" "), boxes=len(t[1]) * [[1, 1, 1, 1]], add_special_tokens=False),
toks,
)
)
if max_length is not None and len(toks) > max_length:
toks = toks[:max_length]
if min_length is not None and len(toks) < min_length and len(toks) > 0:
while len(toks) < min_length:
toks = toks + toks
# toks_str = [t[1] for t in toks]
toks_ids = [t[0] for t in toks]
# Ensure consistency
output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
if " " not in output_txt and len(toks_ids) > 1:
output_txt = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
+ " "
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
)
if with_prefix_space:
output_txt = " " + output_txt
words = output_txt.split(" ")
boxes = [[i, i, i, i] for i in range(len(words))]
output_ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
return words, boxes, output_ids
# @unittest.skip("LayoutLMv2 tokenizer requires boxes besides sequences.")
def test_maximum_encoding_length_pair_input(self):
tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Build a sequence from our model's vocabulary
stride = 2
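                # With truncation + `stride`, the overflowing slice contains the truncated tokens
                # plus `stride` extra tokens of context, hence the `2 + stride` arithmetic below.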
seq_0, boxes_0, ids = self.get_clean_sequence(tokenizer, max_length=20)
question_0 = " ".join(map(str, seq_0))
if len(ids) <= 2 + stride:
                    # seq_0 is a list of words, so lengthen it (and its boxes) by repetition
                    seq_0 = seq_0 * (2 + stride)
                    boxes_0 = boxes_0 * (2 + stride)
                    ids = None
seq0_tokens = tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)
self.assertGreater(len(seq0_tokens["input_ids"]), 2 + stride)
question_1 = "This is another sentence to be encoded."
seq_1 = ["what", "a", "weird", "test", "weirdly", "weird"]
boxes_1 = [[i, i, i, i] for i in range(len(seq_1))]
seq1_tokens = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)
if abs(len(seq0_tokens["input_ids"]) - len(seq1_tokens["input_ids"])) <= 2:
seq1_tokens_input_ids = seq1_tokens["input_ids"] + seq1_tokens["input_ids"]
seq_1 = tokenizer.decode(seq1_tokens_input_ids, clean_up_tokenization_spaces=False)
seq_1 = seq_1.split(" ")
boxes_1 = [[i, i, i, i] for i in range(len(seq_1))]
seq1_tokens = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)
self.assertGreater(len(seq1_tokens["input_ids"]), 2 + stride)
smallest = (
seq1_tokens["input_ids"]
if len(seq0_tokens["input_ids"]) > len(seq1_tokens["input_ids"])
else seq0_tokens["input_ids"]
)
# We are not using the special tokens - a bit too hard to test all the tokenizers with this
# TODO try this again later
sequence = tokenizer(
question_0, seq_1, boxes=boxes_1, add_special_tokens=False
) # , add_prefix_space=False)
# Test with max model input length
model_max_length = tokenizer.model_max_length
self.assertEqual(model_max_length, 100)
seq_2 = seq_0 * model_max_length
question_2 = " ".join(map(str, seq_2))
boxes_2 = boxes_0 * model_max_length
self.assertGreater(len(seq_2), model_max_length)
sequence1 = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)
total_length1 = len(sequence1["input_ids"])
sequence2 = tokenizer(question_2, seq_1, boxes=boxes_1, add_special_tokens=False)
total_length2 = len(sequence2["input_ids"])
self.assertLess(total_length1, model_max_length, "Issue with the testing sequence, please update it.")
self.assertGreater(
total_length2, model_max_length, "Issue with the testing sequence, please update it."
)
# Simple
padding_strategies = (
[False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False]
)
for padding_state in padding_strategies:
with self.subTest(f"{tokenizer.__class__.__name__} Padding: {padding_state}"):
for truncation_state in [True, "longest_first", "only_first"]:
with self.subTest(f"{tokenizer.__class__.__name__} Truncation: {truncation_state}"):
output = tokenizer(
question_2,
seq_1,
boxes=boxes_1,
padding=padding_state,
truncation=truncation_state,
)
self.assertEqual(len(output["input_ids"]), model_max_length)
self.assertEqual(len(output["bbox"]), model_max_length)
output = tokenizer(
[question_2],
[seq_1],
boxes=[boxes_1],
padding=padding_state,
truncation=truncation_state,
)
self.assertEqual(len(output["input_ids"][0]), model_max_length)
self.assertEqual(len(output["bbox"][0]), model_max_length)
# Simple
output = tokenizer(
question_1, seq_2, boxes=boxes_2, padding=padding_state, truncation="only_second"
)
self.assertEqual(len(output["input_ids"]), model_max_length)
self.assertEqual(len(output["bbox"]), model_max_length)
output = tokenizer(
[question_1], [seq_2], boxes=[boxes_2], padding=padding_state, truncation="only_second"
)
self.assertEqual(len(output["input_ids"][0]), model_max_length)
self.assertEqual(len(output["bbox"][0]), model_max_length)
# Simple with no truncation
# Reset warnings
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer(
question_1, seq_2, boxes=boxes_2, padding=padding_state, truncation=False
)
self.assertNotEqual(len(output["input_ids"]), model_max_length)
self.assertNotEqual(len(output["bbox"]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length"
" for this model"
)
)
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer(
[question_1], [seq_2], boxes=[boxes_2], padding=padding_state, truncation=False
)
self.assertNotEqual(len(output["input_ids"][0]), model_max_length)
self.assertNotEqual(len(output["bbox"][0]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length"
" for this model"
)
)
                # Check the order of the input ids, the overflowing tokens and the bbox sequences with truncation
truncated_first_sequence = (
tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)["input_ids"][:-2]
+ tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["input_ids"]
)
truncated_second_sequence = (
tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)["input_ids"]
+ tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["input_ids"][:-2]
)
truncated_longest_sequence = (
                    truncated_first_sequence
                    if len(seq0_tokens["input_ids"]) > len(seq1_tokens["input_ids"])
                    else truncated_second_sequence
)
overflow_first_sequence = (
tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)["input_ids"][-(2 + stride) :]
+ tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["input_ids"]
)
overflow_second_sequence = (
tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)["input_ids"]
+ tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["input_ids"][-(2 + stride) :]
)
overflow_longest_sequence = (
                    overflow_first_sequence
                    if len(seq0_tokens["input_ids"]) > len(seq1_tokens["input_ids"])
                    else overflow_second_sequence
)
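                # In a (question, words) pair the question tokens carry no layout information,
                # so the tokenizer assigns them the dummy box [0, 0, 0, 0]; the expected bbox
                # sequences below are built on that convention.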
bbox_first = [[0, 0, 0, 0]] * (len(seq_0) - 2)
bbox_first_sequence = bbox_first + tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["bbox"]
overflowing_token_bbox_first_sequence_slow = [[0, 0, 0, 0]] * (2 + stride)
overflowing_token_bbox_first_sequence_fast = [[0, 0, 0, 0]] * (2 + stride) + tokenizer(
seq_1, boxes=boxes_1, add_special_tokens=False
)["bbox"]
bbox_second = [[0, 0, 0, 0]] * len(seq_0)
bbox_second_sequence = (
bbox_second + tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)["bbox"][:-2]
)
overflowing_token_bbox_second_sequence_slow = tokenizer(
seq_1, boxes=boxes_1, add_special_tokens=False
)["bbox"][-(2 + stride) :]
overflowing_token_bbox_second_sequence_fast = [[0, 0, 0, 0]] * len(seq_0) + tokenizer(
seq_1, boxes=boxes_1, add_special_tokens=False
)["bbox"][-(2 + stride) :]
bbox_longest_sequence = (
bbox_first_sequence if len(seq0_tokens) > len(seq1_tokens) else bbox_second_sequence
)
overflowing_token_bbox_longest_sequence_fast = (
overflowing_token_bbox_first_sequence_fast
if len(seq0_tokens) > len(seq1_tokens)
else overflowing_token_bbox_second_sequence_fast
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, LayoutLMv2TokenizerFast):
information = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation="longest_first",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
truncated_sequence = information["input_ids"][0]
overflowing_tokens = information["input_ids"][1]
bbox = information["bbox"][0]
overflowing_bbox = information["bbox"][1]
self.assertEqual(len(information["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_longest_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest))
self.assertEqual(overflowing_tokens, overflow_longest_sequence)
self.assertEqual(bbox, bbox_longest_sequence)
self.assertEqual(len(overflowing_bbox), 2 + stride + len(smallest))
self.assertEqual(overflowing_bbox, overflowing_token_bbox_longest_sequence_fast)
else:
# No overflowing tokens when using 'longest' in python tokenizers
with self.assertRaises(ValueError) as context:
information = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation="longest_first",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
self.assertTrue(
context.exception.args[0].startswith(
"Not possible to return overflowing tokens for pair of sequences with the "
"`longest_first`. Please select another truncation strategy than `longest_first`, "
"for instance `only_second` or `only_first`."
)
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, LayoutLMv2TokenizerFast):
information = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation=True,
return_overflowing_tokens=True,
# add_prefix_space=False,
)
truncated_sequence = information["input_ids"][0]
overflowing_tokens = information["input_ids"][1]
bbox = information["bbox"][0]
overflowing_bbox = information["bbox"][1]
self.assertEqual(len(information["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_longest_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest))
self.assertEqual(overflowing_tokens, overflow_longest_sequence)
self.assertEqual(bbox, bbox_longest_sequence)
self.assertEqual(overflowing_bbox, overflowing_token_bbox_longest_sequence_fast)
else:
# No overflowing tokens when using 'longest' in python tokenizers
with self.assertRaises(ValueError) as context:
information = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation=True,
return_overflowing_tokens=True,
# add_prefix_space=False,
)
self.assertTrue(
context.exception.args[0].startswith(
"Not possible to return overflowing tokens for pair of sequences with the "
"`longest_first`. Please select another truncation strategy than `longest_first`, "
"for instance `only_second` or `only_first`."
)
)
information_first_truncated = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation="only_first",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, LayoutLMv2TokenizerFast):
truncated_sequence = information_first_truncated["input_ids"][0]
overflowing_tokens = information_first_truncated["input_ids"][1]
bbox = information_first_truncated["bbox"][0]
overflowing_bbox = information_first_truncated["bbox"][1]
self.assertEqual(len(information_first_truncated["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_first_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq1_tokens["input_ids"]))
self.assertEqual(overflowing_tokens, overflow_first_sequence)
self.assertEqual(bbox, bbox_first_sequence)
self.assertEqual(overflowing_bbox, overflowing_token_bbox_first_sequence_fast)
else:
truncated_sequence = information_first_truncated["input_ids"]
overflowing_tokens = information_first_truncated["overflowing_tokens"]
overflowing_bbox = information_first_truncated["overflowing_token_boxes"]
bbox = information_first_truncated["bbox"]
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_first_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, seq0_tokens["input_ids"][-(2 + stride) :])
self.assertEqual(bbox, bbox_first_sequence)
self.assertEqual(overflowing_bbox, overflowing_token_bbox_first_sequence_slow)
information_second_truncated = tokenizer(
question_0,
seq_1,
boxes=boxes_1,
max_length=len(sequence["input_ids"]) - 2,
add_special_tokens=False,
stride=stride,
truncation="only_second",
return_overflowing_tokens=True,
# add_prefix_space=False,
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, LayoutLMv2TokenizerFast):
truncated_sequence = information_second_truncated["input_ids"][0]
overflowing_tokens = information_second_truncated["input_ids"][1]
bbox = information_second_truncated["bbox"][0]
overflowing_bbox = information_second_truncated["bbox"][1]
self.assertEqual(len(information_second_truncated["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_second_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq0_tokens["input_ids"]))
self.assertEqual(overflowing_tokens, overflow_second_sequence)
self.assertEqual(bbox, bbox_second_sequence)
self.assertEqual(overflowing_bbox, overflowing_token_bbox_second_sequence_fast)
else:
truncated_sequence = information_second_truncated["input_ids"]
overflowing_tokens = information_second_truncated["overflowing_tokens"]
bbox = information_second_truncated["bbox"]
overflowing_bbox = information_second_truncated["overflowing_token_boxes"]
self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
self.assertEqual(truncated_sequence, truncated_second_sequence)
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, seq1_tokens["input_ids"][-(2 + stride) :])
self.assertEqual(bbox, bbox_second_sequence)
self.assertEqual(overflowing_bbox, overflowing_token_bbox_second_sequence_slow)
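# Note (an illustrative addition, not part of the original tests): the split
# asserted above comes down to how the two implementations report overflow when
# return_overflowing_tokens=True. A fast tokenizer returns the overflow as a
# second row of "input_ids"/"bbox", while a slow tokenizer returns separate
# "overflowing_tokens"/"overflowing_token_boxes" entries. A minimal sketch,
# assuming `tokenizer`, `words` and `boxes` are already defined:
#
#     enc = tokenizer(words, boxes=boxes, max_length=8, stride=2,
#                     truncation=True, return_overflowing_tokens=True)
#     # fast: overflow in enc["input_ids"][1]; slow: enc["overflowing_tokens"]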
def test_maximum_encoding_length_single_input(self):
tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
seq_0, boxes_0, ids = self.get_clean_sequence(tokenizer, max_length=20)
sequence = tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)
total_length = len(sequence["input_ids"])
self.assertGreater(
total_length, 4, "Issue with the testing sequence, please update it, it's too short"
)
# Test with max model input length
model_max_length = tokenizer.model_max_length
self.assertEqual(model_max_length, 100)
seq_1 = seq_0 * model_max_length
boxes_1 = boxes_0 * model_max_length
sequence1 = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)
total_length1 = len(sequence1["input_ids"])
self.assertGreater(
total_length1,
model_max_length,
"Issue with the testing sequence, please update it, it's too short",
)
# Simple
padding_strategies = (
[False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False]
)
for padding_state in padding_strategies:
with self.subTest(f"Padding: {padding_state}"):
for truncation_state in [True, "longest_first", "only_first"]:
with self.subTest(f"Truncation: {truncation_state}"):
output = tokenizer(
seq_1,
boxes=boxes_1,
padding=padding_state,
truncation=truncation_state,
)
self.assertEqual(len(output["input_ids"]), model_max_length)
self.assertEqual(len(output["bbox"]), model_max_length)
output = tokenizer(
[seq_1],
boxes=[boxes_1],
padding=padding_state,
truncation=truncation_state,
)
self.assertEqual(len(output["input_ids"][0]), model_max_length)
self.assertEqual(len(output["bbox"][0]), model_max_length)
# Simple with no truncation
# Reset warnings
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer(seq_1, boxes=boxes_1, padding=padding_state, truncation=False)
self.assertNotEqual(len(output["input_ids"]), model_max_length)
self.assertNotEqual(len(output["bbox"]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length"
" for this model"
)
)
tokenizer.deprecation_warnings = {}
with self.assertLogs("transformers", level="WARNING") as cm:
output = tokenizer([seq_1], boxes=[boxes_1], padding=padding_state, truncation=False)
self.assertNotEqual(len(output["input_ids"][0]), model_max_length)
self.assertNotEqual(len(output["bbox"][0]), model_max_length)
self.assertEqual(len(cm.records), 1)
self.assertTrue(
cm.records[0].message.startswith(
"Token indices sequence length is longer than the specified maximum sequence length"
" for this model"
)
)
# Check the order of Sequence of input ids, overflowing tokens and bbox sequence with truncation
stride = 2
information = tokenizer(
seq_0,
boxes=boxes_0,
max_length=total_length - 2,
add_special_tokens=False,
stride=stride,
truncation=True,
return_overflowing_tokens=True,
# add_prefix_space=False,
)
# Overflowing tokens are handled quite differently in slow and fast tokenizers
if isinstance(tokenizer, LayoutLMv2TokenizerFast):
truncated_sequence = information["input_ids"][0]
overflowing_tokens = information["input_ids"][1]
bbox = information["bbox"][0]
overflowing_bbox = information["bbox"][1]
self.assertEqual(len(information["input_ids"]), 2)
self.assertEqual(len(truncated_sequence), total_length - 2)
self.assertEqual(truncated_sequence, sequence["input_ids"][:-2])
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, sequence["input_ids"][-(2 + stride) :])
self.assertEqual(bbox, sequence["bbox"][:-2])
self.assertEqual(overflowing_bbox, sequence["bbox"][-(2 + stride) :])
else:
truncated_sequence = information["input_ids"]
overflowing_tokens = information["overflowing_tokens"]
bbox = information["bbox"]
overflowing_bbox = information["overflowing_token_boxes"]
self.assertEqual(len(truncated_sequence), total_length - 2)
self.assertEqual(truncated_sequence, sequence["input_ids"][:-2])
self.assertEqual(len(overflowing_tokens), 2 + stride)
self.assertEqual(overflowing_tokens, sequence["input_ids"][-(2 + stride) :])
self.assertEqual(bbox, sequence["bbox"][:-2])
self.assertEqual(overflowing_bbox, sequence["bbox"][-(2 + stride) :])
@unittest.skip("LayoutLMv2 tokenizer requires boxes besides sequences.")
def test_pretokenized_inputs(self):
pass
@unittest.skip("LayoutLMv2 tokenizer always expects pretokenized inputs.")
def test_compare_pretokenized_inputs(self):
pass
@unittest.skip("LayoutLMv2 fast tokenizer does not support prepare_for_model")
def test_compare_prepare_for_model(self):
pass
@slow
def test_only_label_first_subword(self):
words = ["hello", "niels"]
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
word_labels = [0, 1]
# test slow tokenizer
tokenizer_p = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
encoding = tokenizer_p(words, boxes=boxes, word_labels=word_labels)
self.assertListEqual(encoding.labels, [-100, 0, 1, -100, -100])
tokenizer_p = LayoutLMv2Tokenizer.from_pretrained(
"microsoft/layoutlmv2-base-uncased", only_label_first_subword=False
)
encoding = tokenizer_p(words, boxes=boxes, word_labels=word_labels)
self.assertListEqual(encoding.labels, [-100, 0, 1, 1, -100])
# test fast tokenizer
tokenizer_r = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
encoding = tokenizer_r(words, boxes=boxes, word_labels=word_labels)
self.assertListEqual(encoding.labels, [-100, 0, 1, -100, -100])
tokenizer_r = LayoutLMv2TokenizerFast.from_pretrained(
"microsoft/layoutlmv2-base-uncased", only_label_first_subword=False
)
encoding = tokenizer_r(words, boxes=boxes, word_labels=word_labels)
self.assertListEqual(encoding.labels, [-100, 0, 1, 1, -100])
@slow
def test_layoutlmv2_integration_test(self):
tokenizer_p = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
tokenizer_r = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
# There are 3 cases:
# CASE 1: document image classification (training + inference), document image token classification (inference),
# in which case only words and normalized bounding boxes are provided to the tokenizer
# CASE 2: document image token classification (training),
# in which case one also provides word labels to the tokenizer
# CASE 3: document image visual question answering (inference),
# in which case one also provides a question to the tokenizer
# We need to test all 3 cases both on batched and non-batched inputs.
# CASE 1: not batched
words, boxes = self.get_words_and_boxes()
expected_results = {'input_ids': [101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # fmt: skip
encoding_p = tokenizer_p(words, boxes=boxes, padding="max_length", max_length=20)
encoding_r = tokenizer_r(words, boxes=boxes, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 1: batched
words, boxes = self.get_words_and_boxes_batch()
expected_results = {'input_ids': [[101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [101, 7592, 2026, 2171, 2003, 3960, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip
encoding_p = tokenizer_p(words, boxes=boxes, padding="max_length", max_length=20)
encoding_r = tokenizer_r(words, boxes=boxes, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 2: not batched
words, boxes = self.get_words_and_boxes()
word_labels = [1, 2, 3]
expected_results = {'input_ids': [101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'labels': [-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # fmt: skip
encoding_p = tokenizer_p(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20)
encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 2: batched
words, boxes = self.get_words_and_boxes_batch()
word_labels = [[1, 2, 3], [2, 46, 17, 22, 3]]
expected_results = {'input_ids': [[101, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [101, 7592, 2026, 2171, 2003, 3960, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'labels': [[-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], [-100, 2, 46, 17, 22, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip
encoding_p = tokenizer_p(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20)
encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 3: not batched
question, words, boxes = self.get_question_words_and_boxes()
expected_results = {'input_ids': [101, 2054, 1005, 1055, 2010, 2171, 1029, 102, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]} # fmt: skip
encoding_p = tokenizer_p(question, words, boxes, padding="max_length", max_length=20)
encoding_r = tokenizer_r(question, words, boxes, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 3: batched
questions, words, boxes = self.get_question_words_and_boxes_batch()
expected_results = {'input_ids': [[101, 2054, 1005, 1055, 2010, 2171, 1029, 102, 1037, 6881, 2135, 3231, 102, 0, 0, 0, 0, 0, 0, 0], [101, 2129, 2003, 2002, 2170, 1029, 102, 2054, 1037, 21110, 2546, 3806, 2102, 2078, 102, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [336, 42, 353, 57], [34, 42, 66, 69], [34, 42, 66, 69], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]]} # fmt: skip
encoding_p = tokenizer_p(questions, words, boxes, padding="max_length", max_length=20)
encoding_r = tokenizer_r(questions, words, boxes, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
@unittest.skip("Doesn't support another framework than PyTorch")
def test_np_encode_plus_sent_to_model(self):
pass
@unittest.skip("Chat is not supported")
def test_chat_template(self):
pass
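# Illustrative usage sketch (an addition, not part of the original tests): the
# three cases exercised by test_layoutlmv2_integration_test reduce to the call
# signatures below; the checkpoint is the one used throughout this file and the
# word/box inputs are placeholders.
#
#     from transformers import LayoutLMv2TokenizerFast
#
#     tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
#     words = ["hello", "world"]
#     boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]  # one [x0, y0, x1, y1] per word, 0-1000 scale
#     enc = tokenizer(words, boxes=boxes)                       # case 1: classification
#     enc = tokenizer(words, boxes=boxes, word_labels=[0, 1])   # case 2: token classification
#     enc = tokenizer("what is this?", words, boxes)            # case 3: visual QA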
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py
|
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMv2ImageProcessor
class LayoutLMv2ImageProcessingTester(unittest.TestCase):
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
apply_ocr=True,
):
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.apply_ocr = apply_ocr
def prepare_image_processor_dict(self):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
def expected_output_image_shape(self, images):
return self.num_channels, self.size["height"], self.size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_pytesseract
class LayoutLMv2ImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = LayoutLMv2ImageProcessor if is_pytesseract_available() else None
def setUp(self):
self.image_processor_tester = LayoutLMv2ImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
image_processing = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "apply_ocr"))
def test_image_processor_from_dict_with_kwargs(self):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 18, "width": 18})
image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
def test_layoutlmv2_integration_test(self):
# with apply_ocr = True
image_processing = LayoutLMv2ImageProcessor()
from datasets import load_dataset
ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
image = Image.open(ds[0]["file"]).convert("RGB")
encoding = image_processing(image, return_tensors="pt")
self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
expected_words = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words, expected_words)
self.assertListEqual(encoding.boxes, expected_boxes)
# with apply_ocr = False
image_processing = LayoutLMv2ImageProcessor(apply_ocr=False)
encoding = image_processing(image, return_tensors="pt")
self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
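# Illustrative usage sketch (an addition, not part of the original tests): the
# integration test above corresponds to this minimal flow, assuming `image` is a
# PIL image.
#
#     from transformers import LayoutLMv2ImageProcessor
#
#     processor = LayoutLMv2ImageProcessor()                 # apply_ocr=True by default
#     encoding = processor(image, return_tensors="pt")       # pixel_values: (1, 3, 224, 224)
#     words, boxes = encoding.words, encoding.boxes          # Tesseract OCR output
#
#     processor = LayoutLMv2ImageProcessor(apply_ocr=False)
#     encoding = processor(image, return_tensors="pt")       # pixel_values only, no OCR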
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/dpr/test_tokenization_dpr.py
|
# coding=utf-8
# Copyright 2020 Huggingface
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import (
DPRContextEncoderTokenizer,
DPRContextEncoderTokenizerFast,
DPRQuestionEncoderTokenizer,
DPRQuestionEncoderTokenizerFast,
DPRReaderOutput,
DPRReaderTokenizer,
DPRReaderTokenizerFast,
)
from transformers.testing_utils import require_tokenizers, slow
from transformers.tokenization_utils_base import BatchEncoding
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DPRContextEncoderTokenizationTest(BertTokenizationTest):
tokenizer_class = DPRContextEncoderTokenizer
rust_tokenizer_class = DPRContextEncoderTokenizerFast
test_rust_tokenizer = True
@require_tokenizers
class DPRQuestionEncoderTokenizationTest(BertTokenizationTest):
tokenizer_class = DPRQuestionEncoderTokenizer
rust_tokenizer_class = DPRQuestionEncoderTokenizerFast
test_rust_tokenizer = True
@require_tokenizers
class DPRReaderTokenizationTest(BertTokenizationTest):
tokenizer_class = DPRReaderTokenizer
rust_tokenizer_class = DPRReaderTokenizerFast
test_rust_tokenizer = True
@slow
def test_decode_best_spans(self):
tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")
text_1 = tokenizer.encode("question sequence", add_special_tokens=False)
text_2 = tokenizer.encode("title sequence", add_special_tokens=False)
text_3 = tokenizer.encode("text sequence " * 4, add_special_tokens=False)
input_ids = [[101] + text_1 + [102] + text_2 + [102] + text_3]
reader_input = BatchEncoding({"input_ids": input_ids})
start_logits = [[0] * len(input_ids[0])]
end_logits = [[0] * len(input_ids[0])]
relevance_logits = [0]
reader_output = DPRReaderOutput(start_logits, end_logits, relevance_logits)
start_index, end_index = 8, 9
start_logits[0][start_index] = 10
end_logits[0][end_index] = 10
predicted_spans = tokenizer.decode_best_spans(reader_input, reader_output)
self.assertEqual(predicted_spans[0].start_index, start_index)
self.assertEqual(predicted_spans[0].end_index, end_index)
self.assertEqual(predicted_spans[0].doc_id, 0)
@slow
def test_call(self):
tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")
text_1 = tokenizer.encode("question sequence", add_special_tokens=False)
text_2 = tokenizer.encode("title sequence", add_special_tokens=False)
text_3 = tokenizer.encode("text sequence", add_special_tokens=False)
expected_input_ids = [101] + text_1 + [102] + text_2 + [102] + text_3
encoded_input = tokenizer(questions=["question sequence"], titles=["title sequence"], texts=["text sequence"])
self.assertIn("input_ids", encoded_input)
self.assertIn("attention_mask", encoded_input)
self.assertListEqual(encoded_input["input_ids"][0], expected_input_ids)
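# Illustrative usage sketch (an addition, not part of the original tests): the
# reader tokenizer packs each (question, title, text) triple as
# [CLS] question [SEP] title [SEP] text, which is what test_call asserts, and
# decode_best_spans turns reader logits back into answer spans:
#
#     tokenizer = DPRReaderTokenizer.from_pretrained("bert-base-uncased")
#     inputs = tokenizer(questions=["question sequence"], titles=["title sequence"],
#                        texts=["text sequence"])
#     # given a DPRReaderOutput `reader_output` for these inputs:
#     # spans = tokenizer.decode_best_spans(inputs, reader_output)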
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/dpr/test_modeling_tf_dpr.py
|
# coding=utf-8
# Copyright 2020 Huggingface
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
projection_dim=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.projection_dim = projection_dim
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = BertConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
)
config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_dpr_context_encoder(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFDPRContextEncoder(config=config)
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
def create_and_check_dpr_question_encoder(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFDPRQuestionEncoder(config=config)
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
def create_and_check_dpr_reader(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFDPRReader(config=config)
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids}
return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
test_resize_embeddings = False
test_missing_keys = False
test_pruning = False
test_head_masking = False
test_onnx = False
def setUp(self):
self.model_tester = TFDPRModelTester(self)
self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_dpr_context_encoder_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)
def test_dpr_question_encoder_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)
def test_dpr_reader_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFDPRContextEncoder.from_pretrained(model_name)
self.assertIsNotNone(model)
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFDPRQuestionEncoder.from_pretrained(model_name)
self.assertIsNotNone(model)
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFDPRReader.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_no_head(self):
model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
input_ids = tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
) # [CLS] hello, is my dog cute? [SEP]
output = model(input_ids)[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
expected_slice = tf.constant(
[
[
0.03236253,
0.12753335,
0.16818509,
0.00279786,
0.3896933,
0.24264945,
0.2178971,
-0.02335227,
-0.08481959,
-0.14324117,
]
]
)
self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/dpr/test_modeling_dpr.py
|
# coding=utf-8
# Copyright 2020 Huggingface
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
from transformers import DPRConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DPRContextEncoder, DPRQuestionEncoder, DPRReader, DPRReaderTokenizer
from transformers.models.dpr.modeling_dpr import (
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DPRModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=False,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
projection_dim=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.projection_dim = projection_dim
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return DPRConfig(
projection_dim=self.projection_dim,
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
)
def create_and_check_context_encoder(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = DPRContextEncoder(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
def create_and_check_question_encoder(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = DPRQuestionEncoder(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
def create_and_check_reader(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = DPRReader(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids}
return config, inputs_dict
@require_torch
class DPRModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = {"feature-extraction": DPRQuestionEncoder} if is_torch_available() else {}
test_resize_embeddings = False
test_missing_keys = False # why?
test_pruning = False
test_head_masking = False
def setUp(self):
self.model_tester = DPRModelTester(self)
self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_context_encoder_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_context_encoder(*config_and_inputs)
def test_question_encoder_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_question_encoder(*config_and_inputs)
def test_reader_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_reader(*config_and_inputs)
def test_init_changed_config(self):
config = self.model_tester.prepare_config_and_inputs()[0]
model = DPRQuestionEncoder(config=config)
model.to(torch_device)
model.eval()
with tempfile.TemporaryDirectory() as tmp_dirname:
model.save_pretrained(tmp_dirname)
model = DPRQuestionEncoder.from_pretrained(tmp_dirname, projection_dim=512)
self.assertIsNotNone(model)
@slow
def test_model_from_pretrained(self):
for model_name in DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = DPRContextEncoder.from_pretrained(model_name)
self.assertIsNotNone(model)
for model_name in DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = DPRQuestionEncoder.from_pretrained(model_name)
self.assertIsNotNone(model)
for model_name in DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = DPRReader.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
class DPRModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_no_head(self):
model = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base", return_dict=False)
model.to(torch_device)
input_ids = torch.tensor(
[[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]], dtype=torch.long, device=torch_device
) # [CLS] hello, is my dog cute? [SEP]
output = model(input_ids)[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
expected_slice = torch.tensor(
[
[
0.03236253,
0.12753335,
0.16818509,
0.00279786,
0.3896933,
0.24264945,
0.2178971,
-0.02335227,
-0.08481959,
-0.14324117,
]
],
dtype=torch.float,
device=torch_device,
)
self.assertTrue(torch.allclose(output[:, :10], expected_slice, atol=1e-4))
@slow
def test_reader_inference(self):
tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
model.to(torch_device)
encoded_inputs = tokenizer(
questions="What is love ?",
titles="Haddaway",
texts="What Is Love is a song recorded by the artist Haddaway",
padding=True,
return_tensors="pt",
)
encoded_inputs.to(torch_device)
outputs = model(**encoded_inputs)
# compare the actual values for a slice.
expected_start_logits = torch.tensor(
[[-10.3005, -10.7765, -11.4872, -11.6841, -11.9312, -10.3002, -9.8544, -11.7378, -12.0821, -10.2975]],
dtype=torch.float,
device=torch_device,
)
expected_end_logits = torch.tensor(
[[-11.0684, -11.7041, -11.5397, -10.3465, -10.8791, -6.8443, -11.9959, -11.0364, -10.0096, -6.8405]],
dtype=torch.float,
device=torch_device,
)
self.assertTrue(torch.allclose(outputs.start_logits[:, :10], expected_start_logits, atol=1e-4))
self.assertTrue(torch.allclose(outputs.end_logits[:, :10], expected_end_logits, atol=1e-4))
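# Illustrative usage sketch (an addition, not part of the original tests): in DPR
# retrieval, questions and passages are embedded with the two encoders tested
# above and ranked by dot product. A minimal sketch, assuming the single-nq
# checkpoints and already-tokenized `q_input_ids` / `c_input_ids`:
#
#     q_enc = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     c_enc = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
#     q_emb = q_enc(q_input_ids).pooler_output    # (num_questions, 768)
#     c_emb = c_enc(c_input_ids).pooler_output    # (num_passages, 768)
#     scores = q_emb @ c_emb.T                    # higher score = more relevant passage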
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/barthez/test_tokenization_barthez.py
|
# coding=utf-8
# Copyright 2020 Ecole Polytechnique and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = BarthezTokenizer
rust_tokenizer_class = BarthezTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp(self):
super().setUp()
tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
tokenizer.save_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
self.tokenizer = tokenizer
def test_convert_token_and_id(self):
"""Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
token = "<pad>"
token_id = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def test_get_vocab(self):
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "<s>")
self.assertEqual(vocab_keys[1], "<pad>")
self.assertEqual(vocab_keys[-1], "<mask>")
self.assertEqual(len(vocab_keys), 101_122)
def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size, 101_122)
@require_torch
def test_prepare_batch(self):
src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
batch = self.tokenizer(
src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
)
self.assertIsInstance(batch, BatchEncoding)
self.assertEqual((2, 6), batch.input_ids.shape)
self.assertEqual((2, 6), batch.attention_mask.shape)
result = batch.input_ids.tolist()[0]
self.assertListEqual(expected_src_tokens, result)
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = "I was born in 92000, and this is falsé."
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
@slow
def test_tokenizer_integration(self):
expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # fmt: skip
        # moussaKam/mbarthez is a French model, so we also use French texts.
sequences = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=expected_encoding,
model_name="moussaKam/mbarthez",
revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
sequences=sequences,
)
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/pix2struct/test_processor_pix2struct.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
if is_torch_available():
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
Pix2StructImageProcessor,
Pix2StructProcessor,
PreTrainedTokenizerFast,
T5Tokenizer,
)
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11,
reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_vision
@require_torch
class Pix2StructProcessorTest(unittest.TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
image_processor = Pix2StructImageProcessor()
tokenizer = T5Tokenizer.from_pretrained("t5-small")
processor = Pix2StructProcessor(image_processor, tokenizer)
processor.save_pretrained(self.tmpdirname)
def get_tokenizer(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
def get_image_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def prepare_image_inputs(self):
"""
This function prepares a list of random PIL images of the same fixed size.
"""
image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
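        # the arrays above are channels-first (C, H, W); PIL expects channels-last, hence the moveaxis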
image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def test_save_load_pretrained_additional_features(self):
processor = Pix2StructProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
processor = Pix2StructProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, Pix2StructImageProcessor)
def test_image_processor(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = Pix2StructProcessor(tokenizer=tokenizer, image_processor=image_processor)
image_input = self.prepare_image_inputs()
input_feat_extract = image_processor(image_input, return_tensors="np")
input_processor = processor(images=image_input, return_tensors="np")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
def test_tokenizer(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = Pix2StructProcessor(tokenizer=tokenizer, image_processor=image_processor)
input_str = "lower newer"
encoded_processor = processor(text=input_str)
encoded_tok = tokenizer(input_str, return_token_type_ids=False, add_special_tokens=True)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def test_processor(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = Pix2StructProcessor(tokenizer=tokenizer, image_processor=image_processor)
input_str = "lower newer"
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input)
self.assertListEqual(
list(inputs.keys()), ["flattened_patches", "attention_mask", "decoder_attention_mask", "decoder_input_ids"]
)
        # check that a ValueError is raised when no input is passed
with pytest.raises(ValueError):
processor()
def test_processor_max_patches(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = Pix2StructProcessor(tokenizer=tokenizer, image_processor=image_processor)
input_str = "lower newer"
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input)
max_patches = [512, 1024, 2048, 4096]
expected_hidden_size = [770, 770, 770, 770]
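        # 770 = 16 * 16 * 3 (pixels in a default 16x16 patch, 3 channels) + 2 (row and column indices)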
# with text
for i, max_patch in enumerate(max_patches):
inputs = processor(text=input_str, images=image_input, max_patches=max_patch)
self.assertEqual(inputs["flattened_patches"][0].shape[0], max_patch)
self.assertEqual(inputs["flattened_patches"][0].shape[1], expected_hidden_size[i])
# without text input
for i, max_patch in enumerate(max_patches):
inputs = processor(images=image_input, max_patches=max_patch)
self.assertEqual(inputs["flattened_patches"][0].shape[0], max_patch)
self.assertEqual(inputs["flattened_patches"][0].shape[1], expected_hidden_size[i])
def test_tokenizer_decode(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = Pix2StructProcessor(tokenizer=tokenizer, image_processor=image_processor)
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids)
decoded_tok = tokenizer.batch_decode(predicted_ids)
self.assertListEqual(decoded_tok, decoded_processor)
def test_model_input_names(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = Pix2StructProcessor(tokenizer=tokenizer, image_processor=image_processor)
input_str = "lower newer"
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input)
        # For now the processor supports only ["flattened_patches", "attention_mask", "decoder_attention_mask", "decoder_input_ids"]
self.assertListEqual(
list(inputs.keys()), ["flattened_patches", "attention_mask", "decoder_attention_mask", "decoder_input_ids"]
)
inputs = processor(text=input_str)
        # With text-only input, the processor returns only ["input_ids", "attention_mask"]
self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
size=None,
do_normalize=True,
do_convert_rgb=True,
patch_size=None,
):
size = size if size is not None else {"height": 20, "width": 20}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.size = size
self.do_normalize = do_normalize
self.do_convert_rgb = do_convert_rgb
self.max_patches = [512, 1024, 2048, 4096]
self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
def prepare_image_processor_dict(self):
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def prepare_dummy_image(self):
img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
return raw_image
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11,
reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
def setUp(self):
self.image_processor_tester = Pix2StructImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
image_processor = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processor, "do_normalize"))
self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
def test_expected_patches(self):
dummy_image = self.image_processor_tester.prepare_dummy_image()
image_processor = self.image_processing_class(**self.image_processor_dict)
max_patch = 2048
inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
def test_call_pil(self):
# Initialize image_processor
image_processor = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
# Test not batched input
expected_hidden_dim = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
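        # the trailing +2 covers the (row, column) position ids prepended to each flattened patch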
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
encoded_images = image_processor(
image_inputs[0], return_tensors="pt", max_patches=max_patch
).flattened_patches
self.assertEqual(
encoded_images.shape,
(1, max_patch, expected_hidden_dim),
)
# Test batched
encoded_images = image_processor(
image_inputs, return_tensors="pt", max_patches=max_patch
).flattened_patches
self.assertEqual(
encoded_images.shape,
(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
)
def test_call_vqa(self):
# Initialize image_processor
image_processor = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
# Test not batched input
expected_hidden_dim = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
image_processor.is_vqa = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(ValueError):
encoded_images = image_processor(
image_inputs[0], return_tensors="pt", max_patches=max_patch
).flattened_patches
dummy_text = "Hello"
encoded_images = image_processor(
image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
).flattened_patches
self.assertEqual(
encoded_images.shape,
(1, max_patch, expected_hidden_dim),
)
# Test batched
encoded_images = image_processor(
image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
).flattened_patches
self.assertEqual(
encoded_images.shape,
(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
)
def test_call_numpy(self):
# Initialize image_processor
image_processor = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
expected_hidden_dim = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
encoded_images = image_processor(
image_inputs[0], return_tensors="pt", max_patches=max_patch
).flattened_patches
self.assertEqual(
encoded_images.shape,
(1, max_patch, expected_hidden_dim),
)
# Test batched
encoded_images = image_processor(
image_inputs, return_tensors="pt", max_patches=max_patch
).flattened_patches
self.assertEqual(
encoded_images.shape,
(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
)
def test_call_numpy_4_channels(self):
# Initialize image_processor
image_processor = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
self.image_processor_tester.num_channels = 4
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
expected_hidden_dim = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
encoded_images = image_processor(
image_inputs[0], return_tensors="pt", max_patches=max_patch, input_data_format="channels_first"
).flattened_patches
self.assertEqual(
encoded_images.shape,
(1, max_patch, expected_hidden_dim),
)
# Test batched
encoded_images = image_processor(
image_inputs, return_tensors="pt", max_patches=max_patch, input_data_format="channels_first"
).flattened_patches
self.assertEqual(
encoded_images.shape,
(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
)
self.image_processor_tester.num_channels = 3
def test_call_pytorch(self):
# Initialize image_processor
image_processor = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
# Test not batched input
expected_hidden_dim = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
encoded_images = image_processor(
image_inputs[0], return_tensors="pt", max_patches=max_patch
).flattened_patches
self.assertEqual(
encoded_images.shape,
(1, max_patch, expected_hidden_dim),
)
# Test batched
encoded_images = image_processor(
image_inputs, return_tensors="pt", max_patches=max_patch
).flattened_patches
self.assertEqual(
encoded_images.shape,
(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
)
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11,
reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
def setUp(self):
self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
self.expected_encoded_image_num_channels = 3
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
image_processor = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processor, "do_normalize"))
self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
def test_call_pil(self):
# Initialize image_processor
image_processor = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
# Test not batched input
expected_hidden_dim = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
encoded_images = image_processor(
image_inputs[0], return_tensors="pt", max_patches=max_patch
).flattened_patches
self.assertEqual(
encoded_images.shape,
(1, max_patch, expected_hidden_dim),
)
# Test batched
encoded_images = image_processor(
image_inputs, return_tensors="pt", max_patches=max_patch
).flattened_patches
self.assertEqual(
encoded_images.shape,
(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
)
@unittest.skip("Pix2StructImageProcessor does not support 4 channels yet") # FIXME Amy
def test_call_numpy(self):
return super().test_call_numpy()
@unittest.skip("Pix2StructImageProcessor does not support 4 channels yet") # FIXME Amy
def test_call_pytorch(self):
        return super().test_call_pytorch()
    @unittest.skip("Pix2StructImageProcessor does not treat numpy and PIL 4 channel images consistently")  # FIXME Amy
    def test_call_numpy_4_channels(self):
        return super().test_call_numpy_4_channels()
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/pix2struct/test_modeling_pix2struct.py
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Pix2Struct model. """
import copy
import inspect
import os
import tempfile
import unittest
import numpy as np
import requests
from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
_config_zero_init,
floats_tensor,
ids_tensor,
random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
Pix2StructForConditionalGeneration,
Pix2StructProcessor,
Pix2StructTextModel,
Pix2StructVisionModel,
)
from transformers.models.pix2struct.modeling_pix2struct import PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
class Pix2StructVisionModelTester:
def __init__(
self,
parent,
batch_size=12,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
hidden_size=12,
patch_embed_hidden_size=12,
projection_dim=32,
max_patches=64,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
initializer_range=1e-10,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_embed_hidden_size = patch_embed_hidden_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.hidden_size = hidden_size
self.max_patches = max_patches
self.seq_length = self.max_patches
self.patch_proj_dim = ((patch_size**2) * num_channels) + 2
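        # the +2 accounts for the row and column indices prepended to each flattened patch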
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs(self):
flattened_patches = floats_tensor([self.batch_size, self.max_patches, self.patch_proj_dim])
config = self.get_config()
return config, flattened_patches
def get_config(self):
return Pix2StructVisionConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
patch_embed_hidden_size=self.patch_embed_hidden_size,
)
def create_and_check_model(self, config, flattened_patches):
model = Pix2StructVisionModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(flattened_patches)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, flattened_patches = config_and_inputs
inputs_dict = {
"flattened_patches": flattened_patches,
"attention_mask": torch.randint(0, 2, (self.batch_size, self.max_patches)),
}
return config, inputs_dict
@require_torch
class Pix2StructVisionModelTest(ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as Pix2Struct does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (Pix2StructVisionModel,) if is_torch_available() else ()
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
def setUp(self):
self.model_tester = Pix2StructVisionModelTester(self)
self.config_tester = ConfigTester(
self, config_class=Pix2StructVisionConfig, has_text_modality=False, hidden_size=37
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="Pix2StructVision does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_common_attributes(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["flattened_patches"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="Training is tested directly on `Pix2StructTextImageModelTest`")
def test_training(self):
pass
@unittest.skip(reason="Training is tested directly on `Pix2StructTextImageModelTest`")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="Training is tested directly on `Pix2StructTextImageModelTest`")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="Pix2StructVisionModel has no base class and is not available in MODEL_MAPPING")
def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(reason="Pix2StructVisionModel has no base class and is not available in MODEL_MAPPING")
def test_save_load_fast_init_to_base(self):
pass
@slow
def test_model_from_pretrained(self):
for model_name in PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = Pix2StructVisionModel.from_pretrained(model_name)
self.assertIsNotNone(model)
class Pix2StructTextModelTester:
def __init__(
self,
parent,
batch_size=12,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=12,
projection_dim=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
max_position_embeddings=512,
initializer_range=0.02,
bos_token_id=0,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.d_kv = hidden_size // num_attention_heads
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = scope
self.bos_token_id = bos_token_id
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
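        # turn the random mask into a contiguous block of ones followed by zeros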
if input_mask is not None:
batch_size, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
config = self.get_config()
return config, input_ids, input_mask
def get_config(self):
return Pix2StructTextConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
bos_token_id=self.bos_token_id,
d_kv=self.d_kv,
)
def create_and_check_model(self, config, input_ids, input_mask):
model = Pix2StructTextModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class Pix2StructTextModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (Pix2StructTextModel,) if is_torch_available() else ()
fx_compatible = False
test_pruning = False
test_head_masking = False
def setUp(self):
self.model_tester = Pix2StructTextModelTester(self)
self.config_tester = ConfigTester(self, config_class=Pix2StructTextConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="Training is tested directly on `Pix2StructTextImageModelTest`")
def test_training(self):
pass
@unittest.skip(reason="Training is tested directly on `Pix2StructTextImageModelTest`")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="Pix2Struct does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Pix2StructTextModel has no base class and is not available in MODEL_MAPPING")
def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(reason="Pix2StructTextModel has no base class and is not available in MODEL_MAPPING")
def test_save_load_fast_init_to_base(self):
pass
@slow
def test_model_from_pretrained(self):
for model_name in PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = Pix2StructTextModel.from_pretrained(model_name)
self.assertIsNotNone(model)
class Pix2StructModelTester:
def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True):
if text_kwargs is None:
text_kwargs = {}
if vision_kwargs is None:
vision_kwargs = {}
self.parent = parent
self.text_model_tester = Pix2StructTextModelTester(parent, **text_kwargs)
self.vision_model_tester = Pix2StructVisionModelTester(parent, **vision_kwargs)
self.is_training = is_training
def prepare_config_and_inputs(self):
text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
vision_config, flattened_patches = self.vision_model_tester.prepare_config_and_inputs()
config = self.get_config(text_config, vision_config)
return config, input_ids, attention_mask, flattened_patches
def get_config(self, text_config, vision_config):
return Pix2StructConfig.from_text_vision_configs(text_config, vision_config, projection_dim=64)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, decoder_attention_mask, flattened_patches = config_and_inputs
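        # patches padded by the image processor are all zeros, so a nonzero sum marks a real patch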
attention_mask = (flattened_patches.sum(dim=-1) != 0).float()
inputs_dict = {
"decoder_input_ids": input_ids,
"labels": input_ids,
"decoder_attention_mask": decoder_attention_mask,
"flattened_patches": flattened_patches,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class Pix2StructModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (Pix2StructForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = {"image-to-text": Pix2StructForConditionalGeneration} if is_torch_available() else {}
fx_compatible = False
test_head_masking = False
test_pruning = False
test_resize_embeddings = True
test_attention_outputs = False
test_torchscript = False
def setUp(self):
self.model_tester = Pix2StructModelTester(self)
def test_model(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
output = model(**input_dict)
self.assertEqual(
output[1].shape,
(
self.model_tester.vision_model_tester.batch_size,
self.model_tester.text_model_tester.seq_length,
self.model_tester.text_model_tester.vocab_size,
),
)
@unittest.skip(reason="Hidden_states is tested in individual model tests")
def test_hidden_states_output(self):
pass
@unittest.skip(reason="Inputs_embeds is tested in individual model tests")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Retain_grad is tested in individual model tests")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="Pix2StructModel does not have input/output embeddings")
def test_model_common_attributes(self):
pass
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = [
"flattened_patches",
"attention_mask",
"decoder_input_ids",
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"labels",
"decoder_inputs_embeds",
"use_cache",
]
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
def test_training(self):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes[:-1]:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            # hardcode labels to be the same as decoder_input_ids
            inputs["labels"] = inputs["decoder_input_ids"]
loss = model(**inputs).loss
loss.backward()
def test_training_gradient_checkpointing(self):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes[:-1]:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.use_cache = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.gradient_checkpointing_enable()
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            # hardcode labels to be the same as decoder_input_ids
            inputs["labels"] = inputs["decoder_input_ids"]
loss = model(**inputs).loss
loss.backward()
    # override as the `logit_scale` parameter initialization is different for Pix2Struct
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
                # check if `logit_scale` is initialized as per the original implementation
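                # (the CLIP-style temperature: log(1 / 0.07) ≈ 2.6593)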
if name == "logit_scale":
self.assertAlmostEqual(
param.data.item(),
np.log(1 / 0.07),
delta=1e-3,
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
else:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
# overwrite because `vocab_size` is not an attribute of `Pix2StructConfig` but rather `Pix2StructTextConfig`
def test_resize_tokens_embeddings(self):
original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.test_resize_embeddings:
return
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
if self.model_tester.is_training is False:
model.eval()
model_vocab_size = config.text_config.vocab_size
            # Retrieve the embeddings and clone them
model_embed = model.resize_token_embeddings(model_vocab_size)
cloned_embeddings = model_embed.weight.clone()
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Decoder input ids should be clamped to the maximum size of the vocabulary
if "decoder_input_ids" in inputs_dict:
inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that adding and removing tokens has not modified the first part of the embedding matrix.
models_equal = True
for p1, p2 in zip(cloned_embeddings, model_embed.weight):
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
# overwrite because `vocab_size` is not an attribute of `Pix2StructConfig` but rather `Pix2StructTextConfig`
def test_resize_embeddings_untied(self):
original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.test_resize_embeddings:
return
original_config.tie_word_embeddings = False
        # if the model cannot untie embeddings -> leave test
if original_config.tie_word_embeddings:
return
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config).to(torch_device)
# if no output embeddings -> leave test
if model.get_output_embeddings() is None:
continue
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_vocab_size = config.text_config.vocab_size
model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10)
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15)
            # Decoder input ids should be clamped to the maximum size of the vocabulary
if "decoder_input_ids" in inputs_dict:
inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
@unittest.skip(reason="Pix2Struct doesn't use tied weights")
def test_tied_model_weights_key_ignore(self):
pass
def _create_and_check_torchscript(self, config, inputs_dict):
if not self.test_torchscript:
return
configs_no_init = _config_zero_init(config) # To be sure we have no Nan
configs_no_init.torchscript = True
configs_no_init.return_dict = False
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
model.to(torch_device)
model.eval()
try:
input_ids = inputs_dict["input_ids"]
flattened_patches = inputs_dict["flattened_patches"] # Pix2Struct needs flattened_patches
traced_model = torch.jit.trace(model, (input_ids, flattened_patches))
except RuntimeError:
self.fail("Couldn't trace module.")
with tempfile.TemporaryDirectory() as tmp_dir_name:
pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")
try:
torch.jit.save(traced_model, pt_file_name)
except Exception:
self.fail("Couldn't save module.")
try:
loaded_model = torch.jit.load(pt_file_name)
except Exception:
self.fail("Couldn't load module.")
model.to(torch_device)
model.eval()
loaded_model.to(torch_device)
loaded_model.eval()
model_state_dict = model.state_dict()
loaded_model_state_dict = loaded_model.state_dict()
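            # the traced model's state dict also contains non-persistent buffers; collect and drop them before comparing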
non_persistent_buffers = {}
for key in loaded_model_state_dict.keys():
if key not in model_state_dict.keys():
non_persistent_buffers[key] = loaded_model_state_dict[key]
loaded_model_state_dict = {
key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
}
self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))
model_buffers = list(model.buffers())
for non_persistent_buffer in non_persistent_buffers.values():
found_buffer = False
for i, model_buffer in enumerate(model_buffers):
if torch.equal(non_persistent_buffer, model_buffer):
found_buffer = True
break
self.assertTrue(found_buffer)
model_buffers.pop(i)
models_equal = True
for layer_name, p1 in model_state_dict.items():
p2 = loaded_model_state_dict[layer_name]
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
def test_load_vision_text_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Save Pix2StructConfig and check if we can load Pix2StructVisionConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
vision_config = Pix2StructVisionConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())
# Save Pix2StructConfig and check if we can load Pix2StructTextConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
text_config = Pix2StructTextConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())
# We will verify our results on an image of a stop sign
def prepare_img():
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
im = Image.open(requests.get(url, stream=True).raw)
return im
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11,
reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_vision
@require_torch
@slow
class Pix2StructIntegrationTest(unittest.TestCase):
def test_inference_image_captioning(self):
model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-textcaps-base").to(torch_device)
processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
image = prepare_img()
# image only
inputs = processor(images=image, return_tensors="pt").to(torch_device)
predictions = model.generate(**inputs)
self.assertEqual(
processor.decode(predictions[0], skip_special_tokens=True), "A stop sign is on a street corner."
)
def test_batched_inference_image_captioning(self):
model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-textcaps-base").to(torch_device)
processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
image_1 = prepare_img()
second_url = (
"https://www.connollycove.com/wp-content/uploads/2019/06/temple-bar-dublin-world-famous-irish-pub.jpg"
)
image_2 = Image.open(requests.get(second_url, stream=True).raw)
# image only
inputs = processor(images=[image_1, image_2], return_tensors="pt").to(torch_device)
predictions = model.generate(**inputs)
self.assertEqual(
processor.decode(predictions[0], skip_special_tokens=True), "A stop sign is on a street corner."
)
self.assertEqual(
processor.decode(predictions[1], skip_special_tokens=True),
"A row of books including The Temple Bar and Guiness.",
)
def test_batched_inference_image_captioning_conditioned(self):
model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-textcaps-base").to(torch_device)
processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
image_1 = prepare_img()
second_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/temple-bar-dublin-world-famous-irish-pub.jpg"
image_2 = Image.open(requests.get(second_url, stream=True).raw)
texts = ["A picture of", "An photography of"]
# image only
inputs = processor(images=[image_1, image_2], text=texts, return_tensors="pt", add_special_tokens=False).to(
torch_device
)
predictions = model.generate(**inputs)
self.assertEqual(
processor.decode(predictions[0], skip_special_tokens=True),
"A picture of a stop sign with a red stop sign",
)
self.assertEqual(
processor.decode(predictions[1], skip_special_tokens=True),
"An photography of the Temple Bar and other places in the city.",
)
def test_vqa_model(self):
model_id = "google/pix2struct-ai2d-base"
image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg"
image = Image.open(requests.get(image_url, stream=True).raw)
model = Pix2StructForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch.bfloat16).to(
torch_device
)
processor = Pix2StructProcessor.from_pretrained(model_id)
# image only
text = "What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud"
inputs = processor(images=image, return_tensors="pt", text=text).to(torch_device, torch.bfloat16)
predictions = model.generate(**inputs)
self.assertEqual(processor.decode(predictions[0], skip_special_tokens=True), "ash cloud")
def test_vqa_model_batched(self):
model_id = "google/pix2struct-ai2d-base"
image_urls = [
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg",
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo-2.png",
]
images = [Image.open(requests.get(image_url, stream=True).raw) for image_url in image_urls]
texts = [
"What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud",
"What is the producer in the diagram? (1) Phytoplankton (2) Zooplankton (3) Large fish (4) Small fish",
]
model = Pix2StructForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch.bfloat16).to(
torch_device
)
processor = Pix2StructProcessor.from_pretrained(model_id)
inputs = processor(images=images, return_tensors="pt", text=texts).to(torch_device, torch.bfloat16)
predictions = model.generate(**inputs)
self.assertEqual(processor.decode(predictions[0], skip_special_tokens=True), "ash cloud")
self.assertEqual(processor.decode(predictions[1], skip_special_tokens=True), "Phytoplankton")
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/convbert/test_modeling_tf_convbert.py
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
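        # NOTE: the keyword arguments above are currently ignored; the tester hardcodes its values
        # below (e.g. hidden_size=384, plus ConvBERT-specific head_ratio and conv_kernel_size)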
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_mask = True
self.use_token_type_ids = True
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 384
self.num_hidden_layers = 2
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.embedding_size = 128
self.head_ratio = 2
self.conv_kernel_size = 9
self.num_groups = 1
self.scope = None
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = ConvBertConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
return_dict=True,
)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFConvBertModel(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
inputs = [input_ids, input_mask]
result = model(inputs)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFConvBertForMaskedLM(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = TFConvBertForSequenceClassification(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = TFConvBertForMultipleChoice(config=config)
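        # repeat each example num_choices times: (batch, seq) -> (batch, num_choices, seq)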
multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
inputs = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = TFConvBertForTokenClassification(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFConvBertForQuestionAnswering(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
test_pruning = False
test_head_masking = False
test_onnx = False
def setUp(self):
self.model_tester = TFConvBertModelTester(self)
self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def test_saved_model_creation_extended(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
if hasattr(config, "use_cache"):
config.use_cache = True
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
for model_class in self.all_model_classes:
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
num_out = len(model(class_inputs_dict))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=True)
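                # saved_model=True also exports a TF SavedModel, which lands under <tmpdirname>/saved_model/1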
saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
model = tf.keras.models.load_model(saved_model_dir)
outputs = model(class_inputs_dict)
if self.is_encoder_decoder:
output_hidden_states = outputs["encoder_hidden_states"]
output_attentions = outputs["encoder_attentions"]
else:
output_hidden_states = outputs["hidden_states"]
output_attentions = outputs["attentions"]
self.assertEqual(len(outputs), num_out)
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(output_hidden_states), expected_num_layers)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
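                # ConvBERT's mixed attention halves the number of self-attention heads via
                # head_ratio (2 by default), hence num_attention_heads / 2 below.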
self.assertListEqual(
list(output_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
)
@slow
def test_model_from_pretrained(self):
model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
self.assertIsNotNone(model)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
def check_decoder_attentions_output(outputs):
out_len = len(outputs)
self.assertEqual(out_len % 2, 0)
decoder_attentions = outputs.decoder_attentions
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
)
def check_encoder_attentions_output(outputs):
attentions = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
config.output_hidden_states = False
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
out_len = len(outputs)
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
if self.is_encoder_decoder:
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_decoder_attentions_output(outputs)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
config.output_hidden_states = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
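            # Enabling hidden states adds exactly one extra output (two for encoder-decoder models)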
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
self.assertEqual(model.config.output_hidden_states, True)
check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_masked_lm(self):
model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
output = model(input_ids)[0]
expected_shape = [1, 6, 768]
self.assertEqual(output.shape, expected_shape)
expected_slice = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
]
)
tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/convbert/test_modeling_convbert.py
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch ConvBERT model. """
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_accelerator, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertModel,
)
from transformers.models.convbert.modeling_convbert import CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class ConvBertModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return ConvBertConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = ConvBertModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = ConvBertForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = ConvBertForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = ConvBertForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = ConvBertForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = ConvBertForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
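        # Repeat each example across a new choice dimension: (batch, seq) -> (batch, num_choices, seq)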
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class ConvBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
ConvBertModel,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": ConvBertModel,
"fill-mask": ConvBertForMaskedLM,
"question-answering": ConvBertForQuestionAnswering,
"text-classification": ConvBertForSequenceClassification,
"token-classification": ConvBertForTokenClassification,
"zero-shot": ConvBertForSequenceClassification,
}
if is_torch_available()
else {}
)
test_pruning = False
test_head_masking = False
def setUp(self):
self.model_tester = ConvBertModelTester(self)
self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ConvBertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
chunk_length = getattr(self.model_tester, "chunk_length", None)
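        # LSH-style models (e.g. Reformer) run num_hashes hashing rounds, which scales the
        # effective encoder sequence length accordingly.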
if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
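            # ConvBERT reports num_attention_heads / head_ratio attention heads (head_ratio defaults to 2)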
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads / 2, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
if self.is_encoder_decoder:
correct_outlen = 5
# loss is at first position
if "labels" in inputs_dict:
correct_outlen += 1 # loss is added to beginning
# Question Answering model returns start_logits and end_logits
if model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
correct_outlen += 1 # start_logits and end_logits instead of only 1 output
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
# cross attentions
cross_attentions = outputs.cross_attentions
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
decoder_seq_length,
encoder_key_length,
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
elif self.is_encoder_decoder:
added_hidden_states = 2
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
if chunk_length is not None:
self.assertListEqual(
list(self_attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads / 2, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
)
@slow
@require_torch_accelerator
def test_torchscript_device_change(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# ConvBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == ConvBertForMultipleChoice:
                continue  # skip only this class; keep testing the remaining model classes
config.torchscript = True
model = model_class(config=config)
inputs_dict = self._prepare_for_class(inputs_dict, model_class)
traced_model = torch.jit.trace(
model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
)
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
def test_model_for_input_embeds(self):
batch_size = 2
seq_length = 10
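        # 768 matches ConvBertConfig's default embedding_size (distinct from this test's small
        # hidden_size); the model projects inputs_embeds to hidden_size internally.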
inputs_embeds = torch.rand([batch_size, seq_length, 768], device=torch_device)
config = self.model_tester.get_config()
model = ConvBertModel(config=config)
model.to(torch_device)
model.eval()
result = model(inputs_embeds=inputs_embeds)
self.assertEqual(result.last_hidden_state.shape, (batch_size, seq_length, config.hidden_size))
def test_reducing_attention_heads(self):
config, *inputs_dict = self.model_tester.prepare_config_and_inputs()
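        # A larger head_ratio shrinks the attention heads further: with 4 heads and head_ratio=4,
        # a single self-attention head remains.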
config.head_ratio = 4
self.model_tester.create_and_check_for_masked_lm(config, *inputs_dict)
@require_torch
class ConvBertModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_no_head(self):
model = ConvBertModel.from_pretrained("YituTech/conv-bert-base")
input_ids = torch.tensor([[1, 2, 3, 4, 5, 6]])
with torch.no_grad():
output = model(input_ids)[0]
expected_shape = torch.Size((1, 6, 768))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[-0.0864, -0.4898, -0.3677], [0.1434, -0.2952, -0.7640], [-0.0112, -0.4432, -0.5432]]]
)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/camembert/test_modeling_tf_camembert.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
@slow
def test_output_embeds_base_model(self):
model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
input_ids = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
dtype=tf.int32,
        ) # J'aime le camembert !
output = model(input_ids)["last_hidden_state"]
expected_shape = tf.TensorShape((1, 10, 768))
self.assertEqual(output.shape, expected_shape)
# compare the actual values for a slice.
expected_slice = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
dtype=tf.float32,
)
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = camembert.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/camembert/test_modeling_camembert.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
import torch
from transformers import CamembertModel
@require_torch
@require_sentencepiece
@require_tokenizers
class CamembertModelIntegrationTest(unittest.TestCase):
@slow
def test_output_embeds_base_model(self):
model = CamembertModel.from_pretrained("camembert-base")
model.to(torch_device)
input_ids = torch.tensor(
[[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
device=torch_device,
dtype=torch.long,
) # J'aime le camembert !
with torch.no_grad():
output = model(input_ids)["last_hidden_state"]
expected_shape = torch.Size((1, 10, 768))
self.assertEqual(output.shape, expected_shape)
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
device=torch_device,
dtype=torch.float,
)
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = camembert.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/camembert/test_tokenization_camembert.py
|
# coding=utf-8
# Copyright 2018 HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
from transformers import AddedToken, CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = CamembertTokenizer
rust_tokenizer_class = CamembertTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname)
@unittest.skip(
"Token maps are not equal because someone set the probability of ('<unk>NOTUSED', -100), so it's never encoded for fast"
)
def test_special_tokens_map_equal(self):
return
def test_convert_token_and_id(self):
"""Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
token = "<pad>"
token_id = 1 # 1 is the offset id, but in the spm vocab it's 3
self.assertEqual(self.get_tokenizer().convert_tokens_to_ids(token), token_id)
self.assertEqual(self.get_tokenizer().convert_ids_to_tokens(token_id), token)
def test_get_vocab(self):
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "<s>NOTUSED")
self.assertEqual(vocab_keys[1], "<pad>")
self.assertEqual(vocab_keys[-1], "<mask>")
self.assertEqual(len(vocab_keys), 1_005)
def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
def test_rust_and_python_bpe_tokenizers(self):
tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname)
rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
sequence = "I was born in 92000, and this is falsé."
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
        # <unk> tokens are not the same for `rust` as for `slow`,
        # because spm gives back the raw token instead of `<unk>` in EncodeAsPieces:
# tokens = tokenizer.tokenize(sequence)
tokens = tokenizer.convert_ids_to_tokens(ids)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = "I was born in 92000, and this is falsé."
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
@slow
def test_tokenizer_integration(self):
expected_encoding = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # fmt: skip
        # CamemBERT is a French model, so we also use French texts.
sequences = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=expected_encoding,
model_name="camembert-base",
revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
sequences=sequences,
)
    # Overwritten because we have to use from_slow (the pretrained tokenizer.json online is wrong: it has a hole)
def test_added_tokens_serialization(self):
self.maxDiff = None
# Utility to test the added vocab
def _test_added_vocab_and_eos(expected, tokenizer_class, expected_eos, temp_dir):
tokenizer = tokenizer_class.from_pretrained(temp_dir)
self.assertTrue(str(expected_eos) not in tokenizer.additional_special_tokens)
self.assertIn(new_eos, tokenizer.added_tokens_decoder.values())
self.assertEqual(tokenizer.added_tokens_decoder[tokenizer.eos_token_id], new_eos)
self.assertDictEqual(expected, tokenizer.added_tokens_decoder)
return tokenizer
new_eos = AddedToken("[NEW_EOS]", rstrip=False, lstrip=True, normalized=False)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
# Load a slow tokenizer from the hub, init with the new token for fast to also include it
tokenizer = self.tokenizer_class.from_pretrained(pretrained_name, eos_token=new_eos)
EXPECTED_ADDED_TOKENS_DECODER = tokenizer.added_tokens_decoder
with self.subTest("Hub -> Slow: Test loading a slow tokenizer from the hub)"):
self.assertEqual(tokenizer._eos_token, new_eos)
self.assertIn(new_eos, list(tokenizer.added_tokens_decoder.values()))
with tempfile.TemporaryDirectory() as tmp_dir_2:
tokenizer.save_pretrained(tmp_dir_2)
with self.subTest(
"Hub -> Slow -> Slow: Test saving this slow tokenizer and reloading it in the fast class"
):
_test_added_vocab_and_eos(
EXPECTED_ADDED_TOKENS_DECODER, self.tokenizer_class, new_eos, tmp_dir_2
)
if self.rust_tokenizer_class is not None:
with self.subTest(
"Hub -> Slow -> Fast: Test saving this slow tokenizer and reloading it in the fast class"
):
tokenizer_fast = _test_added_vocab_and_eos(
EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_2
)
with tempfile.TemporaryDirectory() as tmp_dir_3:
tokenizer_fast.save_pretrained(tmp_dir_3)
with self.subTest(
"Hub -> Slow -> Fast -> Fast: Test saving this fast tokenizer and reloading it in the fast class"
):
_test_added_vocab_and_eos(
EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_3
)
with self.subTest(
"Hub -> Slow -> Fast -> Slow: Test saving this slow tokenizer and reloading it in the slow class"
):
_test_added_vocab_and_eos(
EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_3
)
with self.subTest("Hub -> Fast: Test loading a fast tokenizer from the hub)"):
if self.rust_tokenizer_class is not None:
tokenizer_fast = self.rust_tokenizer_class.from_pretrained(
pretrained_name, eos_token=new_eos, from_slow=True
)
self.assertEqual(tokenizer_fast._eos_token, new_eos)
self.assertIn(new_eos, list(tokenizer_fast.added_tokens_decoder.values()))
                        # We can't fully test the following because, for backward compatibility, we kept the default rstrip/lstrip in slow but not fast. Revisit once normalization is aligned.
with self.subTest("Hub -> Fast == Hub -> Slow: make sure slow and fast tokenizer match"):
self.assertDictEqual(EXPECTED_ADDED_TOKENS_DECODER, tokenizer_fast.added_tokens_decoder)
EXPECTED_ADDED_TOKENS_DECODER = tokenizer_fast.added_tokens_decoder
with tempfile.TemporaryDirectory() as tmp_dir_4:
tokenizer_fast.save_pretrained(tmp_dir_4)
with self.subTest("Hub -> Fast -> Fast: saving Fast1 locally and loading"):
_test_added_vocab_and_eos(
EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_4
)
with self.subTest("Hub -> Fast -> Slow: saving Fast1 locally and loading"):
_test_added_vocab_and_eos(
EXPECTED_ADDED_TOKENS_DECODER, self.tokenizer_class, new_eos, tmp_dir_4
)
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/big_bird/test_tokenization_big_bird.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = BigBirdTokenizer
rust_tokenizer_class = BigBirdTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp(self):
super().setUp()
tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname)
def test_convert_token_and_id(self):
"""Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
token = "<s>"
token_id = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def test_get_vocab(self):
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "<unk>")
self.assertEqual(vocab_keys[1], "<s>")
self.assertEqual(vocab_keys[-1], "[MASK]")
self.assertEqual(len(vocab_keys), 1_004)
def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = "I was born in 92000, and this is falsé."
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
def test_full_tokenizer(self):
tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokens = tokenizer.tokenize("This is a test")
self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens),
[285, 46, 10, 170, 382],
)
tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
tokens,
[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
],
)
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
ids,
[8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
)
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
back_tokens,
[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
],
)
@cached_property
def big_tokenizer(self):
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
@slow
def test_tokenization_base_easy_symbols(self):
symbols = "Hello World!"
original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def test_tokenization_base_hard_symbols(self):
symbols = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # fmt: skip
self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
def test_torch_encode_plus_sent_to_model(self):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
sequence = " ".join(first_ten_tokens)
encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
)
config = BigBirdConfig(attention_type="original_full")
model = BigBirdModel(config)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**encoded_sequence)
model(**batch_encoded_sequence)
@slow
def test_special_tokens(self):
"""
To reproduce:
$ wget https://github.com/google-research/bigbird/blob/master/bigbird/vocab/gpt2.model?raw=true
$ mv gpt2.model?raw=true gpt2.model
```
import tensorflow_text as tft
import tensorflow as tf
vocab_model_file = "./gpt2.model"
        tokenizer = tft.SentencepieceTokenizer(model=tf.io.gfile.GFile(vocab_model_file, "rb").read())
ids = tokenizer.tokenize("Paris is the [MASK].")
ids = tf.concat([tf.constant([65]), ids, tf.constant([66])], axis=0)
        detokenized = tokenizer.detokenize(ids)  # should give [CLS] Paris is the [MASK].[SEP]
        ```
        """
tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertEqual(decoded_text, "[CLS] Paris is the[MASK].[SEP]")
@slow
def test_tokenizer_integration(self):
expected_encoding = {'input_ids': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip
self.tokenizer_integration_test_util(
expected_encoding=expected_encoding,
model_name="google/bigbird-roberta-base",
revision="215c99f1600e06f83acce68422f2035b2b5c3510",
)
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/big_bird/test_modeling_big_bird.py
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch BigBird model. """
import unittest
from transformers import BigBirdConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.models.big_bird.tokenization_big_bird import BigBirdTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
BigBirdForCausalLM,
BigBirdForMaskedLM,
BigBirdForMultipleChoice,
BigBirdForPreTraining,
BigBirdForQuestionAnswering,
BigBirdForSequenceClassification,
BigBirdForTokenClassification,
BigBirdModel,
)
from transformers.models.big_bird.modeling_big_bird import BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST
class BigBirdModelTester:
def __init__(
self,
parent,
batch_size=7,
seq_length=128,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu_new",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=256,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
attention_type="block_sparse",
use_bias=True,
rescale_embeddings=False,
block_size=8,
num_rand_blocks=3,
position_embedding_type="absolute",
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.attention_type = attention_type
self.use_bias = use_bias
self.rescale_embeddings = rescale_embeddings
self.block_size = block_size
self.num_rand_blocks = num_rand_blocks
self.position_embedding_type = position_embedding_type
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return BigBirdConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_encoder_decoder=False,
initializer_range=self.initializer_range,
attention_type=self.attention_type,
use_bias=self.use_bias,
rescale_embeddings=self.rescale_embeddings,
block_size=self.block_size,
num_random_blocks=self.num_rand_blocks,
position_embedding_type=self.position_embedding_type,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BigBirdModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_pretraining(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BigBirdForPreTraining(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=token_labels,
next_sentence_label=sequence_labels,
)
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, config.num_labels))
def create_and_check_model_as_decoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.add_cross_attention = True
model = BigBirdModel(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
)
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_causal_lm(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
model = BigBirdForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BigBirdForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_decoder_model_past_large_inputs(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.is_decoder = True
config.add_cross_attention = True
model = BigBirdForCausalLM(config=config)
model.to(torch_device)
model.eval()
# first forward pass
outputs = model(
input_ids,
attention_mask=input_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=True,
)
past_key_values = outputs.past_key_values
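        # The cached key/value states let the second pass attend to the full history while
        # only the newly created tokens are fed in.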
        # create hypothetical next tokens and extend next_input_ids with them
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append the new tokens and mask to the original inputs
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
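        # reference pass: run the whole extended sequence without the cache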
output_from_no_past = model(
next_input_ids,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_hidden_states=True,
)["hidden_states"][0]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
)["hidden_states"][0]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BigBirdForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = BigBirdForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = BigBirdForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = BigBirdForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
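        # Expand every input along a new choice dimension so each of the num_choices options
        # is scored against the same sequence.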
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def create_and_check_for_auto_padding(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
):
model = BigBirdModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_change_to_full_attn(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
):
model = BigBirdModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
# the config should not be changed
self.parent.assertTrue(model.config.attention_type == "block_sparse")
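# Note: for sequences too short for block-sparse attention, BigBird falls back to
# full attention at runtime; the check above verifies that this fallback leaves
# the user-facing `config.attention_type` untouched.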
@require_torch
class BigBirdModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # head masking & pruning are currently not supported for BigBird
test_head_masking = False
test_pruning = False
    # torchscript should be possible, but it takes prohibitively long to test;
    # it is also not an essential feature to support initially.
test_torchscript = False
all_model_classes = (
(
BigBirdModel,
BigBirdForPreTraining,
BigBirdForMaskedLM,
BigBirdForCausalLM,
BigBirdForMultipleChoice,
BigBirdForQuestionAnswering,
BigBirdForSequenceClassification,
BigBirdForTokenClassification,
)
if is_torch_available()
else ()
)
all_generative_model_classes = (BigBirdForCausalLM,) if is_torch_available() else ()
pipeline_model_mapping = (
{
"feature-extraction": BigBirdModel,
"fill-mask": BigBirdForMaskedLM,
"question-answering": BigBirdForQuestionAnswering,
"text-classification": BigBirdForSequenceClassification,
"text-generation": BigBirdForCausalLM,
"token-classification": BigBirdForTokenClassification,
"zero-shot": BigBirdForSequenceClassification,
}
if is_torch_available()
else {}
)
# special case for ForPreTraining model
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
inputs_dict["next_sentence_label"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
return inputs_dict
def setUp(self):
self.model_tester = BigBirdModelTester(self)
self.config_tester = ConfigTester(self, config_class=BigBirdConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_model_as_decoder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
def test_model_as_decoder_with_default_input_mask(self):
# This regression test was failing with PyTorch < 1.3
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_model_as_decoder(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def test_retain_grad_hidden_states_attentions(self):
# bigbird cannot keep gradients in attentions when `attention_type=block_sparse`
if self.model_tester.attention_type == "original_full":
super().test_retain_grad_hidden_states_attentions()
@slow
def test_model_from_pretrained(self):
for model_name in BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = BigBirdForPreTraining.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_model_various_attn_type(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for attn_type in ["original_full", "block_sparse"]:
            config_and_inputs[0].attention_type = attn_type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_fast_integration(self):
# fmt: off
input_ids = torch.tensor(
[[6, 117, 33, 36, 70, 22, 63, 31, 71, 72, 88, 58, 109, 49, 48, 116, 92, 6, 19, 95, 118, 100, 80, 111, 93, 2, 31, 84, 26, 5, 6, 82, 46, 96, 109, 4, 39, 19, 109, 13, 92, 31, 36, 90, 111, 18, 75, 6, 56, 74, 16, 42, 56, 92, 69, 108, 127, 81, 82, 41, 106, 19, 44, 24, 82, 121, 120, 65, 36, 26, 72, 13, 36, 98, 43, 64, 8, 53, 100, 92, 51, 122, 66, 17, 61, 50, 104, 127, 26, 35, 94, 23, 110, 71, 80, 67, 109, 111, 44, 19, 51, 41, 86, 71, 76, 44, 18, 68, 44, 77, 107, 81, 98, 126, 100, 2, 49, 98, 84, 39, 23, 98, 52, 46, 10, 82, 121, 73],[6, 117, 33, 36, 70, 22, 63, 31, 71, 72, 88, 58, 109, 49, 48, 116, 92, 6, 19, 95, 118, 100, 80, 111, 93, 2, 31, 84, 26, 5, 6, 82, 46, 96, 109, 4, 39, 19, 109, 13, 92, 31, 36, 90, 111, 18, 75, 6, 56, 74, 16, 42, 56, 92, 69, 108, 127, 81, 82, 41, 106, 19, 44, 24, 82, 121, 120, 65, 36, 26, 72, 13, 36, 98, 43, 64, 8, 53, 100, 92, 51, 12, 66, 17, 61, 50, 104, 127, 26, 35, 94, 23, 110, 71, 80, 67, 109, 111, 44, 19, 51, 41, 86, 71, 76, 28, 18, 68, 44, 77, 107, 81, 98, 126, 100, 2, 49, 18, 84, 39, 23, 98, 52, 46, 10, 82, 121, 73]], # noqa: E231
dtype=torch.long,
device=torch_device,
)
# fmt: on
input_ids = input_ids % self.model_tester.vocab_size
input_ids[1] = input_ids[1] - 1
        attention_mask = torch.ones(input_ids.shape, device=torch_device)
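        # zero out all but the last 10 positions so only a short suffix is attended to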
attention_mask[:, :-10] = 0
config, _, _, _, _, _, _ = self.model_tester.prepare_config_and_inputs()
torch.manual_seed(0)
model = BigBirdModel(config).eval().to(torch_device)
with torch.no_grad():
hidden_states = model(input_ids, attention_mask=attention_mask).last_hidden_state
self.assertTrue(
torch.allclose(
hidden_states[0, 0, :5],
torch.tensor([1.4825, 0.0774, 0.8226, -0.2962, -0.9593], device=torch_device),
atol=1e-3,
)
)
def test_auto_padding(self):
self.model_tester.seq_length = 241
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_auto_padding(*config_and_inputs)
def test_for_change_to_full_attn(self):
self.model_tester.seq_length = 9
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_change_to_full_attn(*config_and_inputs)
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
# overwrite from common in order to skip the check on `attentions`
def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while the PyTorch
        # version makes an effort to return `attention_probs` (yet to be verified).
if name.startswith("outputs.attentions"):
return
else:
super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
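# A minimal usage sketch of block-sparse BigBird, mirroring the integration tests
# below (assumes the `google/bigbird-roberta-base` checkpoint can be downloaded;
# illustrative only):
#
#     from transformers import BigBirdModel, BigBirdTokenizer
#
#     tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#     model = BigBirdModel.from_pretrained(
#         "google/bigbird-roberta-base", attention_type="block_sparse"
#     )
#     inputs = tokenizer("a very long document ...", return_tensors="pt")
#     last_hidden_state = model(**inputs).last_hidden_state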
@require_torch
@slow
class BigBirdModelIntegrationTest(unittest.TestCase):
    # we can set this to True once block_sparse attn_probs works accurately
test_attention_probs = False
def _get_dummy_input_ids(self):
# fmt: off
ids = torch.tensor(
[[6, 117, 33, 36, 70, 22, 63, 31, 71, 72, 88, 58, 109, 49, 48, 116, 92, 6, 19, 95, 118, 100, 80, 111, 93, 2, 31, 84, 26, 5, 6, 82, 46, 96, 109, 4, 39, 19, 109, 13, 92, 31, 36, 90, 111, 18, 75, 6, 56, 74, 16, 42, 56, 92, 69, 108, 127, 81, 82, 41, 106, 19, 44, 24, 82, 121, 120, 65, 36, 26, 72, 13, 36, 98, 43, 64, 8, 53, 100, 92, 51, 122, 66, 17, 61, 50, 104, 127, 26, 35, 94, 23, 110, 71, 80, 67, 109, 111, 44, 19, 51, 41, 86, 71, 76, 44, 18, 68, 44, 77, 107, 81, 98, 126, 100, 2, 49, 98, 84, 39, 23, 98, 52, 46, 10, 82, 121, 73]], # noqa: E231
dtype=torch.long,
device=torch_device,
)
# fmt: on
return ids
def test_inference_block_sparse_pretraining(self):
model = BigBirdForPreTraining.from_pretrained("google/bigbird-roberta-base", attention_type="block_sparse")
model.to(torch_device)
input_ids = torch.tensor([[20920, 232, 328, 1437] * 1024], dtype=torch.long, device=torch_device)
with torch.no_grad():
outputs = model(input_ids)
prediction_logits = outputs.prediction_logits
seq_relationship_logits = outputs.seq_relationship_logits
self.assertEqual(prediction_logits.shape, torch.Size((1, 4096, 50358)))
self.assertEqual(seq_relationship_logits.shape, torch.Size((1, 2)))
expected_prediction_logits_slice = torch.tensor(
[
[-0.5583, 0.0475, -0.2508, 7.4423],
[0.7409, 1.4460, -0.7593, 7.7010],
[1.9150, 3.1395, 5.8840, 9.3498],
[-0.1854, -1.4640, -2.2052, 3.7968],
],
device=torch_device,
)
self.assertTrue(
torch.allclose(prediction_logits[0, 128:132, 128:132], expected_prediction_logits_slice, atol=1e-4)
)
expected_seq_relationship_logits = torch.tensor([[46.9465, 47.9517]], device=torch_device)
self.assertTrue(torch.allclose(seq_relationship_logits, expected_seq_relationship_logits, atol=1e-4))
def test_inference_full_pretraining(self):
model = BigBirdForPreTraining.from_pretrained("google/bigbird-roberta-base", attention_type="original_full")
model.to(torch_device)
input_ids = torch.tensor([[20920, 232, 328, 1437] * 512], dtype=torch.long, device=torch_device)
with torch.no_grad():
outputs = model(input_ids)
prediction_logits = outputs.prediction_logits
seq_relationship_logits = outputs.seq_relationship_logits
self.assertEqual(prediction_logits.shape, torch.Size((1, 512 * 4, 50358)))
self.assertEqual(seq_relationship_logits.shape, torch.Size((1, 2)))
expected_prediction_logits_slice = torch.tensor(
[
[0.1499, -1.1217, 0.1990, 8.4499],
[-2.7757, -3.0687, -4.8577, 7.5156],
[1.5446, 0.1982, 4.3016, 10.4281],
[-1.3705, -4.0130, -3.9629, 5.1526],
],
device=torch_device,
)
self.assertTrue(
torch.allclose(prediction_logits[0, 128:132, 128:132], expected_prediction_logits_slice, atol=1e-4)
)
expected_seq_relationship_logits = torch.tensor([[41.4503, 41.2406]], device=torch_device)
self.assertTrue(torch.allclose(seq_relationship_logits, expected_seq_relationship_logits, atol=1e-4))
def test_block_sparse_attention_probs(self):
"""
Asserting if outputted attention matrix is similar to hard coded attention matrix
"""
if not self.test_attention_probs:
return
model = BigBirdModel.from_pretrained(
"google/bigbird-roberta-base", attention_type="block_sparse", num_random_blocks=3, block_size=16
)
model.to(torch_device)
model.eval()
config = model.config
input_ids = self._get_dummy_input_ids()
hidden_states = model.embeddings(input_ids)
batch_size, seqlen, _ = hidden_states.size()
attn_mask = torch.ones(batch_size, seqlen, device=torch_device, dtype=torch.float)
to_seq_length = from_seq_length = seqlen
from_block_size = to_block_size = config.block_size
blocked_mask, band_mask, from_mask, to_mask = model.create_masks_for_block_sparse_attn(
attn_mask, config.block_size
)
from_blocked_mask = to_blocked_mask = blocked_mask
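        # run block-sparse self-attention manually for every layer and check that the
        # returned attention probabilities reproduce the context layer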
for i in range(config.num_hidden_layers):
pointer = model.encoder.layer[i].attention.self
query_layer = pointer.transpose_for_scores(pointer.query(hidden_states))
key_layer = pointer.transpose_for_scores(pointer.key(hidden_states))
value_layer = pointer.transpose_for_scores(pointer.value(hidden_states))
context_layer, attention_probs = pointer.bigbird_block_sparse_attention(
query_layer,
key_layer,
value_layer,
band_mask,
from_mask,
to_mask,
from_blocked_mask,
to_blocked_mask,
pointer.num_attention_heads,
pointer.num_random_blocks,
pointer.attention_head_size,
from_block_size,
to_block_size,
batch_size,
from_seq_length,
to_seq_length,
seed=pointer.seed,
plan_from_length=None,
plan_num_rand_blocks=None,
output_attentions=True,
)
context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1)
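            # reconstruct the context layer from the returned probabilities
            # (attention_probs @ value_layer, computed per head)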
cl = torch.einsum("bhqk,bhkd->bhqd", attention_probs, value_layer)
cl = cl.view(context_layer.size())
self.assertTrue(torch.allclose(context_layer, cl, atol=0.001))
def test_block_sparse_context_layer(self):
model = BigBirdModel.from_pretrained(
"google/bigbird-roberta-base", attention_type="block_sparse", num_random_blocks=3, block_size=16
)
model.to(torch_device)
model.eval()
config = model.config
input_ids = self._get_dummy_input_ids()
dummy_hidden_states = model.embeddings(input_ids)
attn_mask = torch.ones_like(input_ids, device=torch_device)
blocked_mask, band_mask, from_mask, to_mask = model.create_masks_for_block_sparse_attn(
attn_mask, config.block_size
)
targeted_cl = torch.tensor(
[
[0.1870, 1.5248, 0.2333, -0.0483, -0.0952, 1.8359, -0.0142, 0.1239, 0.0083, -0.0045],
[-0.0601, 0.1243, 0.1329, -0.1524, 0.2347, 0.0894, -0.2248, -0.2461, -0.0645, -0.0109],
[-0.0418, 0.1463, 0.1290, -0.1638, 0.2489, 0.0799, -0.2341, -0.2406, -0.0524, 0.0106],
[0.1859, 1.5182, 0.2324, -0.0473, -0.0952, 1.8295, -0.0148, 0.1242, 0.0080, -0.0045],
[0.1879, 1.5300, 0.2334, -0.0480, -0.0967, 1.8428, -0.0137, 0.1256, 0.0087, -0.0050],
[0.1852, 1.5149, 0.2330, -0.0492, -0.0936, 1.8236, -0.0154, 0.1210, 0.0080, -0.0048],
[0.1857, 1.5186, 0.2331, -0.0484, -0.0940, 1.8285, -0.0148, 0.1224, 0.0077, -0.0045],
[0.1884, 1.5336, 0.2334, -0.0469, -0.0974, 1.8477, -0.0132, 0.1266, 0.0085, -0.0046],
[0.1881, 1.5308, 0.2334, -0.0479, -0.0969, 1.8438, -0.0136, 0.1258, 0.0088, -0.0050],
[0.1849, 1.5143, 0.2329, -0.0491, -0.0930, 1.8230, -0.0156, 0.1209, 0.0074, -0.0047],
[0.1878, 1.5299, 0.2333, -0.0472, -0.0967, 1.8434, -0.0137, 0.1257, 0.0084, -0.0048],
[0.1873, 1.5260, 0.2333, -0.0478, -0.0961, 1.8383, -0.0142, 0.1245, 0.0083, -0.0048],
[0.1849, 1.5145, 0.2327, -0.0491, -0.0935, 1.8237, -0.0156, 0.1215, 0.0083, -0.0046],
[0.1866, 1.5232, 0.2332, -0.0488, -0.0950, 1.8342, -0.0143, 0.1237, 0.0084, -0.0047],
],
device=torch_device,
)
context_layer = model.encoder.layer[0].attention.self(
dummy_hidden_states,
band_mask=band_mask,
from_mask=from_mask,
to_mask=to_mask,
from_blocked_mask=blocked_mask,
to_blocked_mask=blocked_mask,
)
context_layer = context_layer[0]
self.assertEqual(context_layer.shape, torch.Size((1, 128, 768)))
self.assertTrue(torch.allclose(context_layer[0, 64:78, 300:310], targeted_cl, atol=0.0001))
def test_tokenizer_inference(self):
tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
model = BigBirdModel.from_pretrained(
"google/bigbird-roberta-base", attention_type="block_sparse", num_random_blocks=3, block_size=16
)
model.to(torch_device)
text = [
"Transformer-based models are unable to process long sequences due to their self-attention operation,"
" which scales quadratically with the sequence length. To address this limitation, we introduce the"
" Longformer with an attention mechanism that scales linearly with sequence length, making it easy to"
" process documents of thousands of tokens or longer. Longformer’s attention mechanism is a drop-in"
" replacement for the standard self-attention and combines a local windowed attention with a task"
" motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer"
" on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In"
" contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream"
" tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new"
" state-of-the-art results on WikiHop and TriviaQA."
]
inputs = tokenizer(text)
for k in inputs:
inputs[k] = torch.tensor(inputs[k], device=torch_device, dtype=torch.long)
prediction = model(**inputs)
prediction = prediction[0]
self.assertEqual(prediction.shape, torch.Size((1, 199, 768)))
expected_prediction = torch.tensor(
[
[0.1887, -0.0474, 0.2604, 0.1453],
[0.0651, 0.1999, 0.1797, 0.1161],
[0.2833, -0.3036, 0.6910, 0.1123],
[0.2836, -0.4644, -0.0111, 0.1530],
[0.3919, -0.2823, 0.4192, 0.1687],
[0.2168, -0.1956, 0.4050, 0.0925],
[0.2597, -0.0884, 0.1258, 0.1119],
[0.1127, -0.1203, 0.1924, 0.2859],
[0.1362, -0.1315, 0.2693, 0.1027],
[-0.3169, -0.2266, 0.4419, 0.6740],
[0.2366, -0.1452, 0.2589, 0.0579],
[0.0358, -0.2021, 0.3112, -0.1392],
],
device=torch_device,
)
self.assertTrue(torch.allclose(prediction[0, 52:64, 320:324], expected_prediction, atol=1e-4))
def test_inference_question_answering(self):
tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-base-trivia-itc")
model = BigBirdForQuestionAnswering.from_pretrained(
"google/bigbird-base-trivia-itc", attention_type="block_sparse", block_size=16, num_random_blocks=3
)
model.to(torch_device)
context = (
"The BigBird model was proposed in Big Bird: Transformers for Longer Sequences by Zaheer, Manzil and"
" Guruganesh, Guru and Dubey, Kumar Avinava and Ainslie, Joshua and Alberti, Chris and Ontanon, Santiago"
" and Pham, Philip and Ravula, Anirudh and Wang, Qifan and Yang, Li and others. BigBird, is a"
" sparse-attention based transformer which extends Transformer based models, such as BERT to much longer"
" sequences. In addition to sparse attention, BigBird also applies global attention as well as random"
" attention to the input sequence. Theoretically, it has been shown that applying sparse, global, and"
" random attention approximates full attention, while being computationally much more efficient for longer"
" sequences. As a consequence of the capability to handle longer context, BigBird has shown improved"
" performance on various long document NLP tasks, such as question answering and summarization, compared"
" to BERT or RoBERTa."
)
question = [
"Which is better for longer sequences- BigBird or BERT?",
"What is the benefit of using BigBird over BERT?",
]
inputs = tokenizer(
question,
[context, context],
padding=True,
return_tensors="pt",
add_special_tokens=True,
max_length=256,
truncation=True,
)
inputs = {k: v.to(torch_device) for k, v in inputs.items()}
start_logits, end_logits = model(**inputs).to_tuple()
# fmt: off
target_start_logits = torch.tensor(
[[-8.5622, -9.6209, -14.3351, -8.7032, -11.8596, -7.7446, -9.6730, -13.6063, -8.9651, -11.7417, -8.2641, -8.7056, -13.4116, -5.6600, -8.8316, -10.4148, -12.2180, -7.7979, -12.5274, -6.0685, -10.3373, -11.3128, -6.6456, -14.4030, -6.8292, -14.5383, -11.5638, -6.3326, 11.5293, -1.8434, -10.0013, -7.6150], [-10.7384, -13.1179, -10.1837, -13.7700, -10.0186, -11.7335, -13.3411, -10.0188, -13.4235, -9.9381, -10.4252, -13.1281, -8.2022, -10.4326, -11.5542, -14.1549, -10.7546, -13.4691, -8.2744, -11.4324, -13.3773, -9.8284, -14.5825, -8.7471, -14.7050, -8.0364, -11.3627, -6.4638, -11.7031, -14.3446, -9.9425, -8.0088]], # noqa: E231
device=torch_device,
)
target_end_logits = torch.tensor(
[[-12.1736, -8.8487, -14.8877, -11.6713, -15.1165, -12.2396, -7.6828, -15.4153, -12.2528, -14.3671, -12.3596, -7.4272, -14.9615, -13.6356, -11.7939, -9.9767, -14.8112, -8.9567, -15.8798, -11.5291, -9.4249, -14.7544, -7.9387, -16.2789, -8.9702, -15.3111, -11.5585, -7.9992, -4.1127, 10.3209, -8.3926, -10.2005], [-11.1375, -15.4027, -12.6861, -16.9884, -13.7093, -10.3560, -15.7228, -12.9290, -15.8519, -13.7953, -10.2460, -15.7198, -14.2078, -12.8477, -11.4861, -16.1017, -11.8900, -16.4488, -13.2959, -10.3980, -15.4874, -10.3539, -16.8263, -10.9973, -17.0344, -9.2751, -10.1196, -13.8907, -12.1025, -13.0628, -12.8530, -13.8173]], # noqa: E321
device=torch_device,
)
# fmt: on
self.assertTrue(torch.allclose(start_logits[:, 64:96], target_start_logits, atol=1e-4))
self.assertTrue(torch.allclose(end_logits[:, 64:96], target_end_logits, atol=1e-4))
input_ids = inputs["input_ids"].tolist()
answer = [
input_ids[i][torch.argmax(start_logits, dim=-1)[i] : torch.argmax(end_logits, dim=-1)[i] + 1]
for i in range(len(input_ids))
]
answer = tokenizer.batch_decode(answer)
        self.assertEqual(answer, ["BigBird", "global attention"])
def test_fill_mask(self):
tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
model = BigBirdForMaskedLM.from_pretrained("google/bigbird-roberta-base")
model.to(torch_device)
input_ids = tokenizer("The goal of life is [MASK] .", return_tensors="pt").input_ids.to(torch_device)
logits = model(input_ids).logits
        # [MASK] is the token at position 6
pred_token = tokenizer.decode(torch.argmax(logits[0, 6:7], axis=-1))
self.assertEqual(pred_token, "happiness")
def test_auto_padding(self):
model = BigBirdModel.from_pretrained(
"google/bigbird-roberta-base", attention_type="block_sparse", num_random_blocks=3, block_size=16
)
model.to(torch_device)
model.eval()
input_ids = torch.tensor([200 * [10] + 40 * [2] + [1]], device=torch_device, dtype=torch.long)
with torch.no_grad():
output = model(input_ids).to_tuple()[0]
# fmt: off
target = torch.tensor(
            [[-0.129420, -0.164740, 0.042422, -0.336030, 0.094379, 0.033794, 0.384590, 0.229660, -0.196500, 0.108020], [-0.000154, -0.168800, 0.165820, -0.313670, 0.101240, 0.035145, 0.381880, 0.213730, -0.201080, 0.077443], [0.053754, -0.166350, 0.225520, -0.272900, 0.119670, 0.019987, 0.348670, 0.199190, -0.181600, 0.084640], [0.063636, -0.187110, 0.237010, -0.297380, 0.126300, 0.020025, 0.268490, 0.191820, -0.192300, 0.035077], [0.073893, -0.184790, 0.188870, -0.297860, 0.134280, 0.028972, 0.174650, 0.186890, -0.180530, 0.006851], [0.005253, -0.169360, 0.123100, -0.302550, 0.126930, 0.024188, 0.133410, 0.200600, -0.168210, -0.001006], [-0.093336, -0.175370, -0.004768, -0.333170, 0.114330, 0.034168, 0.120960, 0.203570, -0.162810, -0.005757], [-0.160210, -0.169310, -0.049064, -0.331950, 0.115730, 0.027062, 0.143600, 0.205310, -0.144580, 0.026746], [-0.193200, -0.156820, -0.079422, -0.351600, 0.106450, 0.032174, 0.245690, 0.210250, -0.173480, 0.043914], [-0.167980, -0.153050, -0.059764, -0.357890, 0.103910, 0.031481, 0.334190, 0.208960, -0.178180, 0.072165], [-0.136990, -0.156950, -0.012099, -0.353140, 0.096996, 0.025864, 0.376340, 0.216050, -0.171820, 0.089963], [-0.041143, -0.167060, 0.079754, -0.353220, 0.093247, 0.019867, 0.385810, 0.214340, -0.191800, 0.065946], [0.040373, -0.158610, 0.152570, -0.312930, 0.110590, 0.012282, 0.345270, 0.204040, -0.176500, 0.064972], [0.043762, -0.166450, 0.179500, -0.317930, 0.117280, -0.004040, 0.304490, 0.201380, -0.182780, 0.044000]], # noqa: E231
device=torch_device,
)
# fmt: on
self.assertEqual(output.shape, torch.Size((1, 241, 768)))
self.assertTrue(torch.allclose(output[0, 64:78, 300:310], target, atol=0.0001))
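# Note: `test_auto_padding` above uses an input length (241) that is not a multiple
# of the block size; BigBird pads internally to a block boundary and the returned
# hidden states are trimmed back to the original sequence length.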
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/big_bird/test_modeling_flax_big_bird.py
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
def __init__(
self,
parent,
batch_size=2,
seq_length=56,
is_training=True,
use_attention_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=2,
intermediate_size=7,
hidden_act="gelu_new",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_choices=4,
attention_type="block_sparse",
use_bias=True,
rescale_embeddings=False,
block_size=2,
num_random_blocks=3,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_choices = num_choices
self.rescale_embeddings = rescale_embeddings
self.attention_type = attention_type
self.use_bias = use_bias
self.block_size = block_size
self.num_random_blocks = num_random_blocks
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
config = BigBirdConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
attention_type=self.attention_type,
block_size=self.block_size,
num_random_blocks=self.num_random_blocks,
use_bias=self.use_bias,
rescale_embeddings=self.rescale_embeddings,
)
return config, input_ids, token_type_ids, attention_mask
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, token_type_ids, attention_mask = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
test_attn_probs = False
test_mismatched_shapes = False
def setUp(self):
self.model_tester = FlaxBigBirdModelTester(self)
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def test_from_pretrained_save_pretrained(self):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def test_from_pretrained_with_no_automatic_init(self):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def test_no_automatic_init(self):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def test_hidden_states_output(self):
super().test_hidden_states_output()
@slow
def test_model_from_pretrained(self):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("google/bigbird-roberta-base")
self.assertIsNotNone(model)
def test_attention_outputs(self):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def test_jit_compilation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
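                # compile the forward pass once with jax.jit; the compiled and
                # eagerly executed outputs are compared shape-for-shape below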
@jax.jit
def model_jitted(input_ids, attention_mask=None, **kwargs):
return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)
with self.subTest("JIT Enabled"):
jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
outputs = model_jitted(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(outputs), len(jitted_outputs))
for jitted_output, output in zip(jitted_outputs, outputs):
self.assertEqual(jitted_output.shape, output.shape)
# overwrite from common in order to skip the check on `attentions`
def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while the PyTorch
        # version makes an effort to return `attention_probs` (yet to be verified).
if name.startswith("outputs.attentions"):
return
else:
super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/owlvit/test_processor_owlvit.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
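        # build a tiny BPE vocabulary and merge table on disk so the CLIP tokenizer
        # used by the processor is fully self-contained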
vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: skip
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
image_processor_map = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
with open(self.image_processor_file, "w", encoding="utf-8") as fp:
json.dump(image_processor_map, fp)
def get_tokenizer(self, **kwargs):
return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)
def get_rust_tokenizer(self, **kwargs):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)
def get_image_processor(self, **kwargs):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def prepare_image_inputs(self):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
"""
image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def test_save_load_pretrained_default(self):
tokenizer_slow = self.get_tokenizer()
tokenizer_fast = self.get_rust_tokenizer()
image_processor = self.get_image_processor()
processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
processor_slow.save_pretrained(self.tmpdirname)
processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)
processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
processor_fast.save_pretrained(self.tmpdirname)
processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)
def test_save_load_pretrained_additional_features(self):
processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
image_processor_add_kwargs = self.get_image_processor(do_normalize=False)
processor = OwlViTProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", pad_token="!", do_normalize=False
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)
def test_image_processor(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
image_input = self.prepare_image_inputs()
input_image_proc = image_processor(image_input, return_tensors="np")
input_processor = processor(images=image_input, return_tensors="np")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
def test_tokenizer(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
input_str = "lower newer"
encoded_processor = processor(text=input_str, return_tensors="np")
encoded_tok = tokenizer(input_str, return_tensors="np")
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())
def test_processor(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
input_str = "lower newer"
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input)
self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(ValueError):
processor()
def test_processor_with_text_list(self):
model_name = "google/owlvit-base-patch32"
processor = OwlViTProcessor.from_pretrained(model_name)
input_text = ["cat", "nasa badge"]
inputs = processor(text=input_text)
seq_length = 16
self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
# test if it raises when no input is passed
with pytest.raises(ValueError):
processor()
def test_processor_with_nested_text_list(self):
model_name = "google/owlvit-base-patch32"
processor = OwlViTProcessor.from_pretrained(model_name)
input_texts = [["cat", "nasa badge"], ["person"]]
inputs = processor(text=input_texts)
seq_length = 16
batch_size = len(input_texts)
num_max_text_queries = max([len(texts) for texts in input_texts])
self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))
# test if it raises when no input is passed
with pytest.raises(ValueError):
processor()
def test_processor_case(self):
model_name = "google/owlvit-base-patch32"
processor = OwlViTProcessor.from_pretrained(model_name)
input_texts = ["cat", "nasa badge"]
inputs = processor(text=input_texts)
seq_length = 16
input_ids = inputs["input_ids"]
predicted_ids = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
self.assertListEqual(list(input_ids[0]), predicted_ids[0])
self.assertListEqual(list(input_ids[1]), predicted_ids[1])
def test_processor_case2(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
image_input = self.prepare_image_inputs()
query_input = self.prepare_image_inputs()
inputs = processor(images=image_input, query_images=query_input)
self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(ValueError):
processor()
def test_tokenizer_decode(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids)
decoded_tok = tokenizer.batch_decode(predicted_ids)
self.assertListEqual(decoded_tok, decoded_processor)
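# Typical OwlViTProcessor usage combining text queries and images (a sketch,
# assuming the `google/owlvit-base-patch32` checkpoint is available and `image`
# is a PIL image; illustrative only):
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["cat", "nasa badge"]], images=image, return_tensors="pt")
#     # -> dict with "input_ids", "attention_mask" and "pixel_values"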
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/owlvit/test_image_processing_owlvit.py
|
# coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_vision_available():
from transformers import OwlViTImageProcessor
class OwlViTImageProcessingTester(unittest.TestCase):
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_center_crop=True,
crop_size=None,
do_normalize=True,
image_mean=[0.48145466, 0.4578275, 0.40821073],
image_std=[0.26862954, 0.26130258, 0.27577711],
do_convert_rgb=True,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size if size is not None else {"height": 18, "width": 18}
self.do_center_crop = do_center_crop
self.crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_convert_rgb = do_convert_rgb
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.crop_size["height"], self.crop_size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
class OwlViTImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = OwlViTImageProcessor if is_vision_available() else None
def setUp(self):
self.image_processor_tester = OwlViTImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
image_processing = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "do_center_crop"))
self.assertTrue(hasattr(image_processing, "center_crop"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
def test_image_processor_from_dict_with_kwargs(self):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 18, "width": 18})
self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/owlvit/test_modeling_owlvit.py
|
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch OwlViT model. """
import inspect
import os
import tempfile
import unittest
import numpy as np
import requests
from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig
from transformers.testing_utils import (
require_torch,
require_torch_accelerator,
require_torch_fp16,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
_config_zero_init,
floats_tensor,
ids_tensor,
random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import OwlViTForObjectDetection, OwlViTModel, OwlViTTextModel, OwlViTVisionModel
from transformers.models.owlvit.modeling_owlvit import OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import OwlViTProcessor
class OwlViTVisionModelTester:
def __init__(
self,
parent,
batch_size=12,
image_size=32,
patch_size=2,
num_channels=3,
is_training=True,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = self.get_config()
return config, pixel_values
def get_config(self):
return OwlViTVisionConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, pixel_values):
model = OwlViTVisionModel(config=config).to(torch_device)
model.eval()
pixel_values = pixel_values.to(torch.float32)
with torch.no_grad():
result = model(pixel_values)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
num_patches = (self.image_size // self.patch_size) ** 2
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class OwlViTVisionModelTest(ModelTesterMixin, unittest.TestCase):
"""
    Here we also overwrite some of the tests of test_modeling_common.py, as OWL-ViT does not use input_ids,
    inputs_embeds, attention_mask or seq_length.
"""
all_model_classes = (OwlViTVisionModel,) if is_torch_available() else ()
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
def setUp(self):
self.model_tester = OwlViTVisionModelTester(self)
self.config_tester = ConfigTester(
self, config_class=OwlViTVisionConfig, has_text_modality=False, hidden_size=37
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="OWLVIT does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_common_attributes(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="OWL-ViT does not support training yet")
def test_training(self):
pass
@unittest.skip(reason="OWL-ViT does not support training yet")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="OwlViTVisionModel has no base class and is not available in MODEL_MAPPING")
def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(reason="OwlViTVisionModel has no base class and is not available in MODEL_MAPPING")
def test_save_load_fast_init_to_base(self):
pass
@slow
def test_model_from_pretrained(self):
for model_name in OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = OwlViTVisionModel.from_pretrained(model_name)
self.assertIsNotNone(model)
class OwlViTTextModelTester:
def __init__(
self,
parent,
batch_size=12,
num_queries=4,
seq_length=16,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=64,
num_hidden_layers=12,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
max_position_embeddings=16,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.num_queries = num_queries
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size * self.num_queries, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size * self.num_queries, self.seq_length])
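        # overwrite the random mask with a contiguous prefix mask: each text query
        # attends only to a random-length prefix of the sequence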
if input_mask is not None:
num_text, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(num_text,))
for idx, start_index in enumerate(rnd_start_indices):
input_mask[idx, :start_index] = 1
input_mask[idx, start_index:] = 0
config = self.get_config()
return config, input_ids, input_mask
def get_config(self):
return OwlViTTextConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, input_ids, input_mask):
model = OwlViTTextModel(config=config).to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_ids=input_ids, attention_mask=input_mask)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size * self.num_queries, self.seq_length, self.hidden_size)
)
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size * self.num_queries, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class OwlViTTextModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (OwlViTTextModel,) if is_torch_available() else ()
fx_compatible = False
test_pruning = False
test_head_masking = False
def setUp(self):
self.model_tester = OwlViTTextModelTester(self)
self.config_tester = ConfigTester(self, config_class=OwlViTTextConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="OWL-ViT does not support training yet")
def test_training(self):
pass
@unittest.skip(reason="OWL-ViT does not support training yet")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="OWLVIT does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="OwlViTTextModel has no base class and is not available in MODEL_MAPPING")
def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(reason="OwlViTTextModel has no base class and is not available in MODEL_MAPPING")
def test_save_load_fast_init_to_base(self):
pass
@slow
def test_model_from_pretrained(self):
for model_name in OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = OwlViTTextModel.from_pretrained(model_name)
self.assertIsNotNone(model)
class OwlViTModelTester:
def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True):
if text_kwargs is None:
text_kwargs = {}
if vision_kwargs is None:
vision_kwargs = {}
self.parent = parent
self.text_model_tester = OwlViTTextModelTester(parent, **text_kwargs)
self.vision_model_tester = OwlViTVisionModelTester(parent, **vision_kwargs)
self.is_training = is_training
self.text_config = self.text_model_tester.get_config().to_dict()
self.vision_config = self.vision_model_tester.get_config().to_dict()
def prepare_config_and_inputs(self):
text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
config = self.get_config()
return config, input_ids, attention_mask, pixel_values
def get_config(self):
return OwlViTConfig.from_text_vision_configs(self.text_config, self.vision_config, projection_dim=64)
def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):
model = OwlViTModel(config).to(torch_device).eval()
with torch.no_grad():
result = model(
input_ids=input_ids,
pixel_values=pixel_values,
attention_mask=attention_mask,
)
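        # contrastive logits compare every image against every flattened text query,
        # hence the (image_batch, text_batch * num_queries) shape and its transpose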
image_logits_size = (
self.vision_model_tester.batch_size,
self.text_model_tester.batch_size * self.text_model_tester.num_queries,
)
text_logits_size = (
self.text_model_tester.batch_size * self.text_model_tester.num_queries,
self.vision_model_tester.batch_size,
)
self.parent.assertEqual(result.logits_per_image.shape, image_logits_size)
self.parent.assertEqual(result.logits_per_text.shape, text_logits_size)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask, pixel_values = config_and_inputs
inputs_dict = {
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": attention_mask,
"return_loss": False,
}
return config, inputs_dict
@require_torch
class OwlViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (OwlViTModel,) if is_torch_available() else ()
pipeline_model_mapping = (
{"feature-extraction": OwlViTModel, "zero-shot-object-detection": OwlViTForObjectDetection}
if is_torch_available()
else {}
)
fx_compatible = False
test_head_masking = False
test_pruning = False
test_resize_embeddings = False
test_attention_outputs = False
def setUp(self):
self.model_tester = OwlViTModelTester(self)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="Hidden_states is tested in individual model tests")
def test_hidden_states_output(self):
pass
@unittest.skip(reason="Inputs_embeds is tested in individual model tests")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Retain_grad is tested in individual model tests")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="OwlViTModel does not have input/output embeddings")
def test_model_common_attributes(self):
pass
    # override as the `logit_scale` parameter initialization is different for OWLVIT
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
                    # check if `logit_scale` is initialized as per the original implementation
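                    # np.log(1 / 0.07) matches the contrastive temperature used by the
                    # original CLIP implementation, which OWL-ViT reuses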
if name == "logit_scale":
self.assertAlmostEqual(
param.data.item(),
np.log(1 / 0.07),
delta=1e-3,
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
else:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
def _create_and_check_torchscript(self, config, inputs_dict):
if not self.test_torchscript:
return
configs_no_init = _config_zero_init(config) # To be sure we have no Nan
configs_no_init.torchscript = True
configs_no_init.return_dict = False
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init).to(torch_device)
model.eval()
try:
input_ids = inputs_dict["input_ids"]
pixel_values = inputs_dict["pixel_values"] # OWLVIT needs pixel_values
traced_model = torch.jit.trace(model, (input_ids, pixel_values))
except RuntimeError:
self.fail("Couldn't trace module.")
with tempfile.TemporaryDirectory() as tmp_dir_name:
pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")
try:
torch.jit.save(traced_model, pt_file_name)
except Exception:
self.fail("Couldn't save module.")
try:
loaded_model = torch.jit.load(pt_file_name)
except Exception:
self.fail("Couldn't load module.")
loaded_model = loaded_model.to(torch_device)
loaded_model.eval()
model_state_dict = model.state_dict()
loaded_model_state_dict = loaded_model.state_dict()
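            # tracing serializes non-persistent buffers into the TorchScript state dict;
            # collect and drop them so the key-set comparison below only covers
            # persistent parameters and buffers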
non_persistent_buffers = {}
for key in loaded_model_state_dict.keys():
if key not in model_state_dict.keys():
non_persistent_buffers[key] = loaded_model_state_dict[key]
loaded_model_state_dict = {
key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
}
self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))
model_buffers = list(model.buffers())
for non_persistent_buffer in non_persistent_buffers.values():
found_buffer = False
for i, model_buffer in enumerate(model_buffers):
if torch.equal(non_persistent_buffer, model_buffer):
found_buffer = True
break
self.assertTrue(found_buffer)
model_buffers.pop(i)
models_equal = True
for layer_name, p1 in model_state_dict.items():
p2 = loaded_model_state_dict[layer_name]
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
def test_load_vision_text_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Save OwlViTConfig and check if we can load OwlViTVisionConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
vision_config = OwlViTVisionConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())
# Save OwlViTConfig and check if we can load OwlViTTextConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
text_config = OwlViTTextConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())
@slow
def test_model_from_pretrained(self):
for model_name in OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = OwlViTModel.from_pretrained(model_name)
self.assertIsNotNone(model)
class OwlViTForObjectDetectionTester:
def __init__(self, parent, is_training=True):
self.parent = parent
self.text_model_tester = OwlViTTextModelTester(parent)
self.vision_model_tester = OwlViTVisionModelTester(parent)
self.is_training = is_training
self.text_config = self.text_model_tester.get_config().to_dict()
self.vision_config = self.vision_model_tester.get_config().to_dict()
def prepare_config_and_inputs(self):
text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
config = self.get_config()
return config, pixel_values, input_ids, attention_mask
def get_config(self):
return OwlViTConfig.from_text_vision_configs(self.text_config, self.vision_config, projection_dim=64)
def create_and_check_model(self, config, pixel_values, input_ids, attention_mask):
model = OwlViTForObjectDetection(config).to(torch_device).eval()
with torch.no_grad():
result = model(
pixel_values=pixel_values,
input_ids=input_ids,
attention_mask=attention_mask,
return_dict=True,
)
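        # the detection head emits one box (4 coordinates) and one logit per text query
        # for every image patch, so all shapes below share the num_patches dimension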
pred_boxes_size = (
self.vision_model_tester.batch_size,
(self.vision_model_tester.image_size // self.vision_model_tester.patch_size) ** 2,
4,
)
pred_logits_size = (
self.vision_model_tester.batch_size,
(self.vision_model_tester.image_size // self.vision_model_tester.patch_size) ** 2,
4,
)
pred_class_embeds_size = (
self.vision_model_tester.batch_size,
(self.vision_model_tester.image_size // self.vision_model_tester.patch_size) ** 2,
self.text_model_tester.hidden_size,
)
self.parent.assertEqual(result.pred_boxes.shape, pred_boxes_size)
self.parent.assertEqual(result.logits.shape, pred_logits_size)
self.parent.assertEqual(result.class_embeds.shape, pred_class_embeds_size)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, input_ids, attention_mask = config_and_inputs
inputs_dict = {
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class OwlViTForObjectDetectionTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (OwlViTForObjectDetection,) if is_torch_available() else ()
fx_compatible = False
test_head_masking = False
test_pruning = False
test_resize_embeddings = False
test_attention_outputs = False
def setUp(self):
self.model_tester = OwlViTForObjectDetectionTester(self)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="Hidden_states is tested in individual model tests")
def test_hidden_states_output(self):
pass
@unittest.skip(reason="Inputs_embeds is tested in individual model tests")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Retain_grad is tested in individual model tests")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="OwlViTModel does not have input/output embeddings")
def test_model_common_attributes(self):
pass
@unittest.skip(reason="Test_initialization is tested in individual model tests")
def test_initialization(self):
pass
@unittest.skip(reason="Test_forward_signature is tested in individual model tests")
def test_forward_signature(self):
pass
@unittest.skip(reason="Test_save_load_fast_init_from_base is tested in individual model tests")
def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(reason="OWL-ViT does not support training yet")
def test_training(self):
pass
@unittest.skip(reason="OWL-ViT does not support training yet")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
def _create_and_check_torchscript(self, config, inputs_dict):
if not self.test_torchscript:
return
configs_no_init = _config_zero_init(config) # To be sure we have no Nan
configs_no_init.torchscript = True
configs_no_init.return_dict = False
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init).to(torch_device)
model.eval()
try:
input_ids = inputs_dict["input_ids"]
pixel_values = inputs_dict["pixel_values"] # OWLVIT needs pixel_values
traced_model = torch.jit.trace(model, (input_ids, pixel_values))
except RuntimeError:
self.fail("Couldn't trace module.")
with tempfile.TemporaryDirectory() as tmp_dir_name:
pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")
try:
torch.jit.save(traced_model, pt_file_name)
except Exception:
self.fail("Couldn't save module.")
try:
loaded_model = torch.jit.load(pt_file_name)
except Exception:
self.fail("Couldn't load module.")
loaded_model = loaded_model.to(torch_device)
loaded_model.eval()
model_state_dict = model.state_dict()
loaded_model_state_dict = loaded_model.state_dict()
non_persistent_buffers = {}
for key in loaded_model_state_dict.keys():
if key not in model_state_dict.keys():
non_persistent_buffers[key] = loaded_model_state_dict[key]
loaded_model_state_dict = {
key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
}
self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))
model_buffers = list(model.buffers())
for non_persistent_buffer in non_persistent_buffers.values():
found_buffer = False
for i, model_buffer in enumerate(model_buffers):
if torch.equal(non_persistent_buffer, model_buffer):
found_buffer = True
break
self.assertTrue(found_buffer)
model_buffers.pop(i)
models_equal = True
for layer_name, p1 in model_state_dict.items():
p2 = loaded_model_state_dict[layer_name]
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
@slow
def test_model_from_pretrained(self):
for model_name in OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = OwlViTForObjectDetection.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url, stream=True).raw)
return im
@require_vision
@require_torch
class OwlViTModelIntegrationTest(unittest.TestCase):
@slow
def test_inference(self):
model_name = "google/owlvit-base-patch32"
model = OwlViTModel.from_pretrained(model_name).to(torch_device)
processor = OwlViTProcessor.from_pretrained(model_name)
image = prepare_img()
inputs = processor(
text=[["a photo of a cat", "a photo of a dog"]],
images=image,
max_length=16,
padding="max_length",
return_tensors="pt",
).to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
self.assertEqual(
outputs.logits_per_image.shape,
torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),
)
self.assertEqual(
outputs.logits_per_text.shape,
torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
)
expected_logits = torch.tensor([[3.4613, 0.9403]], device=torch_device)
self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
@slow
def test_inference_object_detection(self):
model_name = "google/owlvit-base-patch32"
model = OwlViTForObjectDetection.from_pretrained(model_name).to(torch_device)
processor = OwlViTProcessor.from_pretrained(model_name)
image = prepare_img()
inputs = processor(
text=[["a photo of a cat", "a photo of a dog"]],
images=image,
max_length=16,
padding="max_length",
return_tensors="pt",
).to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
num_queries = int((model.config.vision_config.image_size / model.config.vision_config.patch_size) ** 2)
self.assertEqual(outputs.pred_boxes.shape, torch.Size((1, num_queries, 4)))
expected_slice_boxes = torch.tensor(
[[0.0691, 0.0445, 0.1373], [0.1592, 0.0456, 0.3192], [0.1632, 0.0423, 0.2478]]
).to(torch_device)
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
@slow
def test_inference_one_shot_object_detection(self):
model_name = "google/owlvit-base-patch32"
model = OwlViTForObjectDetection.from_pretrained(model_name).to(torch_device)
processor = OwlViTProcessor.from_pretrained(model_name)
image = prepare_img()
query_image = prepare_img()
inputs = processor(
images=image,
query_images=query_image,
max_length=16,
padding="max_length",
return_tensors="pt",
).to(torch_device)
with torch.no_grad():
outputs = model.image_guided_detection(**inputs)
num_queries = int((model.config.vision_config.image_size / model.config.vision_config.patch_size) ** 2)
self.assertEqual(outputs.target_pred_boxes.shape, torch.Size((1, num_queries, 4)))
expected_slice_boxes = torch.tensor(
[[0.0691, 0.0445, 0.1373], [0.1592, 0.0456, 0.3192], [0.1632, 0.0423, 0.2478]]
).to(torch_device)
self.assertTrue(torch.allclose(outputs.target_pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
@slow
@require_torch_accelerator
@require_torch_fp16
def test_inference_one_shot_object_detection_fp16(self):
model_name = "google/owlvit-base-patch32"
model = OwlViTForObjectDetection.from_pretrained(model_name, torch_dtype=torch.float16).to(torch_device)
processor = OwlViTProcessor.from_pretrained(model_name)
image = prepare_img()
query_image = prepare_img()
inputs = processor(
images=image,
query_images=query_image,
max_length=16,
padding="max_length",
return_tensors="pt",
).to(torch_device)
with torch.no_grad():
outputs = model.image_guided_detection(**inputs)
# No need to check the logits, we just check inference runs fine.
num_queries = int((model.config.vision_config.image_size / model.config.vision_config.patch_size) ** 2)
self.assertEqual(outputs.target_pred_boxes.shape, torch.Size((1, num_queries, 4)))
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/mvp/test_tokenization_mvp.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = MvpTokenizer
rust_tokenizer_class = MvpTokenizerFast
test_rust_tokenizer = True
from_pretrained_filter = filter_roberta_detectors
# from_pretrained_kwargs = {'add_prefix_space': True}
def setUp(self):
super().setUp()
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
def get_tokenizer(self, **kwargs):
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def get_rust_tokenizer(self, **kwargs):
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
return "lower newer", "lower newer"
@cached_property
def default_tokenizer(self):
return MvpTokenizer.from_pretrained("RUCAIBox/mvp")
@cached_property
def default_tokenizer_fast(self):
return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
@require_torch
def test_prepare_batch(self):
src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
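            # padding=True pads to the longest sequence in the batch, which is the
            # 9 ids of expected_src_tokens, hence the (2, 9) shapes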
self.assertIsInstance(batch, BatchEncoding)
self.assertEqual((2, 9), batch.input_ids.shape)
self.assertEqual((2, 9), batch.attention_mask.shape)
result = batch.input_ids.tolist()[0]
self.assertListEqual(expected_src_tokens, result)
# Test that special tokens are reset
@require_torch
def test_prepare_batch_empty_target_text(self):
src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
batch = tokenizer(src_text, padding=True, return_tensors="pt")
# check if input_ids are returned and no labels
self.assertIn("input_ids", batch)
self.assertIn("attention_mask", batch)
self.assertNotIn("labels", batch)
self.assertNotIn("decoder_attention_mask", batch)
@require_torch
def test_tokenizer_as_target_length(self):
tgt_text = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
self.assertEqual(32, targets["input_ids"].shape[1])
@require_torch
def test_prepare_batch_not_longer_than_maxlen(self):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
batch = tokenizer(
["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
)
self.assertIsInstance(batch, BatchEncoding)
self.assertEqual(batch.input_ids.shape, (2, 1024))
@require_torch
def test_special_tokens(self):
src_text = ["A long paragraph for summarization."]
tgt_text = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
input_ids = inputs["input_ids"]
labels = inputs["labels"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
def test_pretokenized_inputs(self):
pass
def test_embeded_special_tokens(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
sentence = "A, <mask> AllenNLP sentence."
tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                # attention_mask should put 1 everywhere, so its average over the sequence length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
)
tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(
tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
)
self.assertSequenceEqual(
tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
)
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/mvp/test_modeling_mvp.py
|
# coding=utf-8
# Copyright 2021, The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch MVP model. """
import copy
import tempfile
import unittest
import timeout_decorator # noqa
from transformers import MvpConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
require_torch_fp16,
slow,
torch_device,
)
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpTokenizer,
)
from transformers.models.mvp.modeling_mvp import MvpDecoder, MvpEncoder, shift_tokens_right
def prepare_mvp_inputs_dict(
config,
input_ids,
decoder_input_ids=None,
attention_mask=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
):
if attention_mask is None:
attention_mask = input_ids.ne(config.pad_token_id)
if decoder_attention_mask is None:
decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
if head_mask is None:
head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
if decoder_head_mask is None:
decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
if cross_attn_head_mask is None:
cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
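    # by default, attend everywhere except padding and leave every head unmasked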
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class MvpModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=20,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3)
input_ids[:, -1] = self.eos_token_id # Eos Token
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.get_config()
inputs_dict = prepare_mvp_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def get_config(self):
return MvpConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
)
def get_pipeline_config(self):
config = self.get_config()
config.max_position_embeddings = 100
config.vocab_size = 300
return config
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = MvpModel(config=config).get_decoder().to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
head_mask = inputs_dict["head_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append the new tokens and mask to next_input_ids and next_attention_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
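        # run the full extended sequence without cache, and only the new tokens with the
        # cache; the matching slices compared below must agree if caching is correct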
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def check_encoder_decoder_model_standalone(self, config, inputs_dict):
model = MvpModel(config=config).to(torch_device).eval()
outputs = model(**inputs_dict)
encoder_last_hidden_state = outputs.encoder_last_hidden_state
last_hidden_state = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
encoder = model.get_encoder()
encoder.save_pretrained(tmpdirname)
encoder = MvpEncoder.from_pretrained(tmpdirname).to(torch_device)
encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
0
]
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
with tempfile.TemporaryDirectory() as tmpdirname:
decoder = model.get_decoder()
decoder.save_pretrained(tmpdirname)
decoder = MvpDecoder.from_pretrained(tmpdirname).to(torch_device)
last_hidden_state_2 = decoder(
input_ids=inputs_dict["decoder_input_ids"],
attention_mask=inputs_dict["decoder_attention_mask"],
encoder_hidden_states=encoder_last_hidden_state,
encoder_attention_mask=inputs_dict["attention_mask"],
)[0]
self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MvpHeadTests(unittest.TestCase):
vocab_size = 99
def _get_config_and_data(self):
input_ids = torch.tensor(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
],
dtype=torch.long,
device=torch_device,
)
batch_size = input_ids.shape[0]
config = MvpConfig(
vocab_size=self.vocab_size,
d_model=24,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=32,
decoder_ffn_dim=32,
max_position_embeddings=48,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
)
return config, input_ids, batch_size
def test_sequence_classification_forward(self):
config, input_ids, batch_size = self._get_config_and_data()
labels = _long_tensor([2] * batch_size).to(torch_device)
config.num_labels = 3
model = MvpForSequenceClassification(config)
model.to(torch_device)
outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=labels)
expected_shape = torch.Size((batch_size, config.num_labels))
self.assertEqual(outputs["logits"].shape, expected_shape)
self.assertIsInstance(outputs["loss"].item(), float)
def test_question_answering_forward(self):
config, input_ids, batch_size = self._get_config_and_data()
sequence_labels = ids_tensor([batch_size], 2).to(torch_device)
model = MvpForQuestionAnswering(config)
model.to(torch_device)
outputs = model(
input_ids=input_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.assertEqual(outputs["start_logits"].shape, input_ids.shape)
self.assertEqual(outputs["end_logits"].shape, input_ids.shape)
self.assertIsInstance(outputs["loss"].item(), float)
@timeout_decorator.timeout(1)
def test_lm_forward(self):
config, input_ids, batch_size = self._get_config_and_data()
lm_labels = ids_tensor([batch_size, input_ids.shape[1]], self.vocab_size).to(torch_device)
lm_model = MvpForConditionalGeneration(config)
lm_model.to(torch_device)
outputs = lm_model(input_ids=input_ids, labels=lm_labels)
expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape, expected_shape)
self.assertIsInstance(outputs["loss"].item(), float)
def test_lm_uneven_forward(self):
config = MvpConfig(
vocab_size=self.vocab_size,
d_model=14,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=8,
decoder_ffn_dim=8,
max_position_embeddings=48,
)
lm_model = MvpForConditionalGeneration(config).to(torch_device)
context = torch.tensor(
[[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], device=torch_device, dtype=torch.long
)
summary = torch.tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], device=torch_device, dtype=torch.long)
outputs = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary)
expected_shape = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape, expected_shape)
def test_generate_beam_search(self):
input_ids = torch.tensor([[71, 82, 2], [68, 34, 2]], device=torch_device, dtype=torch.long)
config = MvpConfig(
vocab_size=self.vocab_size,
d_model=24,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=32,
decoder_ffn_dim=32,
max_position_embeddings=48,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
)
lm_model = MvpForConditionalGeneration(config).to(torch_device)
lm_model.eval()
max_length = 5
generated_ids = lm_model.generate(
input_ids.clone(),
do_sample=True,
num_return_sequences=1,
num_beams=2,
no_repeat_ngram_size=3,
max_length=max_length,
)
self.assertEqual(generated_ids.shape, (input_ids.shape[0], max_length))
def test_shift_tokens_right(self):
input_ids = torch.tensor([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=torch.long)
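        # shift_tokens_right(input_ids, pad_token_id=1, decoder_start_token_id=2) moves every
        # token one position to the right, prepends the start token and drops the last token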
shifted = shift_tokens_right(input_ids, 1, 2)
n_pad_before = input_ids.eq(1).float().sum()
n_pad_after = shifted.eq(1).float().sum()
self.assertEqual(shifted.shape, input_ids.shape)
self.assertEqual(n_pad_after, n_pad_before - 1)
self.assertTrue(torch.eq(shifted[:, 0], 2).all())
@slow
def test_tokenization(self):
tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp")
examples = [" Hello world", " DomDramg"] # need leading spaces for equality
fairseq_results = [
torch.tensor([0, 20920, 232, 2]),
torch.tensor([0, 11349, 495, 4040, 571, 2]),
]
for ex, desired_result in zip(examples, fairseq_results):
mvp_toks = tokenizer.encode(ex, return_tensors="pt").squeeze()
assert_tensors_close(desired_result.long(), mvp_toks, prefix=ex)
@require_torch_fp16
def test_generate_fp16(self):
config, input_ids, batch_size = self._get_config_and_data()
attention_mask = input_ids.ne(1).to(torch_device)
model = MvpForConditionalGeneration(config).eval().to(torch_device)
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def test_dummy_inputs(self):
config, *_ = self._get_config_and_data()
model = MvpForConditionalGeneration(config).eval().to(torch_device)
model(**model.dummy_inputs)
def test_resize_tokens_embeddings_more(self):
config, input_ids, _ = self._get_config_and_data()
def _get_embs(m):
return (m.get_input_embeddings().weight.data.clone(), m.get_output_embeddings().weight.data.clone())
model = MvpForConditionalGeneration(config).eval().to(torch_device)
input, output = _get_embs(model)
self.assertTrue(torch.eq(input, output).all())
new_vocab_size = 45
model.resize_token_embeddings(new_vocab_size)
input_new, output_new = _get_embs(model)
self.assertEqual(input_new.shape, (new_vocab_size, config.d_model))
self.assertEqual(output_new.shape, (new_vocab_size, config.d_model))
self.assertTrue(torch.eq(input_new, output_new).all())
@require_torch
class MvpModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(MvpModel, MvpForConditionalGeneration, MvpForSequenceClassification, MvpForQuestionAnswering)
if is_torch_available()
else ()
)
all_generative_model_classes = (MvpForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = (
{
"conversational": MvpForConditionalGeneration,
"feature-extraction": MvpModel,
"fill-mask": MvpForConditionalGeneration,
"question-answering": MvpForQuestionAnswering,
"summarization": MvpForConditionalGeneration,
"text-classification": MvpForSequenceClassification,
"text-generation": MvpForCausalLM,
"text2text-generation": MvpForConditionalGeneration,
"translation": MvpForConditionalGeneration,
"zero-shot": MvpForSequenceClassification,
}
if is_torch_available()
else {}
)
is_encoder_decoder = True
fx_compatible = False
test_pruning = False
test_missing_keys = False
# TODO: Fix the failed tests
def is_pipeline_test_to_skip(
self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def setUp(self):
self.model_tester = MvpModelTester(self)
self.config_tester = ConfigTester(self, config_class=MvpConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_encoder_decoder_model_standalone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
# MvpForSequenceClassification does not support inputs_embeds
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MvpModel, MvpForConditionalGeneration, MvpForQuestionAnswering):
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = wte(input_ids)
else:
inputs["inputs_embeds"] = wte(encoder_input_ids)
inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
with torch.no_grad():
model(**inputs)[0]
@require_torch_fp16
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = MvpForConditionalGeneration(config).eval().to(torch_device)
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
        raise AssertionError
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:
msg = f"tensor values are {pct_different:.1%} percent different."
else:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
def _long_tensor(tok_lst):
return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
@require_torch
@require_sentencepiece
@require_tokenizers
class MvpModelIntegrationTests(unittest.TestCase):
@cached_property
def default_tokenizer(self):
return MvpTokenizer.from_pretrained("RUCAIBox/mvp")
@slow
def test_inference_no_head(self):
model = MvpModel.from_pretrained("RUCAIBox/mvp").to(torch_device)
input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
attention_mask = input_ids.ne(model.config.pad_token_id)
with torch.no_grad():
output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
expected_shape = torch.Size((1, 11, 1024))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[0.3461, 0.3624, 0.2689], [0.3461, 0.3624, 0.2689], [-0.1562, 1.1637, -0.3784]], device=torch_device
)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-3))
@slow
def test_summarization_inference(self):
model = MvpForConditionalGeneration.from_pretrained("RUCAIBox/mvp").to(torch_device)
tok = self.default_tokenizer
PGE_ARTICLE = """ Listen to local radio broadcasts for advertisements that reference casinos in your area.\nIf none are in your area, listen to national radio broadcasts for advertisements of casinos in other areas.\nNote the location that is mentioned in each advertisement that involves a casino.\nIf no locations are mentioned, note any additional contact information, such as a website or phone number. Use that information to find out where the casinos are.;\n,\n\nIf you learn about more than 1 casino on the radio, use the Internet to search the distance between your location and each casino. Sites such as maps.google.com or mapquest.com will help you in this search.'""" # fmt: skip
EXPECTED_SUMMARY = "Listen to the radio.\nUse the Internet."
dct = tok.batch_encode_plus(
[PGE_ARTICLE],
return_tensors="pt",
).to(torch_device)
hypotheses_batch = model.generate(**dct)
decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True)
self.assertEqual(EXPECTED_SUMMARY, decoded[0])
class MvpStandaloneDecoderModelTester:
def __init__(
self,
parent,
vocab_size=99,
batch_size=13,
d_model=16,
decoder_seq_length=7,
is_training=True,
is_decoder=True,
use_attention_mask=True,
use_cache=False,
use_labels=True,
decoder_start_token_id=2,
decoder_ffn_dim=32,
decoder_layers=2,
encoder_attention_heads=4,
decoder_attention_heads=4,
max_position_embeddings=30,
is_encoder_decoder=False,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.d_model = d_model
self.hidden_size = d_model
self.num_hidden_layers = decoder_layers
self.decoder_layers = decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_attention_heads = encoder_attention_heads
self.decoder_attention_heads = decoder_attention_heads
self.num_attention_heads = decoder_attention_heads
self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.use_cache = use_cache
self.max_position_embeddings = max_position_embeddings
self.is_encoder_decoder = is_encoder_decoder
self.scope = None
self.decoder_key_length = decoder_seq_length
self.base_model_out_len = 2
self.decoder_attention_idx = 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
config = MvpConfig(
vocab_size=self.vocab_size,
d_model=self.d_model,
encoder_layers=self.decoder_layers,
decoder_layers=self.decoder_layers,
decoder_ffn_dim=self.decoder_ffn_dim,
encoder_attention_heads=self.encoder_attention_heads,
decoder_attention_heads=self.decoder_attention_heads,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
use_cache=self.use_cache,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
max_position_embeddings=self.max_position_embeddings,
is_encoder_decoder=self.is_encoder_decoder,
)
return (
config,
input_ids,
attention_mask,
lm_labels,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
attention_mask,
lm_labels,
) = self.prepare_config_and_inputs()
encoder_hidden_states = floats_tensor([self.batch_size, self.decoder_seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
lm_labels,
)
def create_and_check_decoder_model_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
config.use_cache = True
model = MvpDecoder(config=config).to(torch_device).eval()
# first forward pass
outputs = model(input_ids, use_cache=True)
outputs_use_cache_conf = model(input_ids)
outputs_no_past = model(input_ids, use_cache=False)
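        # config.use_cache is set to True above, so the first two calls return
        # past_key_values and have one more output than the explicit use_cache=False call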
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
past_key_values = outputs["past_key_values"]
        # create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append the next token to input_ids
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def create_and_check_decoder_model_attention_mask_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
model = MvpDecoder(config=config).to(torch_device).eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = input_ids.shape[-1] // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"]
        # create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
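        # the changed position lies in the masked-out second half, so it must not affect
        # the cached/uncached outputs compared below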
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=attn_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
attention_mask,
lm_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class MvpStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (MvpDecoder, MvpForCausalLM) if is_torch_available() else ()
all_generative_model_classes = (MvpForCausalLM,) if is_torch_available() else ()
    fx_compatible = True
test_pruning = False
is_encoder_decoder = False
def setUp(
self,
):
self.model_tester = MvpStandaloneDecoderModelTester(self, is_training=False)
self.config_tester = ConfigTester(self, config_class=MvpConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
def test_decoder_model_attn_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)
def test_retain_grad_hidden_states_attentions(self):
# decoder cannot keep gradients
return
@unittest.skip("The model doesn't support left padding") # and it's not used enough to be worth fixing :)
def test_left_padding_compatibility(self):
pass
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/lxmert/test_modeling_lxmert.py
|
# coding=utf-8
# Copyright 2018 LXMERT Authors, The Hugging Face Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import unittest
import numpy as np
from transformers import LxmertConfig, is_tf_available, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
)
from transformers.models.lxmert.modeling_lxmert import LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_tf_available():
import tensorflow as tf
class LxmertModelTester:
def __init__(
self,
parent,
vocab_size=300,
hidden_size=28,
num_attention_heads=2,
num_labels=2,
intermediate_size=64,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
num_qa_labels=30,
num_object_labels=16,
num_attr_labels=4,
num_visual_features=10,
l_layers=2,
x_layers=1,
r_layers=1,
visual_feat_dim=128,
visual_pos_dim=4,
visual_loss_normalizer=6.67,
seq_length=20,
batch_size=4,
is_training=True,
task_matched=True,
task_mask_lm=True,
task_obj_predict=True,
task_qa=True,
visual_obj_loss=True,
visual_attr_loss=True,
visual_feat_loss=True,
use_token_type_ids=True,
use_lang_mask=True,
output_attentions=False,
output_hidden_states=False,
scope=None,
):
self.parent = parent
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.num_labels = num_labels
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.pad_token_id = pad_token_id
self.num_qa_labels = num_qa_labels
self.num_object_labels = num_object_labels
self.num_attr_labels = num_attr_labels
self.l_layers = l_layers
self.x_layers = x_layers
self.r_layers = r_layers
self.visual_feat_dim = visual_feat_dim
self.visual_pos_dim = visual_pos_dim
self.visual_loss_normalizer = visual_loss_normalizer
self.seq_length = seq_length
self.batch_size = batch_size
self.is_training = is_training
self.use_lang_mask = use_lang_mask
self.task_matched = task_matched
self.task_mask_lm = task_mask_lm
self.task_obj_predict = task_obj_predict
self.task_qa = task_qa
self.visual_obj_loss = visual_obj_loss
self.visual_attr_loss = visual_attr_loss
self.visual_feat_loss = visual_feat_loss
self.num_visual_features = num_visual_features
self.use_token_type_ids = use_token_type_ids
self.output_attentions = output_attentions
self.output_hidden_states = output_hidden_states
self.scope = scope
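        # LXMERT stacks three encoders: language (l_layers), vision (r_layers) and
        # cross-modality (x_layers); expose the split as a dict for the common tests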
self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
def prepare_config_and_inputs(self):
output_attentions = self.output_attentions
input_ids = ids_tensor([self.batch_size, self.seq_length], vocab_size=self.vocab_size)
visual_feats = torch.rand(self.batch_size, self.num_visual_features, self.visual_feat_dim, device=torch_device)
bounding_boxes = torch.rand(self.batch_size, self.num_visual_features, 4, device=torch_device)
input_mask = None
if self.use_lang_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
obj_labels = None
if self.task_obj_predict:
obj_labels = {}
if self.visual_attr_loss and self.task_obj_predict:
obj_labels["attr"] = (
ids_tensor([self.batch_size, self.num_visual_features], self.num_attr_labels),
ids_tensor([self.batch_size, self.num_visual_features], self.num_attr_labels),
)
if self.visual_feat_loss and self.task_obj_predict:
obj_labels["feat"] = (
ids_tensor(
[self.batch_size, self.num_visual_features, self.visual_feat_dim], self.num_visual_features
),
ids_tensor([self.batch_size, self.num_visual_features], self.num_visual_features),
)
if self.visual_obj_loss and self.task_obj_predict:
obj_labels["obj"] = (
ids_tensor([self.batch_size, self.num_visual_features], self.num_object_labels),
ids_tensor([self.batch_size, self.num_visual_features], self.num_object_labels),
)
ans = None
if self.task_qa:
ans = ids_tensor([self.batch_size], self.num_qa_labels)
masked_lm_labels = None
if self.task_mask_lm:
masked_lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
matched_label = None
if self.task_matched:
matched_label = ids_tensor([self.batch_size], self.num_labels)
config = self.get_config()
return (
config,
input_ids,
visual_feats,
bounding_boxes,
token_type_ids,
input_mask,
obj_labels,
masked_lm_labels,
matched_label,
ans,
output_attentions,
)
def get_config(self):
return LxmertConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_attention_heads=self.num_attention_heads,
num_labels=self.num_labels,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
layer_norm_eps=self.layer_norm_eps,
pad_token_id=self.pad_token_id,
num_qa_labels=self.num_qa_labels,
num_object_labels=self.num_object_labels,
num_attr_labels=self.num_attr_labels,
l_layers=self.l_layers,
x_layers=self.x_layers,
r_layers=self.r_layers,
visual_feat_dim=self.visual_feat_dim,
visual_pos_dim=self.visual_pos_dim,
visual_loss_normalizer=self.visual_loss_normalizer,
task_matched=self.task_matched,
task_mask_lm=self.task_mask_lm,
task_obj_predict=self.task_obj_predict,
task_qa=self.task_qa,
visual_obj_loss=self.visual_obj_loss,
visual_attr_loss=self.visual_attr_loss,
visual_feat_loss=self.visual_feat_loss,
output_attentions=self.output_attentions,
output_hidden_states=self.output_hidden_states,
)
def create_and_check_lxmert_model(
self,
config,
input_ids,
visual_feats,
bounding_boxes,
token_type_ids,
input_mask,
obj_labels,
masked_lm_labels,
matched_label,
ans,
output_attentions,
):
model = LxmertModel(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
output_attentions=output_attentions,
)
result = model(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
output_attentions=not output_attentions,
)
result = model(input_ids, visual_feats, bounding_boxes, return_dict=False)
result = model(input_ids, visual_feats, bounding_boxes, return_dict=True)
self.parent.assertEqual(result.language_output.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(
result.vision_output.shape, (self.batch_size, self.num_visual_features, self.hidden_size)
)
self.parent.assertEqual(result.pooled_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_lxmert_for_question_answering(
self,
config,
input_ids,
visual_feats,
bounding_boxes,
token_type_ids,
input_mask,
obj_labels,
masked_lm_labels,
matched_label,
ans,
output_attentions,
):
model = LxmertForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
labels=ans,
output_attentions=output_attentions,
)
result = model(input_ids, visual_feats, bounding_boxes, labels=ans)
result = model(
input_ids,
visual_feats,
bounding_boxes,
labels=ans,
token_type_ids=token_type_ids,
attention_mask=input_mask,
output_attentions=output_attentions,
)
result = model(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
labels=ans,
output_attentions=not output_attentions,
)
self.parent.assertEqual(result.question_answering_score.shape, (self.batch_size, self.num_qa_labels))
def create_and_check_lxmert_for_pretraining(
self,
config,
input_ids,
visual_feats,
bounding_boxes,
token_type_ids,
input_mask,
obj_labels,
masked_lm_labels,
matched_label,
ans,
output_attentions,
):
model = LxmertForPreTraining(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
masked_lm_labels=masked_lm_labels,
obj_labels=obj_labels,
matched_label=matched_label,
ans=ans,
output_attentions=output_attentions,
)
result = model(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
masked_lm_labels=masked_lm_labels,
output_attentions=not output_attentions,
return_dict=False,
)
result = model(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
masked_lm_labels=masked_lm_labels,
)
result = model(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
obj_labels=obj_labels,
)
result = model(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
matched_label=matched_label,
)
result = model(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
ans=ans,
)
result = model(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
masked_lm_labels=masked_lm_labels,
obj_labels=obj_labels,
matched_label=matched_label,
ans=ans,
output_attentions=not output_attentions,
)
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
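    # Exercises resize_num_qa_labels, which rebuilds the answer head: after each
    # resize, the question_answering_score logits of both the pretraining model
    # and the dedicated QA model must track the new label count. Mutating
    # config.num_labels below only feeds the start/end bookkeeping assertions;
    # the already-instantiated models are resized explicitly.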
def resize_lxmert_num_qa_labels(
self,
config,
input_ids,
visual_feats,
bounding_boxes,
token_type_ids,
input_mask,
obj_labels,
masked_lm_labels,
matched_label,
ans,
output_attentions,
):
start_labels = config.num_qa_labels
num_large_labels = config.num_qa_labels * 2
        num_small_labels = int(config.num_qa_labels / 2)
less_labels_ans = ids_tensor([self.batch_size], num_small_labels)
more_labels_ans = ids_tensor([self.batch_size], num_large_labels)
model_pretrain = LxmertForPreTraining(config=config).to(torch_device)
model_qa = LxmertForQuestionAnswering(config=config).to(torch_device)
config.num_labels = num_small_labels
end_labels = config.num_labels
result_pretrain = model_pretrain(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
ans=ans,
)
result_qa = model_qa(
input_ids,
visual_feats,
bounding_boxes,
labels=ans,
token_type_ids=token_type_ids,
attention_mask=input_mask,
)
model_pretrain.resize_num_qa_labels(num_small_labels)
model_qa.resize_num_qa_labels(num_small_labels)
result_pretrain_less = model_pretrain(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
ans=less_labels_ans,
)
result_qa_less = model_qa(
input_ids,
visual_feats,
bounding_boxes,
labels=less_labels_ans,
token_type_ids=token_type_ids,
attention_mask=input_mask,
)
model_pretrain.resize_num_qa_labels(num_large_labels)
model_qa.resize_num_qa_labels(num_large_labels)
result_pretrain_more = model_pretrain(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
ans=more_labels_ans,
)
result_qa_more = model_qa(
input_ids,
visual_feats,
bounding_boxes,
labels=more_labels_ans,
token_type_ids=token_type_ids,
attention_mask=input_mask,
)
model_qa_labels = model_qa.num_qa_labels
self.parent.assertNotEqual(start_labels, end_labels)
self.parent.assertNotEqual(model_qa_labels, start_labels)
self.parent.assertEqual(result_qa.question_answering_score.shape, (self.batch_size, start_labels))
self.parent.assertEqual(result_pretrain.question_answering_score.shape, (self.batch_size, start_labels))
self.parent.assertEqual(result_qa_less.question_answering_score.shape, (self.batch_size, num_small_labels))
self.parent.assertEqual(
result_pretrain_less.question_answering_score.shape, (self.batch_size, num_small_labels)
)
self.parent.assertEqual(result_qa_more.question_answering_score.shape, (self.batch_size, num_large_labels))
self.parent.assertEqual(
result_pretrain_more.question_answering_score.shape, (self.batch_size, num_large_labels)
)
def prepare_config_and_inputs_for_common(self, return_obj_labels=False):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
visual_feats,
bounding_boxes,
token_type_ids,
input_mask,
obj_labels,
masked_lm_labels,
matched_label,
ans,
output_attentions,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"visual_feats": visual_feats,
"visual_pos": bounding_boxes,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
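        # obj_labels are only needed by the object-prediction pretraining head;
        # when they are not requested, disable task_obj_predict so the common
        # tests do not have to supply them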
if return_obj_labels:
inputs_dict["obj_labels"] = obj_labels
else:
config.task_obj_predict = False
return config, inputs_dict
@require_torch
class LxmertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (LxmertModel, LxmertForPreTraining, LxmertForQuestionAnswering) if is_torch_available() else ()
pipeline_model_mapping = (
{"feature-extraction": LxmertModel, "question-answering": LxmertForQuestionAnswering}
if is_torch_available()
else {}
)
fx_compatible = True
test_head_masking = False
test_pruning = False
test_torchscript = False
    # overwrite function because QA models take a different input label shape
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = copy.deepcopy(inputs_dict)
if return_labels:
if model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
inputs_dict["labels"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
elif model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
# special case for models like BERT that use multi-loss training for PreTraining
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
return inputs_dict
def setUp(self):
self.model_tester = LxmertModelTester(self)
self.config_tester = ConfigTester(self, config_class=LxmertConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_lxmert_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lxmert_model(*config_and_inputs)
def test_lxmert_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lxmert_for_question_answering(*config_and_inputs)
def test_lxmert_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lxmert_for_pretraining(*config_and_inputs)
def test_lxmert_question_answering_labels_resize(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.resize_lxmert_num_qa_labels(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = LxmertModel.from_pretrained(model_name)
model.to(torch_device)
self.assertIsNotNone(model)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
seq_len = getattr(self.model_tester, "seq_length", None)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
chunk_length = getattr(self.model_tester, "chunk_length", None)
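        # testers that expose num_hashes (LSH-chunked models such as Reformer)
        # lengthen the effective encoder sequence accordingly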
if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
language_attentions, vision_attentions, cross_encoder_attentions = (outputs[-3], outputs[-2], outputs[-1])
self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers["language"])
self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers["vision"])
self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers["cross_encoder"])
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
language_attentions, vision_attentions, cross_encoder_attentions = (outputs[-3], outputs[-2], outputs[-1])
self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers["language"])
self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers["vision"])
self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers["cross_encoder"])
attentions = [language_attentions, vision_attentions, cross_encoder_attentions]
attention_shapes = [
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
[
self.model_tester.num_attention_heads,
self.model_tester.num_visual_features,
self.model_tester.num_visual_features,
],
[self.model_tester.num_attention_heads, encoder_key_length, self.model_tester.num_visual_features],
]
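            # per-layer shapes: language self-attention, vision self-attention,
            # and cross-modality attention (language-length queries over the
            # visual features)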
for attention, attention_shape in zip(attentions, attention_shapes):
self.assertListEqual(list(attention[0].shape[-3:]), attention_shape)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
# 2 hidden states were added
self.assertEqual(out_len + 2, len(outputs))
language_attentions, vision_attentions, cross_encoder_attentions = (outputs[-3], outputs[-2], outputs[-1])
self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers["language"])
self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers["vision"])
self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers["cross_encoder"])
attentions = [language_attentions, vision_attentions, cross_encoder_attentions]
attention_shapes = [
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
[
self.model_tester.num_attention_heads,
self.model_tester.num_visual_features,
self.model_tester.num_visual_features,
],
[self.model_tester.num_attention_heads, encoder_key_length, self.model_tester.num_visual_features],
]
for attention, attention_shape in zip(attentions, attention_shapes):
self.assertListEqual(list(attention[0].shape[-3:]), attention_shape)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
language_hidden_states, vision_hidden_states = outputs[-2], outputs[-1]
self.assertEqual(len(language_hidden_states), self.model_tester.num_hidden_layers["language"] + 1)
self.assertEqual(len(vision_hidden_states), self.model_tester.num_hidden_layers["vision"] + 1)
seq_length = self.model_tester.seq_length
num_visual_features = self.model_tester.num_visual_features
self.assertListEqual(
list(language_hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
self.assertListEqual(
list(vision_hidden_states[0].shape[-2:]),
[num_visual_features, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**inputs)
hidden_states_lang = outputs.language_hidden_states[0]
attentions_lang = outputs.language_attentions[0]
hidden_states_vision = outputs.vision_hidden_states[0]
attentions_vision = outputs.vision_attentions[0]
hidden_states_lang.retain_grad()
attentions_lang.retain_grad()
hidden_states_vision.retain_grad()
attentions_vision.retain_grad()
outputs.language_output.flatten()[0].backward(retain_graph=True)
outputs.vision_output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(hidden_states_lang.grad)
        self.assertIsNotNone(attentions_lang.grad)
self.assertIsNotNone(hidden_states_vision.grad)
self.assertIsNotNone(attentions_vision.grad)
def prepare_tf_inputs_from_pt_inputs(self, pt_inputs_dict):
tf_inputs_dict = {}
        for key, value in pt_inputs_dict.items():
            # convert nested containers recursively, keeping the pt -> tf direction
            if isinstance(value, dict):
                tf_inputs_dict[key] = self.prepare_tf_inputs_from_pt_inputs(value)
            elif isinstance(value, (list, tuple)):
                tf_inputs_dict[key] = tuple(self.prepare_tf_inputs_from_pt_inputs(iter_value) for iter_value in value)
elif isinstance(value, bool):
tf_inputs_dict[key] = value
elif key == "input_values":
tf_inputs_dict[key] = tf.convert_to_tensor(value.cpu().numpy(), dtype=tf.float32)
elif key == "pixel_values":
tf_inputs_dict[key] = tf.convert_to_tensor(value.cpu().numpy(), dtype=tf.float32)
elif key == "input_features":
tf_inputs_dict[key] = tf.convert_to_tensor(value.cpu().numpy(), dtype=tf.float32)
# other general float inputs
elif value.is_floating_point():
tf_inputs_dict[key] = tf.convert_to_tensor(value.cpu().numpy(), dtype=tf.float32)
else:
tf_inputs_dict[key] = tf.convert_to_tensor(value.cpu().numpy(), dtype=tf.int32)
return tf_inputs_dict
@require_torch
class LxmertModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_no_head_absolute_embedding(self):
model = LxmertModel.from_pretrained(LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
input_ids = torch.tensor([[101, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 102]])
num_visual_features = 10
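        # reseed numpy before each draw so the random visual inputs are reproducible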
        np.random.seed(0)
        visual_feats = np.random.rand(1, num_visual_features, model.config.visual_feat_dim)
        np.random.seed(0)
        visual_pos = np.random.rand(1, num_visual_features, 4)
visual_feats = torch.as_tensor(visual_feats, dtype=torch.float32)
visual_pos = torch.as_tensor(visual_pos, dtype=torch.float32)
output = model(input_ids, visual_feats=visual_feats, visual_pos=visual_pos)[0]
expected_shape = torch.Size([1, 11, 768])
self.assertEqual(expected_shape, output.shape)
expected_slice = torch.tensor(
[[[0.2417, -0.9807, 0.1480], [1.2541, -0.8320, 0.5112], [1.4070, -1.1052, 0.6990]]]
)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
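# A minimal sketch for running just the slow integration test above, assuming a
# source checkout of transformers with its test dependencies installed:
#
#   RUN_SLOW=1 python -m pytest tests/models/lxmert/test_modeling_lxmert.py \
#       -k test_inference_no_head_absolute_embedding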
# File: hf_public_repos/transformers/tests/models/lxmert/test_modeling_tf_lxmert.py
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import tempfile
import unittest
import numpy as np
from transformers import LxmertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.lxmert.modeling_tf_lxmert import TFLxmertForPreTraining, TFLxmertModel
class TFLxmertModelTester:
def __init__(
self,
parent,
vocab_size=300,
hidden_size=28,
num_attention_heads=2,
num_labels=2,
intermediate_size=64,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
num_qa_labels=30,
num_object_labels=16,
num_attr_labels=4,
num_visual_features=10,
l_layers=2,
x_layers=1,
r_layers=1,
visual_feat_dim=128,
visual_pos_dim=4,
visual_loss_normalizer=6.67,
seq_length=20,
batch_size=8,
is_training=True,
task_matched=True,
task_mask_lm=True,
task_obj_predict=True,
task_qa=True,
visual_obj_loss=True,
visual_attr_loss=True,
visual_feat_loss=True,
use_token_type_ids=True,
use_lang_mask=True,
output_attentions=False,
output_hidden_states=False,
scope=None,
):
self.parent = parent
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.num_labels = num_labels
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.pad_token_id = pad_token_id
self.num_qa_labels = num_qa_labels
self.num_object_labels = num_object_labels
self.num_attr_labels = num_attr_labels
self.l_layers = l_layers
self.x_layers = x_layers
self.r_layers = r_layers
self.visual_feat_dim = visual_feat_dim
self.visual_pos_dim = visual_pos_dim
self.visual_loss_normalizer = visual_loss_normalizer
self.seq_length = seq_length
self.batch_size = batch_size
self.is_training = is_training
self.use_lang_mask = use_lang_mask
self.task_matched = task_matched
self.task_mask_lm = task_mask_lm
self.task_obj_predict = task_obj_predict
self.task_qa = task_qa
self.visual_obj_loss = visual_obj_loss
self.visual_attr_loss = visual_attr_loss
self.visual_feat_loss = visual_feat_loss
self.num_visual_features = num_visual_features
self.use_token_type_ids = use_token_type_ids
self.output_attentions = output_attentions
self.output_hidden_states = output_hidden_states
self.scope = scope
self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
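    # Mirrors the PyTorch tester above, but draws the visual features and
    # bounding boxes with tf.random.uniform instead of torch.rand.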
def prepare_config_and_inputs(self):
output_attentions = self.output_attentions
input_ids = ids_tensor([self.batch_size, self.seq_length], vocab_size=self.vocab_size)
visual_feats = tf.random.uniform((self.batch_size, self.num_visual_features, self.visual_feat_dim))
bounding_boxes = tf.random.uniform((self.batch_size, self.num_visual_features, 4))
input_mask = None
if self.use_lang_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
obj_labels = None
if self.task_obj_predict:
obj_labels = {}
if self.visual_attr_loss and self.task_obj_predict:
obj_labels["attr"] = (
ids_tensor([self.batch_size, self.num_visual_features], self.num_attr_labels),
ids_tensor([self.batch_size, self.num_visual_features], self.num_attr_labels),
)
if self.visual_feat_loss and self.task_obj_predict:
obj_labels["feat"] = (
ids_tensor(
[self.batch_size, self.num_visual_features, self.visual_feat_dim], self.num_visual_features
),
ids_tensor([self.batch_size, self.num_visual_features], self.num_visual_features),
)
if self.visual_obj_loss and self.task_obj_predict:
obj_labels["obj"] = (
ids_tensor([self.batch_size, self.num_visual_features], self.num_object_labels),
ids_tensor([self.batch_size, self.num_visual_features], self.num_object_labels),
)
ans = None
if self.task_qa:
ans = ids_tensor([self.batch_size], self.num_qa_labels)
masked_lm_labels = None
if self.task_mask_lm:
masked_lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
matched_label = None
if self.task_matched:
matched_label = ids_tensor([self.batch_size], self.num_labels)
config = LxmertConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_attention_heads=self.num_attention_heads,
num_labels=self.num_labels,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
layer_norm_eps=self.layer_norm_eps,
pad_token_id=self.pad_token_id,
num_qa_labels=self.num_qa_labels,
num_object_labels=self.num_object_labels,
num_attr_labels=self.num_attr_labels,
l_layers=self.l_layers,
x_layers=self.x_layers,
r_layers=self.r_layers,
visual_feat_dim=self.visual_feat_dim,
visual_pos_dim=self.visual_pos_dim,
visual_loss_normalizer=self.visual_loss_normalizer,
task_matched=self.task_matched,
task_mask_lm=self.task_mask_lm,
task_obj_predict=self.task_obj_predict,
task_qa=self.task_qa,
visual_obj_loss=self.visual_obj_loss,
visual_attr_loss=self.visual_attr_loss,
visual_feat_loss=self.visual_feat_loss,
output_attentions=self.output_attentions,
output_hidden_states=self.output_hidden_states,
)
return (
config,
input_ids,
visual_feats,
bounding_boxes,
token_type_ids,
input_mask,
obj_labels,
masked_lm_labels,
matched_label,
ans,
output_attentions,
)
def create_and_check_lxmert_model(
self,
config,
input_ids,
visual_feats,
bounding_boxes,
token_type_ids,
input_mask,
obj_labels,
masked_lm_labels,
matched_label,
ans,
output_attentions,
):
model = TFLxmertModel(config=config)
result = model(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
output_attentions=output_attentions,
)
result = model(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
output_attentions=not output_attentions,
)
result = model(input_ids, visual_feats, bounding_boxes, return_dict=False)
result = model(input_ids, visual_feats, bounding_boxes, return_dict=True)
self.parent.assertEqual(result.language_output.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(
result.vision_output.shape, (self.batch_size, self.num_visual_features, self.hidden_size)
)
self.parent.assertEqual(result.pooled_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self, return_obj_labels=False):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
visual_feats,
bounding_boxes,
token_type_ids,
input_mask,
obj_labels,
masked_lm_labels,
matched_label,
ans,
output_attentions,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"visual_feats": visual_feats,
"visual_pos": bounding_boxes,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
if return_obj_labels:
inputs_dict["obj_labels"] = obj_labels
else:
config.task_obj_predict = False
return config, inputs_dict
def create_and_check_lxmert_for_pretraining(
self,
config,
input_ids,
visual_feats,
bounding_boxes,
token_type_ids,
input_mask,
obj_labels,
masked_lm_labels,
matched_label,
ans,
output_attentions,
):
model = TFLxmertForPreTraining(config=config)
result = model(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
masked_lm_labels=masked_lm_labels,
obj_labels=obj_labels,
matched_label=matched_label,
ans=ans,
output_attentions=output_attentions,
)
result = model(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
masked_lm_labels=masked_lm_labels,
output_attentions=not output_attentions,
return_dict=False,
)
result = model(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
masked_lm_labels=masked_lm_labels,
)
result = model(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
obj_labels=obj_labels,
)
result = model(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
matched_label=matched_label,
)
result = model(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
ans=ans,
)
result = model(
input_ids,
visual_feats,
bounding_boxes,
token_type_ids=token_type_ids,
attention_mask=input_mask,
masked_lm_labels=masked_lm_labels,
obj_labels=obj_labels,
matched_label=matched_label,
ans=ans,
output_attentions=not output_attentions,
)
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
@require_tf
class TFLxmertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (TFLxmertModel, TFLxmertForPreTraining) if is_tf_available() else ()
pipeline_model_mapping = {"feature-extraction": TFLxmertModel} if is_tf_available() else {}
test_head_masking = False
test_onnx = False
def setUp(self):
self.model_tester = TFLxmertModelTester(self)
self.config_tester = ConfigTester(self, config_class=LxmertConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_lxmert_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lxmert_model(*config_and_inputs)
def test_lxmert_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lxmert_for_pretraining(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in ["unc-nlp/lxmert-base-uncased"]:
model = TFLxmertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
encoder_seq_length = (
self.model_tester.encoder_seq_length
if hasattr(self.model_tester, "encoder_seq_length")
else self.model_tester.seq_length
)
encoder_key_length = (
self.model_tester.key_length if hasattr(self.model_tester, "key_length") else encoder_seq_length
)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
language_attentions, vision_attentions, cross_encoder_attentions = (outputs[-3], outputs[-2], outputs[-1])
self.assertEqual(model.config.output_hidden_states, False)
self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers["language"])
self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers["vision"])
self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers["cross_encoder"])
attentions = [language_attentions, vision_attentions, cross_encoder_attentions]
attention_shapes = [
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
[
self.model_tester.num_attention_heads,
self.model_tester.num_visual_features,
self.model_tester.num_visual_features,
],
[self.model_tester.num_attention_heads, encoder_key_length, self.model_tester.num_visual_features],
]
for attention, attention_shape in zip(attentions, attention_shapes):
self.assertListEqual(list(attention[0].shape[-3:]), attention_shape)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
# 2 hidden states were added
self.assertEqual(out_len + 2, len(outputs))
language_attentions, vision_attentions, cross_encoder_attentions = (outputs[-3], outputs[-2], outputs[-1])
self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers["language"])
self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers["vision"])
self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers["cross_encoder"])
attentions = [language_attentions, vision_attentions, cross_encoder_attentions]
attention_shapes = [
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
[
self.model_tester.num_attention_heads,
self.model_tester.num_visual_features,
self.model_tester.num_visual_features,
],
[self.model_tester.num_attention_heads, encoder_key_length, self.model_tester.num_visual_features],
]
for attention, attention_shape in zip(attentions, attention_shapes):
self.assertListEqual(list(attention[0].shape[-3:]), attention_shape)
def test_hidden_states_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_hidden_states_output(config, inputs_dict, model_class):
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
language_hidden_states, vision_hidden_states = outputs[-2], outputs[-1]
self.assertEqual(len(language_hidden_states), self.model_tester.num_hidden_layers["language"] + 1)
self.assertEqual(len(vision_hidden_states), self.model_tester.num_hidden_layers["vision"] + 1)
seq_length = self.model_tester.seq_length
num_visual_features = self.model_tester.num_visual_features
self.assertListEqual(
list(language_hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
self.assertListEqual(
list(vision_hidden_states[0].shape[-2:]),
[num_visual_features, self.model_tester.hidden_size],
)
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(config, inputs_dict, model_class)
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(config, inputs_dict, model_class)
def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict):
import torch
pt_inputs_dict = {}
for key, value in tf_inputs_dict.items():
if isinstance(value, dict):
pt_inputs_dict[key] = self.prepare_pt_inputs_from_tf_inputs(value)
            elif isinstance(value, (list, tuple)):
                pt_inputs_dict[key] = tuple(self.prepare_pt_inputs_from_tf_inputs(iter_value) for iter_value in value)
            elif isinstance(value, bool):
                pt_inputs_dict[key] = value
elif key == "input_values":
pt_inputs_dict[key] = torch.from_numpy(value.numpy()).to(torch.float32)
elif key == "pixel_values":
pt_inputs_dict[key] = torch.from_numpy(value.numpy()).to(torch.float32)
elif key == "input_features":
pt_inputs_dict[key] = torch.from_numpy(value.numpy()).to(torch.float32)
# other general float inputs
elif tf_inputs_dict[key].dtype.is_floating:
pt_inputs_dict[key] = torch.from_numpy(value.numpy()).to(torch.float32)
else:
pt_inputs_dict[key] = torch.from_numpy(value.numpy()).to(torch.long)
return pt_inputs_dict
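    # Overrides the common save/load test so the pretraining model also receives
    # obj_labels, then round-trips through save_pretrained/from_pretrained and
    # checks the outputs are unchanged.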
def test_save_load(self):
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common(
return_obj_labels="PreTraining" in model_class.__name__
)
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model = model_class.from_pretrained(tmpdirname)
after_outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assert_outputs_same(after_outputs, outputs)
@require_tf
class TFLxmertModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_masked_lm(self):
model = TFLxmertModel.from_pretrained("unc-nlp/lxmert-base-uncased")
input_ids = tf.constant([[101, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 102]])
num_visual_features = 10
        np.random.seed(0)
        visual_feats = np.random.rand(1, num_visual_features, model.config.visual_feat_dim)
        np.random.seed(0)
        visual_pos = np.random.rand(1, num_visual_features, 4)
visual_feats = tf.convert_to_tensor(visual_feats, dtype=tf.float32)
visual_pos = tf.convert_to_tensor(visual_pos, dtype=tf.float32)
output = model(input_ids, visual_feats=visual_feats, visual_pos=visual_pos)[0]
expected_shape = [1, 11, 768]
self.assertEqual(expected_shape, output.shape)
expected_slice = tf.constant(
[
[
[0.24170142, -0.98075, 0.14797261],
[1.2540525, -0.83198136, 0.5112344],
[1.4070463, -1.1051831, 0.6990401],
]
]
)
tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
# File: hf_public_repos/transformers/tests/models/lxmert/test_tokenization_lxmert.py
# coding=utf-8
# Copyright 2018 LXMERT Authors, The Hugging Face Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = LxmertTokenizer
rust_tokenizer_class = LxmertTokenizerFast
test_rust_tokenizer = True
space_between_special_tokens = True
def setUp(self):
super().setUp()
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def get_input_output_texts(self, tokenizer):
input_text = "UNwant\u00E9d,running"
output_text = "unwanted, running"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = self.tokenizer_class(self.vocab_file)
tokens = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = "I was born in 92000, and this is falsé."
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
# File: hf_public_repos/transformers/tests/models/instructblip/test_modeling_instructblip.py
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch InstructBLIP model. """
import inspect
import tempfile
import unittest
import numpy as np
import requests
from transformers import (
CONFIG_MAPPING,
InstructBlipConfig,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from transformers.testing_utils import (
require_accelerate,
require_bitsandbytes,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
floats_tensor,
ids_tensor,
random_attention_mask,
)
if is_torch_available():
import torch
from torch import nn
from transformers import InstructBlipForConditionalGeneration, InstructBlipVisionModel
from transformers.models.instructblip.modeling_instructblip import INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class InstructBlipVisionModelTester:
def __init__(
self,
parent,
batch_size=12,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
hidden_size=32,
projection_dim=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
initializer_range=1e-10,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
# in case of a vision transformer, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = self.get_config()
return config, pixel_values
def get_config(self):
return InstructBlipVisionConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, pixel_values):
model = InstructBlipVisionModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(pixel_values)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
image_size = (self.image_size, self.image_size)
patch_size = (self.patch_size, self.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class InstructBlipVisionModelTest(ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as InstructBLIP's vision encoder does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (InstructBlipVisionModel,) if is_torch_available() else ()
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
def setUp(self):
self.model_tester = InstructBlipVisionModelTester(self)
self.config_tester = ConfigTester(
self, config_class=InstructBlipVisionConfig, has_text_modality=False, hidden_size=37
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="InstructBLIP's vision encoder does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_common_attributes(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="InstructBlipVisionModel is an internal building block, doesn't support standalone training")
def test_training(self):
pass
@unittest.skip(reason="InstructBlipVisionModel is an internal building block, doesn't support standalone training")
def test_training_gradient_checkpointing(self):
pass
    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
def test_training_gradient_checkpointing_use_reentrant(self):
pass
    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="InstructBlipVisionModel has no base class and is not available in MODEL_MAPPING")
def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(reason="InstructBlipVisionModel has no base class and is not available in MODEL_MAPPING")
def test_save_load_fast_init_to_base(self):
pass
@slow
def test_model_from_pretrained(self):
for model_name in INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = InstructBlipVisionModel.from_pretrained(model_name)
self.assertIsNotNone(model)
class InstructBlipQFormerModelTester:
def __init__(
self,
parent,
batch_size=12,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
projection_dim=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
max_position_embeddings=512,
initializer_range=0.02,
bos_token_id=0,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = scope
self.bos_token_id = bos_token_id
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
qformer_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
qformer_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
if input_mask is not None:
batch_size, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
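            # turn the random mask into a contiguous prefix mask: ones up to a
            # random split point, zeros afterwards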
for batch_idx, start_index in enumerate(rnd_start_indices):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
config = self.get_config()
return config, input_ids, input_mask, qformer_input_ids, qformer_attention_mask
def get_config(self):
return InstructBlipQFormerConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
bos_token_id=self.bos_token_id,
)
# this class is based on `OPTModelTester` found in tests/models/opt/test_modeling_opt.py
class InstructBlipTextModelDecoderOnlyTester:
def __init__(
self,
parent,
batch_size=12,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=20,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
embed_dim=16,
num_labels=3,
word_embed_proj_dim=16,
type_sequence_label_size=2,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.embed_dim = embed_dim
self.num_labels = num_labels
self.type_sequence_label_size = type_sequence_label_size
self.word_embed_proj_dim = word_embed_proj_dim
self.is_encoder_decoder = False
def prepare_config_and_inputs(self):
config = self.get_config()
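        # clamp(3) keeps every id at or above 3, away from the special tokens
        # (bos=0, pad=1, eos=2), so the eos appended below stays unambiguous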
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3)
input_ids[:, -1] = self.eos_token_id # Eos Token
attention_mask = input_ids.ne(self.pad_token_id)
return config, input_ids, attention_mask
def get_config(self):
return CONFIG_MAPPING["opt"](
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
embed_dim=self.embed_dim,
is_encoder_decoder=False,
word_embed_proj_dim=self.word_embed_proj_dim,
)
# this model tester uses a decoder-only language model (OPT)
class InstructBlipForConditionalGenerationDecoderOnlyModelTester:
def __init__(
self, parent, vision_kwargs=None, qformer_kwargs=None, text_kwargs=None, is_training=True, num_query_tokens=10
):
if vision_kwargs is None:
vision_kwargs = {}
if qformer_kwargs is None:
qformer_kwargs = {}
if text_kwargs is None:
text_kwargs = {}
self.parent = parent
self.vision_model_tester = InstructBlipVisionModelTester(parent, **vision_kwargs)
self.qformer_model_tester = InstructBlipQFormerModelTester(parent, **qformer_kwargs)
self.text_model_tester = InstructBlipTextModelDecoderOnlyTester(parent, **text_kwargs)
self.is_training = is_training
self.num_query_tokens = num_query_tokens
def prepare_config_and_inputs(self):
_, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
_, _, _, qformer_input_ids, qformer_attention_mask = self.qformer_model_tester.prepare_config_and_inputs()
_, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
config = self.get_config()
return config, input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values
def get_config(self):
return InstructBlipConfig.from_vision_qformer_text_configs(
vision_config=self.vision_model_tester.get_config(),
qformer_config=self.qformer_model_tester.get_config(),
text_config=self.text_model_tester.get_config(),
num_query_tokens=self.num_query_tokens,
)
def create_and_check_for_conditional_generation(
self, config, input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values
):
model = InstructBlipForConditionalGeneration(config).to(torch_device).eval()
with torch.no_grad():
result = model(
pixel_values,
input_ids=input_ids,
attention_mask=attention_mask,
qformer_input_ids=qformer_input_ids,
qformer_attention_mask=qformer_attention_mask,
)
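        # the Q-Former's query tokens are projected and prepended to the text
        # embeddings, so the logits cover num_query_tokens + the text sequence length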
expected_seq_length = self.num_query_tokens + self.text_model_tester.seq_length
self.parent.assertEqual(
result.logits.shape,
(self.vision_model_tester.batch_size, expected_seq_length, self.text_model_tester.vocab_size),
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values = config_and_inputs
inputs_dict = {
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": attention_mask,
"qformer_input_ids": qformer_input_ids,
"qformer_attention_mask": qformer_attention_mask,
"labels": input_ids,
}
return config, inputs_dict
@require_torch
class InstructBlipForConditionalGenerationDecoderOnlyTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (InstructBlipForConditionalGeneration,) if is_torch_available() else ()
fx_compatible = False
test_head_masking = False
test_pruning = False
test_resize_embeddings = False
test_attention_outputs = False
test_torchscript = False
def setUp(self):
self.model_tester = InstructBlipForConditionalGenerationDecoderOnlyModelTester(self)
def test_for_conditional_generation(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_conditional_generation(*config_and_inputs)
@unittest.skip(reason="Hidden_states is tested in individual model tests")
def test_hidden_states_output(self):
pass
@unittest.skip(reason="InstructBlipForConditionalGeneration doesn't support inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Tied weights are tested in individual model tests")
def test_tied_weights_keys(self):
pass
@unittest.skip(reason="Retain_grad is tested in individual model tests")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="InstructBlipModel does not have input/output embeddings")
def test_model_common_attributes(self):
pass
@unittest.skip(reason="There's no base InstructBlipModel")
def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(reason="There's no base InstructBlipModel")
def test_save_load_fast_init_to_base(self):
pass
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_load_vision_qformer_text_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Save InstructBlipConfig and check if we can load InstructBlipVisionConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
vision_config = InstructBlipVisionConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())
# Save InstructBlipConfig and check if we can load InstructBlipQFormerConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
qformer_config = InstructBlipQFormerConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.qformer_config.to_dict(), qformer_config.to_dict())
@slow
def test_model_from_pretrained(self):
for model_name in INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST:
model = InstructBlipForConditionalGeneration.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
url = "https://huggingface.co/hf-internal-testing/blip-test-image/resolve/main/demo.jpg"
image = Image.open(requests.get(url, stream=True).raw)
return image
@require_vision
@require_torch
@slow
class InstructBlipModelIntegrationTest(unittest.TestCase):
@require_bitsandbytes
@require_accelerate
def test_inference_vicuna_7b(self):
processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
model = InstructBlipForConditionalGeneration.from_pretrained(
"Salesforce/instructblip-vicuna-7b", load_in_8bit=True, low_cpu_mem_usage=True
)
url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
prompt = "What is unusual about this image?"
inputs = processor(images=image, text=prompt, return_tensors="pt").to(torch_device, torch.float16)
# verify logits
with torch.no_grad():
logits = model(**inputs).logits
expected_slice = torch.tensor(
[[-3.4902, -12.5078, 8.4141], [-5.1211, -12.1328, 7.8281], [-4.0312, -13.5938, 9.1172]],
device=torch_device,
)
self.assertTrue(torch.allclose(logits[0, :3, :3].float(), expected_slice, atol=1e-3))
# verify generation
outputs = model.generate(**inputs, max_new_tokens=30)
generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
expected_outputs = [2, 450, 22910, 9565, 310, 445, 1967, 338, 393, 263, 767, 338, 13977, 292, 22095, 373, 278, 1250, 310, 263, 13328, 20134, 29963, 1550, 19500, 1623, 263, 19587, 4272, 11952, 29889] # fmt: skip
self.assertEqual(outputs[0].tolist(), expected_outputs)
self.assertEqual(
generated_text,
"The unusual aspect of this image is that a man is ironing clothes on the back of a yellow SUV while driving down a busy city street.",
)
def test_inference_flant5_xl(self):
processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
model = InstructBlipForConditionalGeneration.from_pretrained(
"Salesforce/instructblip-flan-t5-xl",
torch_dtype=torch.bfloat16,
low_cpu_mem_usage=True,
).to(torch_device)
url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
prompt = "What is unusual about this image?"
inputs = processor(images=image, text=prompt, return_tensors="pt").to(torch_device)
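        # cast the floating-point inputs (pixel_values) to bfloat16 to match the model weights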
for k, v in inputs.items():
if torch.is_floating_point(v):
inputs[k] = v.to(torch.bfloat16)
outputs = model.generate(
**inputs,
do_sample=False,
num_beams=5,
max_length=256,
min_length=1,
top_p=0.9,
repetition_penalty=1.5,
length_penalty=1.0,
temperature=1,
)
generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0]
expected_outputs = [0, 37, 1023, 9850, 7, 3, 9, 388, 3575, 53, 4954, 30, 8, 223, 13, 3, 9, 4459, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 37, 388, 19, 5119, 3, 9, 4459, 8677, 28, 3, 9, 2756, 4459, 6177, 6, 11, 3, 88, 19, 338, 46, 3575, 53, 1476, 12, 743, 112, 2491, 5, 37, 1023, 19, 7225, 788, 12, 8, 685, 24, 34, 1267, 3, 9, 388, 3575, 53, 4954, 30, 8, 223, 13, 3, 9, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 94, 19, 487, 24, 8, 388, 19, 1119, 12, 1097, 540, 57, 692, 112, 10428, 30, 8, 223, 13, 8, 4049, 6, 68, 34, 19, 92, 487, 24, 3, 88, 19, 1119, 12, 1097, 97, 57, 692, 112, 10428, 30, 8, 223, 13, 8, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 3, 13865, 13, 8, 1053, 21, 8, 388, 31, 7, 2874, 6, 34, 19, 964, 24, 3, 88, 19, 1119, 12, 1097, 97, 57, 692, 112, 10428, 30, 8, 223, 13, 8, 4049, 16, 8, 2214, 13, 3, 9, 3164, 690, 2815, 5, 1] # fmt: skip
self.assertEqual(outputs[0].tolist(), expected_outputs)
self.assertEqual(
generated_text,
"The image depicts a man ironing clothes on the back of a yellow van in the middle of a busy city street. The man is wearing a yellow shirt with a bright yellow tie, and he is using an ironing board to complete his task. The image is unusual due to the fact that it shows a man ironing clothes on the back of a van in the middle of a busy city street. It is possible that the man is trying to save money by doing his laundry on the back of the van, but it is also possible that he is trying to save time by doing his laundry on the back of the van in the middle of a busy city street. Regardless of the reason for the man's actions, it is clear that he is trying to save time by doing his laundry on the back of the van in the middle of a busy city street.",
)
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/instructblip/test_processor_instructblip.py
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
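        # assemble a processor from tiny hub test components and save it, so the getters below
        # can reload each part through AutoProcessor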
image_processor = BlipImageProcessor()
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")
processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
processor.save_pretrained(self.tmpdirname)
def get_tokenizer(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
def get_image_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
def get_qformer_tokenizer(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def prepare_image_inputs(self):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
"""
image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def test_save_load_pretrained_additional_features(self):
processor = InstructBlipProcessor(
tokenizer=self.get_tokenizer(),
image_processor=self.get_image_processor(),
qformer_tokenizer=self.get_qformer_tokenizer(),
)
processor.save_pretrained(self.tmpdirname)
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
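        # kwargs passed to from_pretrained should be routed to the right component:
        # bos/eos tokens to the tokenizer, do_normalize/padding_value to the image processor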
processor = InstructBlipProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, BlipImageProcessor)
self.assertIsInstance(processor.qformer_tokenizer, BertTokenizerFast)
def test_image_processor(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
qformer_tokenizer = self.get_qformer_tokenizer()
processor = InstructBlipProcessor(
tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
)
image_input = self.prepare_image_inputs()
input_feat_extract = image_processor(image_input, return_tensors="np")
input_processor = processor(images=image_input, return_tensors="np")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
def test_tokenizer(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
qformer_tokenizer = self.get_qformer_tokenizer()
processor = InstructBlipProcessor(
tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
)
input_str = "lower newer"
encoded_processor = processor(text=input_str)
encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)
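        # the processor encodes text with both tokenizers; the Q-Former encoding is exposed
        # under "qformer_"-prefixed keys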
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key], encoded_processor[key])
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])
def test_processor(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
qformer_tokenizer = self.get_qformer_tokenizer()
processor = InstructBlipProcessor(
tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
)
input_str = "lower newer"
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input)
self.assertListEqual(
list(inputs.keys()),
["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
)
# test if it raises when no input is passed
with pytest.raises(ValueError):
processor()
def test_tokenizer_decode(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
qformer_tokenizer = self.get_qformer_tokenizer()
processor = InstructBlipProcessor(
tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
)
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids)
decoded_tok = tokenizer.batch_decode(predicted_ids)
self.assertListEqual(decoded_tok, decoded_processor)
def test_model_input_names(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
qformer_tokenizer = self.get_qformer_tokenizer()
processor = InstructBlipProcessor(
tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
)
input_str = "lower newer"
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input)
self.assertListEqual(
list(inputs.keys()),
["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
)
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/rag/test_tokenization_rag.py
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
self.retrieval_vector_size = 8
# DPR tok
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
os.makedirs(dpr_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
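        # a minimal byte-level BPE vocabulary and merge rules, just enough to tokenize the test strings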
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
os.makedirs(bart_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
def get_bart_tokenizer(self) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
@require_tokenizers
def test_save_load_pretrained_with_saved_config(self):
save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
rag_config.save_pretrained(save_dir)
rag_tokenizer.save_pretrained(save_dir)
new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
@slow
def test_pretrained_token_nq_tokenizer(self):
tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
input_dict = tokenizer(input_strings)
self.assertIsNotNone(input_dict)
@slow
def test_pretrained_sequence_nq_tokenizer(self):
tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
input_dict = tokenizer(input_strings)
self.assertIsNotNone(input_dict)
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/rag/test_modeling_tf_rag.py
|
from __future__ import annotations
import json
import os
import shutil
import tempfile
import unittest
from unittest.mock import patch
import numpy as np
from transformers import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.tokenization_dpr import DPRQuestionEncoderTokenizer
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property, is_datasets_available, is_faiss_available, is_tf_available
if is_tf_available() and is_datasets_available() and is_faiss_available():
import faiss
import tensorflow as tf
from datasets import Dataset
from transformers import (
AutoConfig,
RagConfig,
RagRetriever,
RagTokenizer,
TFAutoModel,
TFAutoModelForSeq2SeqLM,
TFRagModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
from transformers.modeling_tf_outputs import TFBaseModelOutput
from ..bart.test_modeling_tf_bart import TFBartModelTester
from ..dpr.test_modeling_tf_dpr import TFDPRModelTester
TOLERANCE = 1e-3
def require_retrieval(test_case):
"""
    Decorator marking a test that requires the dependencies needed to perform retrieval with
    [`RagRetriever`].

    These tests are skipped when the respective libraries are not installed.
"""
if not (is_tf_available() and is_datasets_available() and is_faiss_available()):
test_case = unittest.skip("test requires tensorflow, datasets and faiss")(test_case)
return test_case
@require_tf
@require_retrieval
@require_sentencepiece
class TFRagTestMixin:
all_model_classes = (
(TFRagModel, TFRagTokenForGeneration, TFRagSequenceForGeneration)
if is_tf_available() and is_datasets_available() and is_faiss_available()
else ()
)
all_generative_model_classes = (
(TFRagTokenForGeneration, TFRagSequenceForGeneration)
if is_tf_available() and is_datasets_available() and is_faiss_available()
else ()
)
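    # small retrieval settings (tiny embedding vectors, 3 docs, short combined contexts) keep these tests fast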
retrieval_vector_size = 32
n_docs = 3
max_combined_length = 16
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
# DPR tok
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
os.makedirs(dpr_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
os.makedirs(bart_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
@cached_property
def dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
@cached_property
def bart_tokenizer(self) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def get_retriever(self, config):
dataset = Dataset.from_dict(
{
"id": ["0", "1", "3"],
"text": ["foo", "bar", "qux"],
"title": ["Foo", "Bar", "Qux"],
"embeddings": [
np.ones(self.retrieval_vector_size),
2 * np.ones(self.retrieval_vector_size),
3 * np.ones(self.retrieval_vector_size),
],
}
)
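        # constant embedding vectors (all 1s, 2s and 3s) make the inner-product ranking deterministic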
dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
tokenizer = self.bart_tokenizer
with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
mock_load_dataset.return_value = dataset
retriever = RagRetriever(
config,
question_encoder_tokenizer=self.dpr_tokenizer,
generator_tokenizer=tokenizer,
)
return retriever
def check_model_with_retriever(
self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs
):
self.assertIsNotNone(config.question_encoder)
self.assertIsNotNone(config.generator)
for model_class in self.all_model_classes:
model = model_class(config, retriever=self.get_retriever(config))
self.assertTrue(model.config.is_encoder_decoder)
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
# logits
self.assertEqual(
outputs.logits.shape,
(self.n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size),
)
# generator encoder last hidden states
self.assertEqual(
outputs.generator_enc_last_hidden_state.shape,
(self.n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size),
)
# doc scores
self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs))
def check_model_generate_from_context_input_ids(
self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs
):
self.assertIsNotNone(config.question_encoder)
self.assertIsNotNone(config.generator)
retriever = self.get_retriever(config)
        for model_class in self.all_generative_model_classes:
model = model_class(config)
self.assertTrue(model.config.is_encoder_decoder)
question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0]
out = retriever(
input_ids,
question_hidden_states.numpy(),
prefix=config.generator.prefix,
return_tensors="tf",
)
context_input_ids, context_attention_mask, retrieved_doc_embeds = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32)
# compute doc_scores
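            # question_hidden_states: [batch, dim] -> [batch, 1, dim]; retrieved_doc_embeds: [batch, n_docs, dim]
            # matmul(..., transpose_b=True) yields [batch, 1, n_docs]; squeezing gives doc_scores of shape [batch, n_docs]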
doc_scores = tf.squeeze(
tf.matmul(tf.expand_dims(question_hidden_states, axis=[1]), retrieved_doc_embeds, transpose_b=True),
axis=[1],
)
outputs = model.generate(
context_input_ids=context_input_ids,
context_attention_mask=context_attention_mask,
doc_scores=doc_scores,
)
self.assertIsNotNone(outputs)
def check_model_generate(
self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs
):
self.assertIsNotNone(config.question_encoder)
self.assertIsNotNone(config.generator)
for model_class in self.all_generative_model_classes:
model = model_class(config, retriever=self.get_retriever(config))
self.assertTrue(model.config.is_encoder_decoder)
input_ids = tf.cast(input_ids, tf.int32)
outputs = model.generate(
input_ids=input_ids,
num_beams=2,
num_return_sequences=2,
decoder_start_token_id=config.generator.eos_token_id,
max_new_tokens=5,
)
self.assertIsNotNone(outputs)
def check_model_without_retriever(
self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs
):
self.assertIsNotNone(config.question_encoder)
self.assertIsNotNone(config.generator)
retriever = self.get_retriever(config)
for model_class in self.all_model_classes:
model = model_class(config)
self.assertTrue(model.config.is_encoder_decoder)
question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0]
out = retriever(
input_ids,
question_hidden_states.numpy(),
prefix=config.generator.prefix,
return_tensors="tf",
)
context_input_ids, context_attention_mask, retrieved_doc_embeds = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32)
# compute doc_scores
doc_scores = tf.squeeze(
tf.matmul(tf.expand_dims(question_hidden_states, axis=[1]), retrieved_doc_embeds, transpose_b=True),
axis=[1],
)
outputs = model(
input_ids=None,
context_input_ids=context_input_ids,
context_attention_mask=context_attention_mask,
doc_scores=doc_scores,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
# logits
self.assertEqual(
outputs.logits.shape,
(self.n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size),
)
# generator encoder last hidden states
self.assertEqual(
outputs.generator_enc_last_hidden_state.shape,
(self.n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size),
)
# doc scores
self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs))
def check_model_custom_n_docs(
self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, n_docs, **kwargs
):
self.assertIsNotNone(config.question_encoder)
self.assertIsNotNone(config.generator)
retriever = self.get_retriever(config)
for model_class in self.all_model_classes:
model = model_class(config)
self.assertTrue(model.config.is_encoder_decoder)
question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0]
out = retriever(
input_ids,
question_hidden_states.numpy(),
prefix=config.generator.prefix,
return_tensors="tf",
n_docs=n_docs,
)
context_input_ids, context_attention_mask, retrieved_doc_embeds = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32)
# compute doc_scores
doc_scores = tf.squeeze(
tf.matmul(tf.expand_dims(question_hidden_states, axis=[1]), retrieved_doc_embeds, transpose_b=True),
axis=[1],
)
outputs = model(
input_ids=None,
context_input_ids=context_input_ids,
context_attention_mask=context_attention_mask,
doc_scores=doc_scores,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
n_docs=n_docs,
)
# logits
self.assertEqual(
outputs.logits.shape,
(n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size),
)
# generator encoder last hidden states
self.assertEqual(
outputs.generator_enc_last_hidden_state.shape,
(n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size),
)
# doc scores
self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], n_docs))
def check_model_with_mismatch_n_docs_value(
self,
config,
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask,
retriever_n_docs,
generator_n_docs,
**kwargs,
):
self.assertIsNotNone(config.question_encoder)
self.assertIsNotNone(config.generator)
retriever = self.get_retriever(config)
for model_class in self.all_model_classes:
model = model_class(config)
self.assertTrue(model.config.is_encoder_decoder)
question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0]
out = retriever(
input_ids,
question_hidden_states.numpy(),
prefix=config.generator.prefix,
return_tensors="tf",
n_docs=retriever_n_docs,
)
context_input_ids, context_attention_mask, retrieved_doc_embeds = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32)
# compute doc_scores
doc_scores = tf.squeeze(
tf.matmul(tf.expand_dims(question_hidden_states, axis=[1]), retrieved_doc_embeds, transpose_b=True),
axis=[1],
)
self.assertRaises(
AssertionError,
model.__call__,
input_ids=None,
context_input_ids=context_input_ids,
context_attention_mask=context_attention_mask,
doc_scores=doc_scores,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
n_docs=generator_n_docs,
)
def check_model_with_encoder_outputs(
self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs
):
self.assertIsNotNone(config.question_encoder)
self.assertIsNotNone(config.generator)
for model_class in self.all_model_classes:
model = model_class(config, retriever=self.get_retriever(config))
self.assertTrue(model.config.is_encoder_decoder)
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
encoder_outputs = TFBaseModelOutput(outputs.generator_enc_last_hidden_state)
# run only generator
outputs = model(
input_ids=None,
encoder_outputs=encoder_outputs,
doc_scores=outputs.doc_scores,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
# logits
self.assertEqual(
outputs.logits.shape,
(self.n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size),
)
# generator encoder last hidden states
self.assertEqual(
outputs.generator_enc_last_hidden_state.shape,
(self.n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size),
)
# doc scores
self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs))
def test_model_with_retriever(self):
inputs_dict = self.config_and_inputs
self.check_model_with_retriever(**inputs_dict)
def test_model_without_retriever(self):
inputs_dict = self.config_and_inputs
self.check_model_without_retriever(**inputs_dict)
@slow
def test_model_generate_from_context_input_ids(self):
inputs_dict = self.config_and_inputs
self.check_model_generate_from_context_input_ids(**inputs_dict)
def test_model_with_encoder_outputs(self):
inputs_dict = self.config_and_inputs
self.check_model_with_encoder_outputs(**inputs_dict)
@slow
def test_model_generate(self):
inputs_dict = self.config_and_inputs
self.check_model_generate(**inputs_dict)
def test_model_with_custom_n_docs(self):
inputs_dict = self.config_and_inputs
inputs_dict["n_docs"] = 1
self.check_model_custom_n_docs(**inputs_dict)
def test_model_with_mismatch_n_docs_value(self):
inputs_dict = self.config_and_inputs
inputs_dict["retriever_n_docs"] = 3
inputs_dict["generator_n_docs"] = 2
self.check_model_with_mismatch_n_docs_value(**inputs_dict)
@require_tf
@require_retrieval
class TFRagDPRBartTest(TFRagTestMixin, unittest.TestCase):
@cached_property
def config_and_inputs(self):
question_encoder_tester = TFDPRModelTester(self)
dpr_config_and_inputs = question_encoder_tester.prepare_config_and_inputs()
generator_tester = TFBartModelTester(self)
bart_config_and_inputs = generator_tester.prepare_config_and_inputs_for_common()
(question_encoder_config, input_ids, _, input_mask, _, _, _) = dpr_config_and_inputs
(generator_config, bart_inputs_dict) = bart_config_and_inputs
decoder_input_ids, decoder_attention_mask = bart_inputs_dict["input_ids"], bart_inputs_dict["attention_mask"]
config = RagConfig.from_question_encoder_generator_configs(
question_encoder_config,
generator_config,
n_docs=self.n_docs,
retrieval_vector_size=self.retrieval_vector_size,
max_combined_length=self.max_combined_length,
)
return {
"config": config,
"input_ids": input_ids,
"attention_mask": input_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
}
@require_tf
@require_retrieval
@require_sentencepiece
@require_tokenizers
class TFRagModelIntegrationTests(unittest.TestCase):
@cached_property
def token_model(self):
return TFRagTokenForGeneration.from_pretrained_question_encoder_generator(
"facebook/dpr-question_encoder-single-nq-base", "facebook/bart-large-cnn"
)
@cached_property
def sequence_model(self):
return TFRagSequenceForGeneration.from_pretrained_question_encoder_generator(
"facebook/dpr-question_encoder-single-nq-base", "facebook/bart-large-cnn"
)
def token_model_nq_checkpoint(self, retriever):
return TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)
def get_rag_config(self):
question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_config = AutoConfig.from_pretrained("facebook/bart-large-cnn")
return RagConfig.from_question_encoder_generator_configs(
question_encoder_config,
generator_config,
bos_token_id=0,
decoder_start_token_id=2,
eos_token_id=2,
is_encoder_decoder=True,
pad_token_id=1,
vocab_size=50264,
title_sep=" / ",
doc_sep=" // ",
n_docs=5,
max_combined_length=300,
dataset="wiki_dpr",
dataset_split="train",
index_name="exact",
index_path=None,
use_dummy_dataset=True,
retrieval_vector_size=768,
retrieval_batch_size=8,
)
@slow
def test_rag_sequence_inference(self):
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
rag_sequence = self.sequence_model
rag_sequence.set_retriever(rag_retriever)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="tf"
).input_ids
decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids
output = rag_sequence(
input_ids,
labels=decoder_input_ids,
)
expected_shape = tf.TensorShape([5, 5, 50264])
self.assertEqual(output.logits.shape, expected_shape)
expected_doc_scores = tf.convert_to_tensor([[75.0286, 74.4998, 74.0804, 74.0306, 73.9504]])
expected_loss = tf.convert_to_tensor([36.7368])
tf.debugging.assert_near(output.loss, expected_loss, atol=1e-3)
tf.debugging.assert_near(output.doc_scores, expected_doc_scores, atol=1e-3)
@slow
def test_rag_token_inference(self):
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
rag_token = self.token_model
rag_token.set_retriever(rag_retriever)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="tf"
).input_ids
decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids
output = rag_token(
input_ids,
labels=decoder_input_ids,
)
expected_shape = tf.TensorShape([5, 5, 50264])
self.assertEqual(output.logits.shape, expected_shape)
expected_doc_scores = tf.convert_to_tensor([[75.0286, 74.4998, 74.0804, 74.0306, 73.9504]])
expected_loss = tf.convert_to_tensor([36.3557])
tf.debugging.assert_near(output.loss, expected_loss, atol=1e-3)
tf.debugging.assert_near(output.doc_scores, expected_doc_scores, atol=1e-3)
@slow
def test_rag_token_inference_nq_checkpoint(self):
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
rag_token = self.token_model_nq_checkpoint(retriever=rag_retriever)
# check that outputs after saving and loading are equal
with tempfile.TemporaryDirectory() as tmpdirname:
rag_token.save_pretrained(tmpdirname)
rag_token = TFRagTokenForGeneration.from_pretrained(tmpdirname, retriever=rag_retriever)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="tf"
).input_ids
decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids
output = rag_token(
input_ids,
labels=decoder_input_ids,
)
expected_shape = tf.TensorShape([5, 5, 50265])
self.assertEqual(output.logits.shape, expected_shape)
expected_doc_scores = tf.convert_to_tensor([[62.9402, 62.7107, 62.2382, 62.1194, 61.8578]])
expected_loss = tf.convert_to_tensor([32.521812])
tf.debugging.assert_near(output.loss, expected_loss, atol=1e-3)
tf.debugging.assert_near(output.doc_scores, expected_doc_scores, atol=1e-3)
@slow
def test_rag_token_inference_save_pretrained(self):
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
rag_token = self.token_model
rag_token.set_retriever(rag_retriever)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="tf"
).input_ids
decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids
# model must run once to be functional before loading/saving works
rag_token(
input_ids,
labels=decoder_input_ids,
)
# check that outputs after saving and loading are equal
with tempfile.TemporaryDirectory() as tmpdirname:
rag_token.save_pretrained(tmpdirname)
rag_token = TFRagTokenForGeneration.from_pretrained(tmpdirname, retriever=rag_retriever)
output = rag_token(
input_ids,
labels=decoder_input_ids,
)
expected_shape = tf.TensorShape([5, 5, 50264])
self.assertEqual(output.logits.shape, expected_shape)
expected_doc_scores = tf.convert_to_tensor([[75.0286, 74.4998, 74.0804, 74.0306, 73.9504]])
expected_loss = tf.convert_to_tensor([36.3557])
tf.debugging.assert_near(output.loss, expected_loss, atol=1e-3)
tf.debugging.assert_near(output.doc_scores, expected_doc_scores, atol=1e-3)
@slow
def test_init_and_from_pretrained(self):
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
rag_config = RagConfig.from_pretrained("facebook/rag-sequence-base")
rag = TFRagTokenForGeneration(rag_config, retriever=rag_retriever)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="tf"
).input_ids
decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids
rag(
input_ids,
decoder_input_ids=decoder_input_ids,
)
# this should not give any warnings
with tempfile.TemporaryDirectory() as tmpdirname:
rag.save_pretrained(tmpdirname)
rag = TFRagTokenForGeneration.from_pretrained(tmpdirname, retriever=rag_retriever)
@property
def test_data_questions(self):
return [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
]
@slow
def test_rag_token_greedy_search(self):
tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True)
rag_token = TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)
# check first two questions
input_dict = tokenizer(
self.test_data_questions[:2],
return_tensors="tf",
padding=True,
truncation=True,
)
input_ids = input_dict.input_ids
attention_mask = input_dict.attention_mask
# make sure only 1 beam is used
rag_token.config.num_beams = 1
output_ids = rag_token.generate(
input_ids,
attention_mask=attention_mask,
)
outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
EXPECTED_OUTPUTS = [
" albert einstein",
" september 22, 2017",
]
self.assertListEqual(outputs, EXPECTED_OUTPUTS)
@slow
def test_rag_token_generate_batch(self):
        # NOTE: gold labels come from num_beams=4, so this is effectively a beam-search test
tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True)
rag_token = TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)
input_dict = tokenizer(
self.test_data_questions,
return_tensors="tf",
padding=True,
truncation=True,
)
input_ids = input_dict.input_ids
attention_mask = input_dict.attention_mask
EXPECTED_OUTPUTS = [
" albert einstein",
" september 22, 2017",
" amplitude modulation",
" stefan persson",
" april 20, 2018",
" the 1970s",
" 7.1. 2",
" 13",
]
# Split into 2 batches of 4 examples to avoid GPU OOM.
output_ids = rag_token.generate(
input_ids[:4],
attention_mask=attention_mask[:4],
)
outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
self.assertListEqual(outputs, EXPECTED_OUTPUTS[:4])
output_ids = rag_token.generate(
input_ids[4:],
attention_mask=attention_mask[4:],
)
outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
self.assertListEqual(outputs, EXPECTED_OUTPUTS[4:])
@slow
def test_rag_sequence_generate_batch(self):
tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
retriever = RagRetriever.from_pretrained(
"facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
)
rag_sequence = TFRagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever)
input_dict = tokenizer(
self.test_data_questions,
return_tensors="tf",
padding=True,
truncation=True,
)
input_ids = input_dict.input_ids
attention_mask = input_dict.attention_mask
output_ids = rag_sequence.generate(
input_ids,
attention_mask=attention_mask,
)
outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
EXPECTED_OUTPUTS = [
" albert einstein",
" june 22, 2018",
" amplitude modulation",
" tim besley ( chairman )",
" june 20, 2018",
" 1980",
" 7.0",
" 8",
]
self.assertListEqual(outputs, EXPECTED_OUTPUTS)
@slow
def test_rag_sequence_generate_batch_from_context_input_ids(self):
tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
retriever = RagRetriever.from_pretrained(
"facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
)
rag_sequence = TFRagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever)
input_dict = tokenizer(
self.test_data_questions,
return_tensors="tf",
padding=True,
truncation=True,
)
input_ids = input_dict.input_ids
question_hidden_states = rag_sequence.question_encoder(input_ids)[0]
docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf")
doc_scores = tf.squeeze(
tf.matmul(
tf.expand_dims(question_hidden_states, axis=[1]), docs_dict["retrieved_doc_embeds"], transpose_b=True
),
axis=[1],
)
output_ids = rag_sequence.generate(
context_input_ids=docs_dict["context_input_ids"],
context_attention_mask=docs_dict["context_attention_mask"],
doc_scores=doc_scores,
do_deduplication=True,
)
outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
EXPECTED_OUTPUTS = [
" albert einstein",
" june 22, 2018",
" amplitude modulation",
" tim besley ( chairman )",
" june 20, 2018",
" 1980",
" 7.0",
" 8",
]
self.assertListEqual(outputs, EXPECTED_OUTPUTS)
@require_tf
@require_retrieval
class TFRagModelSaveLoadTests(unittest.TestCase):
def get_rag_config(self):
question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_config = AutoConfig.from_pretrained("facebook/bart-large-cnn")
return RagConfig.from_question_encoder_generator_configs(
question_encoder_config,
generator_config,
bos_token_id=0,
decoder_start_token_id=2,
eos_token_id=2,
is_encoder_decoder=True,
pad_token_id=1,
vocab_size=50264,
title_sep=" / ",
doc_sep=" // ",
n_docs=5,
max_combined_length=300,
dataset="wiki_dpr",
dataset_split="train",
index_name="exact",
index_path=None,
use_dummy_dataset=True,
retrieval_vector_size=768,
retrieval_batch_size=8,
)
@slow
def test_rag_sequence_from_pretrained(self):
load_weight_prefix = "tf_rag_model_1"
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="tf"
).input_ids
decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids
with tempfile.TemporaryDirectory() as tmp_dirname:
rag_sequence = TFRagSequenceForGeneration.from_pretrained_question_encoder_generator(
"facebook/dpr-question_encoder-single-nq-base",
"facebook/bart-large-cnn",
retriever=rag_retriever,
config=rag_config,
)
# check that the from pretrained methods work
rag_sequence.save_pretrained(tmp_dirname)
rag_sequence.from_pretrained(tmp_dirname, retriever=rag_retriever)
output = rag_sequence(input_ids, labels=decoder_input_ids)
loss_pretrained = output.loss
del rag_sequence
question_encoder = TFAutoModel.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator = TFAutoModelForSeq2SeqLM.from_pretrained(
"facebook/bart-large-cnn", load_weight_prefix=load_weight_prefix, name="generator"
)
rag_sequence = TFRagSequenceForGeneration(
config=rag_config, question_encoder=question_encoder, generator=generator, retriever=rag_retriever
)
output = rag_sequence(input_ids, labels=decoder_input_ids)
loss_init = output.loss
self.assertAlmostEqual(loss_pretrained, loss_init, places=4)
@slow
def test_rag_token_from_pretrained(self):
load_weight_prefix = "tf_rag_model_1"
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="tf"
).input_ids
decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids
with tempfile.TemporaryDirectory() as tmp_dirname:
rag_token = TFRagTokenForGeneration.from_pretrained_question_encoder_generator(
"facebook/dpr-question_encoder-single-nq-base",
"facebook/bart-large-cnn",
retriever=rag_retriever,
config=rag_config,
)
# check that the from pretrained methods work
rag_token.save_pretrained(tmp_dirname)
rag_token.from_pretrained(tmp_dirname, retriever=rag_retriever)
output = rag_token(input_ids, labels=decoder_input_ids)
loss_pretrained = output.loss
del rag_token
question_encoder = TFAutoModel.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator = TFAutoModelForSeq2SeqLM.from_pretrained(
"facebook/bart-large-cnn", load_weight_prefix=load_weight_prefix, name="generator"
)
rag_token = TFRagTokenForGeneration(
config=rag_config, question_encoder=question_encoder, generator=generator, retriever=rag_retriever
)
output = rag_token(input_ids, labels=decoder_input_ids)
loss_init = output.loss
self.assertAlmostEqual(loss_pretrained, loss_init, places=4)
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/rag/test_retrieval_rag.py
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
self.retrieval_vector_size = 8
# DPR tok
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
os.makedirs(dpr_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
os.makedirs(bart_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
def get_bart_tokenizer(self) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def get_dummy_dataset(self):
dataset = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
}
)
dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
return dataset
def get_dummy_canonical_hf_index_retriever(self):
dataset = self.get_dummy_dataset()
config = RagConfig(
retrieval_vector_size=self.retrieval_vector_size,
question_encoder=DPRConfig().to_dict(),
generator=BartConfig().to_dict(),
)
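        # patch load_dataset so the canonical HF index reads the in-memory dummy dataset
        # instead of downloading the real wiki_dpr dataset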
with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
mock_load_dataset.return_value = dataset
retriever = RagRetriever(
config,
question_encoder_tokenizer=self.get_dpr_tokenizer(),
generator_tokenizer=self.get_bart_tokenizer(),
)
return retriever
def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
dataset = self.get_dummy_dataset()
config = RagConfig(
retrieval_vector_size=self.retrieval_vector_size,
question_encoder=DPRConfig().to_dict(),
generator=BartConfig().to_dict(),
index_name="custom",
)
if from_disk:
config.passages_path = os.path.join(self.tmpdirname, "dataset")
config.index_path = os.path.join(self.tmpdirname, "index.faiss")
dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
dataset.drop_index("embeddings")
dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
del dataset
retriever = RagRetriever(
config,
question_encoder_tokenizer=self.get_dpr_tokenizer(),
generator_tokenizer=self.get_bart_tokenizer(),
)
else:
retriever = RagRetriever(
config,
question_encoder_tokenizer=self.get_dpr_tokenizer(),
generator_tokenizer=self.get_bart_tokenizer(),
index=CustomHFIndex(config.retrieval_vector_size, dataset),
)
return retriever
def get_dummy_legacy_index_retriever(self):
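        # the legacy DPR index expects three files on disk: a FAISS index (*.index.dpr), a pickled list of
        # document ids (*.index_meta.dpr) and a pickled {id: [text, title]} passages dict (psgs_w100.tsv.pkl);
        # the extra embedding dimension mirrors the auxiliary dimension the legacy index appends for
        # inner-product search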
dataset = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
}
)
dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))
passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        with open(passages_file_name, "wb") as passages_file:
            pickle.dump(passages, passages_file)
config = RagConfig(
retrieval_vector_size=self.retrieval_vector_size,
question_encoder=DPRConfig().to_dict(),
generator=BartConfig().to_dict(),
index_name="legacy",
index_path=self.tmpdirname,
)
retriever = RagRetriever(
config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
)
return retriever
def test_canonical_hf_index_retriever_retrieve(self):
n_docs = 1
retriever = self.get_dummy_canonical_hf_index_retriever()
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
self.assertEqual(len(doc_dicts), 2)
self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]])
def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
retriever = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
mock_load_dataset.return_value = self.get_dummy_dataset()
retriever.save_pretrained(tmp_dirname)
retriever = RagRetriever.from_pretrained(tmp_dirname)
self.assertIsInstance(retriever, RagRetriever)
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
out = retriever.retrieve(hidden_states, n_docs=1)
self.assertTrue(out is not None)
def test_custom_hf_index_retriever_retrieve(self):
n_docs = 1
retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
self.assertEqual(len(doc_dicts), 2)
self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]])
def test_custom_hf_index_retriever_save_and_from_pretrained(self):
retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(tmp_dirname)
retriever = RagRetriever.from_pretrained(tmp_dirname)
self.assertIsInstance(retriever, RagRetriever)
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertIsNotNone(out)
def test_custom_hf_index_retriever_retrieve_from_disk(self):
n_docs = 1
retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
self.assertEqual(len(doc_dicts), 2)
self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]])
def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(tmp_dirname)
retriever = RagRetriever.from_pretrained(tmp_dirname)
self.assertIsInstance(retriever, RagRetriever)
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertIsNotNone(out)
def test_legacy_index_retriever_retrieve(self):
n_docs = 1
retriever = self.get_dummy_legacy_index_retriever()
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
self.assertEqual(len(doc_dicts), 2)
self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
self.assertEqual(doc_dicts[0]["text"][0], "bar") # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0], "foo") # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]])
def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
retriever = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(tmp_dirname)
retriever = RagRetriever.from_pretrained(tmp_dirname)
self.assertIsInstance(retriever, RagRetriever)
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertIsNotNone(out)
@require_torch
@require_tokenizers
@require_sentencepiece
def test_hf_index_retriever_call(self):
import torch
n_docs = 1
retriever = self.get_dummy_canonical_hf_index_retriever()
question_input_ids = [[5, 7], [10, 11]]
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
context_input_ids, context_attention_mask, retrieved_doc_embeds = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
self.assertIsInstance(context_input_ids, list)
self.assertIsInstance(context_attention_mask, list)
self.assertIsInstance(retrieved_doc_embeds, np.ndarray)
out = retriever(
question_input_ids,
hidden_states,
prefix=retriever.config.generator.prefix,
n_docs=n_docs,
return_tensors="pt",
)
context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
self.assertIsInstance(context_input_ids, torch.Tensor)
self.assertIsInstance(context_attention_mask, torch.Tensor)
self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
@require_torch
@require_tokenizers
@require_sentencepiece
def test_custom_hf_index_end2end_retriever_call(self):
context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
n_docs = 1
retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)
question_input_ids = [[5, 7], [10, 11]]
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        self.assertEqual(
            len(out), 6
        )  # check that the retriever output consists of 6 attributes, including the tokenized docs
        self.assertTrue(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask"))
        )  # check that the doc-token-related keys are present in the output dictionary
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/rag/test_modeling_rag.py
# coding=utf-8
# Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import json
import os
import shutil
import tempfile
import unittest
from unittest.mock import patch
import numpy as np
from transformers import BartTokenizer, T5Tokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import (
get_tests_dir,
require_sentencepiece,
require_tokenizers,
require_torch,
require_torch_non_multi_gpu,
slow,
torch_device,
)
from transformers.utils import cached_property, is_datasets_available, is_faiss_available, is_torch_available
from ..bart.test_modeling_bart import BartModelTester
from ..dpr.test_modeling_dpr import DPRModelTester
from ..t5.test_modeling_t5 import T5ModelTester
TOLERANCE = 1e-3
T5_SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available() and is_datasets_available() and is_faiss_available():
import faiss
import torch
from datasets import Dataset
from transformers import (
AutoConfig,
AutoModel,
AutoModelForSeq2SeqLM,
DPRContextEncoder,
RagConfig,
RagModel,
RagRetriever,
RagSequenceForGeneration,
RagTokenForGeneration,
RagTokenizer,
)
from transformers.modeling_outputs import BaseModelOutput
def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
"""If tensors not close, or a and b arent both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
        raise  # no active exception here, so this raises RuntimeError and falls through to the except below
except Exception:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
def require_retrieval(test_case):
"""
Decorator marking a test that requires a set of dependencies necessary for pefrorm retrieval with
[`RagRetriever`].
These tests are skipped when respective libraries are not installed.
"""
if not (is_torch_available() and is_datasets_available() and is_faiss_available()):
test_case = unittest.skip("test requires PyTorch, datasets and faiss")(test_case)
return test_case
@require_torch
@require_retrieval
@require_sentencepiece
class RagTestMixin:
all_model_classes = (
(RagModel, RagTokenForGeneration, RagSequenceForGeneration)
if is_torch_available() and is_datasets_available() and is_faiss_available()
else ()
)
retrieval_vector_size = 32
n_docs = 3
max_combined_length = 16
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
# DPR tok
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
os.makedirs(dpr_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
os.makedirs(bart_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
t5_tokenizer = T5Tokenizer(T5_SAMPLE_VOCAB)
t5_tokenizer_path = os.path.join(self.tmpdirname, "t5_tokenizer")
t5_tokenizer.save_pretrained(t5_tokenizer_path)
@cached_property
def dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
@cached_property
def dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
@cached_property
def bart_tokenizer(self) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))
@cached_property
def t5_tokenizer(self) -> BartTokenizer:
return T5Tokenizer.from_pretrained(os.path.join(self.tmpdirname, "t5_tokenizer"))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
        # clean up as much GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
def get_retriever(self, config):
dataset = Dataset.from_dict(
{
"id": ["0", "1", "3"],
"text": ["foo", "bar", "qux"],
"title": ["Foo", "Bar", "Qux"],
"embeddings": [
np.ones(self.retrieval_vector_size),
2 * np.ones(self.retrieval_vector_size),
3 * np.ones(self.retrieval_vector_size),
],
}
)
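        # Index the dummy embeddings with an exact (flat) inner-product faiss index, so
        # retrieval scores are plain dot products against the query vectors.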
dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
tokenizer = self.bart_tokenizer if config.generator.model_type == "bart" else self.t5_tokenizer
with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
mock_load_dataset.return_value = dataset
retriever = RagRetriever(
config,
question_encoder_tokenizer=self.dpr_tokenizer,
generator_tokenizer=tokenizer,
)
return retriever
def check_model_with_retriever(
self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs
):
self.assertIsNotNone(config.question_encoder)
self.assertIsNotNone(config.generator)
for model_class in self.all_model_classes:
model = model_class(config, retriever=self.get_retriever(config)).to(torch_device)
model.eval()
self.assertTrue(model.config.is_encoder_decoder)
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
# logits
self.assertEqual(
outputs.logits.shape,
(self.n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size),
)
# generator encoder last hidden states
self.assertEqual(
outputs.generator_enc_last_hidden_state.shape,
(self.n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size),
)
# doc scores
self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs))
def check_model_with_end2end_retriever(
self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs
):
self.assertIsNotNone(config.question_encoder)
self.assertIsNotNone(config.generator)
context_encoder_tokenizer = self.dpr_ctx_encoder_tokenizer
        dpr_context_encoder = DPRContextEncoder(config.question_encoder)  # DPR is a twin-tower (bi-encoder) model, so the question encoder config also fits the context encoder
retriever = self.get_retriever(config)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)  # lets the retriever tokenize retrieved docs for the context encoder
for model_class in [RagTokenForGeneration, RagSequenceForGeneration]:
model = model_class(config, retriever=retriever)
model.set_context_encoder_for_training(dpr_context_encoder) # set the context_encoder for training
model.to(torch_device)
model.eval()
self.assertTrue(model.config.is_encoder_decoder)
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
# logits
self.assertEqual(
outputs.logits.shape,
(self.n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size),
)
# generator encoder last hidden states
self.assertEqual(
outputs.generator_enc_last_hidden_state.shape,
(self.n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size),
)
# doc scores
self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs))
def check_model_generate_from_context_input_ids(
self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs
):
self.assertIsNotNone(config.question_encoder)
self.assertIsNotNone(config.generator)
retriever = self.get_retriever(config)
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
model.eval()
self.assertTrue(model.config.is_encoder_decoder)
question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0]
out = retriever(
input_ids,
question_hidden_states.cpu().detach().to(torch.float32).numpy(),
prefix=config.generator.prefix,
return_tensors="pt",
)
context_input_ids, context_attention_mask, retrieved_doc_embeds = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
# cast
retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states)
context_input_ids = context_input_ids.to(input_ids)
context_attention_mask = context_attention_mask.to(input_ids)
# compute doc_scores
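            # (batch, 1, dim) x (batch, dim, n_docs) -> (batch, 1, n_docs), squeezed to (batch, n_docs)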
doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(
1
)
outputs = model.generate(
context_input_ids=context_input_ids,
context_attention_mask=context_attention_mask,
doc_scores=doc_scores,
do_deduplication=True,
)
self.assertIsNotNone(outputs)
def check_model_generate(
self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs
):
self.assertIsNotNone(config.question_encoder)
self.assertIsNotNone(config.generator)
for model_class in self.all_model_classes[1:]:
model = model_class(config, retriever=self.get_retriever(config)).to(torch_device)
model.eval()
self.assertTrue(model.config.is_encoder_decoder)
outputs = model.generate(
input_ids=input_ids,
num_beams=2,
num_return_sequences=2,
decoder_start_token_id=config.generator.eos_token_id,
)
self.assertIsNotNone(outputs)
def check_model_without_retriever(
self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs
):
self.assertIsNotNone(config.question_encoder)
self.assertIsNotNone(config.generator)
retriever = self.get_retriever(config)
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
model.eval()
self.assertTrue(model.config.is_encoder_decoder)
question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0]
out = retriever(
input_ids,
question_hidden_states.cpu().detach().to(torch.float32).numpy(),
prefix=config.generator.prefix,
return_tensors="pt",
)
context_input_ids, context_attention_mask, retrieved_doc_embeds = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
# cast
retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states)
context_input_ids = context_input_ids.to(input_ids)
context_attention_mask = context_attention_mask.to(input_ids)
# compute doc_scores
doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(
1
)
outputs = model(
context_input_ids=context_input_ids,
context_attention_mask=context_attention_mask,
doc_scores=doc_scores,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
# logits
self.assertEqual(
outputs.logits.shape,
(self.n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size),
)
# generator encoder last hidden states
self.assertEqual(
outputs.generator_enc_last_hidden_state.shape,
(self.n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size),
)
# doc scores
self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs))
def check_model_custom_n_docs(
self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, n_docs, **kwargs
):
self.assertIsNotNone(config.question_encoder)
self.assertIsNotNone(config.generator)
retriever = self.get_retriever(config)
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
model.eval()
self.assertTrue(model.config.is_encoder_decoder)
question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0]
out = retriever(
input_ids,
question_hidden_states.cpu().detach().to(torch.float32).numpy(),
prefix=config.generator.prefix,
return_tensors="pt",
n_docs=n_docs,
)
context_input_ids, context_attention_mask, retrieved_doc_embeds = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
# cast
retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states)
context_input_ids = context_input_ids.to(input_ids)
context_attention_mask = context_attention_mask.to(input_ids)
# compute doc_scores
doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(
1
)
outputs = model(
context_input_ids=context_input_ids,
context_attention_mask=context_attention_mask,
doc_scores=doc_scores,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
n_docs=n_docs,
)
# logits
self.assertEqual(
outputs.logits.shape,
(n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size),
)
# generator encoder last hidden states
self.assertEqual(
outputs.generator_enc_last_hidden_state.shape,
(n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size),
)
# doc scores
self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], n_docs))
def check_model_with_mismatch_n_docs_value(
self,
config,
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask,
retriever_n_docs,
generator_n_docs,
**kwargs,
):
self.assertIsNotNone(config.question_encoder)
self.assertIsNotNone(config.generator)
retriever = self.get_retriever(config)
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
model.eval()
self.assertTrue(model.config.is_encoder_decoder)
question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0]
out = retriever(
input_ids,
question_hidden_states.cpu().detach().to(torch.float32).numpy(),
prefix=config.generator.prefix,
return_tensors="pt",
n_docs=retriever_n_docs,
)
context_input_ids, context_attention_mask, retrieved_doc_embeds = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
# cast
retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states)
context_input_ids = context_input_ids.to(input_ids)
context_attention_mask = context_attention_mask.to(input_ids)
# compute doc_scores
doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(
1
)
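            # The forward pass gets n_docs=generator_n_docs while the contexts were retrieved
            # with n_docs=retriever_n_docs, so the model's internal shape check must fail.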
self.assertRaises(
AssertionError,
model.__call__,
context_input_ids=context_input_ids,
context_attention_mask=context_attention_mask,
doc_scores=doc_scores,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
n_docs=generator_n_docs,
)
def check_model_with_encoder_outputs(
self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs
):
self.assertIsNotNone(config.question_encoder)
self.assertIsNotNone(config.generator)
for model_class in self.all_model_classes:
model = model_class(config, retriever=self.get_retriever(config)).to(torch_device)
model.eval()
self.assertTrue(model.config.is_encoder_decoder)
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
encoder_outputs = BaseModelOutput(outputs.generator_enc_last_hidden_state)
# run only generator
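            # With encoder_outputs and doc_scores precomputed, the forward pass skips the
            # question encoder and the retriever entirely.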
outputs = model(
encoder_outputs=encoder_outputs,
doc_scores=outputs.doc_scores,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
# logits
self.assertEqual(
outputs.logits.shape,
(self.n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size),
)
# generator encoder last hidden states
self.assertEqual(
outputs.generator_enc_last_hidden_state.shape,
(self.n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size),
)
# doc scores
self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs))
def test_model_with_retriever(self):
inputs_dict = self.config_and_inputs
self.check_model_with_retriever(**inputs_dict)
def test_model_with_end2end_retriever(self):
inputs_dict = self.config_and_inputs
self.check_model_with_end2end_retriever(**inputs_dict)
def test_model_without_retriever(self):
inputs_dict = self.config_and_inputs
self.check_model_without_retriever(**inputs_dict)
def test_model_with_encoder_outputs(self):
inputs_dict = self.config_and_inputs
self.check_model_with_encoder_outputs(**inputs_dict)
def test_model_generate(self):
inputs_dict = self.config_and_inputs
self.check_model_generate(**inputs_dict)
def test_model_with_custom_n_docs(self):
inputs_dict = self.config_and_inputs
inputs_dict["n_docs"] = 1
self.check_model_custom_n_docs(**inputs_dict)
def test_model_with_mismatch_n_docs_value(self):
inputs_dict = self.config_and_inputs
inputs_dict["retriever_n_docs"] = 3
inputs_dict["generator_n_docs"] = 2
self.check_model_with_mismatch_n_docs_value(**inputs_dict)
@require_torch
@require_retrieval
class RagDPRBartTest(RagTestMixin, unittest.TestCase):
@cached_property
def config_and_inputs(self):
question_encoder_tester = DPRModelTester(self)
dpr_config_and_inputs = question_encoder_tester.prepare_config_and_inputs()
generator_tester = BartModelTester(self)
bart_config_and_inputs = generator_tester.prepare_config_and_inputs_for_common()
(question_encoder_config, input_ids, _, input_mask, _, _, _) = dpr_config_and_inputs
(generator_config, bart_inputs_dict) = bart_config_and_inputs
decoder_input_ids, decoder_attention_mask = bart_inputs_dict["input_ids"], bart_inputs_dict["attention_mask"]
config = RagConfig.from_question_encoder_generator_configs(
question_encoder_config,
generator_config,
n_docs=self.n_docs,
retrieval_vector_size=self.retrieval_vector_size,
max_combined_length=self.max_combined_length,
)
return {
"config": config,
"input_ids": input_ids,
"attention_mask": input_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
}
@require_torch
@require_retrieval
class RagDPRT5Test(RagTestMixin, unittest.TestCase):
@cached_property
def config_and_inputs(self):
question_encoder_tester = DPRModelTester(self)
dpr_config_and_inputs = question_encoder_tester.prepare_config_and_inputs()
generator_tester = T5ModelTester(self, vocab_size=1100)
t5_config_and_inputs = generator_tester.prepare_config_and_inputs()
(question_encoder_config, input_ids, _, input_mask, _, _, _) = dpr_config_and_inputs
(generator_config, _, decoder_input_ids, _, decoder_attention_mask, _) = t5_config_and_inputs
config = RagConfig.from_question_encoder_generator_configs(
question_encoder_config,
generator_config,
n_docs=self.n_docs,
retrieval_vector_size=self.retrieval_vector_size,
max_combined_length=self.max_combined_length,
)
return {
"config": config,
"input_ids": input_ids,
"attention_mask": input_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
}
@require_torch
@require_retrieval
@require_sentencepiece
@require_tokenizers
@require_torch_non_multi_gpu
class RagModelIntegrationTests(unittest.TestCase):
def tearDown(self):
super().tearDown()
        # clean up as much GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
@cached_property
def sequence_model(self):
return (
RagSequenceForGeneration.from_pretrained_question_encoder_generator(
"facebook/dpr-question_encoder-single-nq-base", "facebook/bart-large-cnn"
)
.to(torch_device)
.eval()
)
@cached_property
def token_model(self):
return (
RagTokenForGeneration.from_pretrained_question_encoder_generator(
"facebook/dpr-question_encoder-single-nq-base", "facebook/bart-large-cnn"
)
.to(torch_device)
.eval()
)
def get_rag_config(self):
question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_config = AutoConfig.from_pretrained("facebook/bart-large-cnn")
return RagConfig.from_question_encoder_generator_configs(
question_encoder_config,
generator_config,
bos_token_id=0,
decoder_start_token_id=2,
eos_token_id=2,
is_encoder_decoder=True,
pad_token_id=1,
vocab_size=50264,
title_sep=" / ",
doc_sep=" // ",
n_docs=5,
max_combined_length=300,
dataset="wiki_dpr",
dataset_split="train",
index_name="exact",
index_path=None,
use_dummy_dataset=True,
retrieval_vector_size=768,
retrieval_batch_size=8,
)
@slow
def test_rag_sequence_inference(self):
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
rag_sequence = self.sequence_model
rag_sequence.set_retriever(rag_retriever)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="pt"
).input_ids
decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="pt").input_ids
input_ids = input_ids.to(torch_device)
decoder_input_ids = decoder_input_ids.to(torch_device)
with torch.no_grad():
output = rag_sequence(
input_ids,
labels=decoder_input_ids,
)
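        # logits come back as (n_docs * batch_size, target_length, vocab_size)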
expected_shape = torch.Size([5, 5, 50264])
self.assertEqual(output.logits.shape, expected_shape)
expected_doc_scores = torch.tensor([[75.0286, 74.4998, 74.0804, 74.0306, 73.9504]]).to(torch_device)
_assert_tensors_equal(expected_doc_scores, output.doc_scores, atol=TOLERANCE)
expected_loss = torch.tensor([36.7368]).to(torch_device)
_assert_tensors_equal(expected_loss, output.loss, atol=TOLERANCE)
@slow
def test_rag_token_inference(self):
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
rag_token = self.token_model
rag_token.set_retriever(rag_retriever)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="pt"
).input_ids
decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="pt").input_ids
input_ids = input_ids.to(torch_device)
decoder_input_ids = decoder_input_ids.to(torch_device)
with torch.no_grad():
output = rag_token(
input_ids,
labels=decoder_input_ids,
)
expected_shape = torch.Size([5, 5, 50264])
self.assertEqual(output.logits.shape, expected_shape)
expected_doc_scores = torch.tensor([[75.0286, 74.4998, 74.0804, 74.0306, 73.9504]]).to(torch_device)
_assert_tensors_equal(expected_doc_scores, output.doc_scores, atol=TOLERANCE)
expected_loss = torch.tensor([36.3557]).to(torch_device)
_assert_tensors_equal(expected_loss, output.loss, atol=TOLERANCE)
@slow
def test_rag_token_generate_beam(self):
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
rag_token = self.token_model
rag_token.set_retriever(rag_retriever)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="pt"
).input_ids
input_ids = input_ids.to(torch_device)
output_ids = rag_token.generate(
input_ids,
decoder_start_token_id=rag_token.generator.config.decoder_start_token_id,
num_beams=2,
num_return_sequences=2,
)
# sequence generate test
output_text_1 = rag_decoder_tokenizer.decode(output_ids[0], skip_special_tokens=True)
output_text_2 = rag_decoder_tokenizer.decode(output_ids[1], skip_special_tokens=True)
        # Expected outputs as given by the model at integration time.
EXPECTED_OUTPUT_TEXT_1 = "\"She's My Kind of Girl"
EXPECTED_OUTPUT_TEXT_2 = "\"She's My Kind of Love"
self.assertEqual(output_text_1, EXPECTED_OUTPUT_TEXT_1)
self.assertEqual(output_text_2, EXPECTED_OUTPUT_TEXT_2)
@slow
def test_rag_sequence_generate_beam(self):
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
rag_sequence = self.sequence_model
rag_sequence.set_retriever(rag_retriever)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="pt"
).input_ids
input_ids = input_ids.to(torch_device)
output_ids = rag_sequence.generate(
input_ids,
decoder_start_token_id=rag_sequence.generator.config.decoder_start_token_id,
num_beams=2,
num_return_sequences=2,
)
# sequence generate test
output_text_1 = rag_decoder_tokenizer.decode(output_ids[0], skip_special_tokens=True)
output_text_2 = rag_decoder_tokenizer.decode(output_ids[1], skip_special_tokens=True)
        # Expected outputs as given by the model at integration time.
EXPECTED_OUTPUT_TEXT_1 = """\"She's My Kind of Girl\" was released through Epic Records in Japan in March 1972, giving the duo a Top 10 hit. Two more singles were released in Japan, \"En Carousel\" and \"Love Has Its Ways\" Ulvaeus and Andersson persevered with their songwriting and experimented with new sounds and vocal arrangements."""
EXPECTED_OUTPUT_TEXT_2 = """In September 2018, Björn Ulvaeus revealed that the two new songs, \"I Still Have Faith In You\" and \"Don't Shut Me Down\", would be released no earlier than March 2019. The two new tracks will feature in a TV special set to air later in the year."""
self.assertEqual(output_text_1, EXPECTED_OUTPUT_TEXT_1)
self.assertEqual(output_text_2, EXPECTED_OUTPUT_TEXT_2)
@property
def test_data_questions(self):
return [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
]
@slow
def test_rag_sequence_generate_batch(self):
tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
retriever = RagRetriever.from_pretrained(
"facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
)
rag_sequence = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever).to(
torch_device
)
input_dict = tokenizer(
self.test_data_questions,
return_tensors="pt",
padding=True,
truncation=True,
)
input_ids = input_dict.input_ids.to(torch_device)
attention_mask = input_dict.attention_mask.to(torch_device)
output_ids = rag_sequence.generate(
input_ids,
attention_mask=attention_mask,
)
outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
EXPECTED_OUTPUTS = [
" albert einstein",
" june 22, 2018",
" amplitude modulation",
" tim besley ( chairman )",
" june 20, 2018",
" 1980",
" 7.0",
" 8",
]
self.assertListEqual(outputs, EXPECTED_OUTPUTS)
@slow
def test_rag_sequence_generate_batch_from_context_input_ids(self):
tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
retriever = RagRetriever.from_pretrained(
"facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
)
rag_sequence = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever).to(
torch_device
)
input_dict = tokenizer(
self.test_data_questions,
return_tensors="pt",
padding=True,
truncation=True,
)
input_ids = input_dict.input_ids.to(torch_device)
attention_mask = input_dict.attention_mask.to(torch_device)
question_hidden_states = rag_sequence.question_encoder(input_ids, attention_mask=attention_mask)[0]
docs_dict = retriever(
input_ids.cpu().detach().numpy(), question_hidden_states.cpu().detach().numpy(), return_tensors="pt"
)
doc_scores = torch.bmm(
question_hidden_states.unsqueeze(1),
docs_dict["retrieved_doc_embeds"].to(torch_device).float().transpose(1, 2),
).squeeze(1)
output_ids = rag_sequence.generate(
context_input_ids=docs_dict["context_input_ids"].to(torch_device),
context_attention_mask=docs_dict["context_attention_mask"].to(torch_device),
doc_scores=doc_scores.to(torch_device),
do_deduplication=True,
)
outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
EXPECTED_OUTPUTS = [
" albert einstein",
" june 22, 2018",
" amplitude modulation",
" tim besley ( chairman )",
" june 20, 2018",
" 1980",
" 7.0",
" 8",
]
self.assertListEqual(outputs, EXPECTED_OUTPUTS)
@slow
def test_rag_token_generate_batch(self):
tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True)
rag_token = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever).to(
torch_device
)
if torch_device == "cuda":
rag_token.half()
input_dict = tokenizer(
self.test_data_questions,
return_tensors="pt",
padding=True,
truncation=True,
)
input_ids = input_dict.input_ids.to(torch_device)
attention_mask = input_dict.attention_mask.to(torch_device)
output_ids = rag_token.generate(
input_ids,
attention_mask=attention_mask,
)
outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
EXPECTED_OUTPUTS = [
" albert einstein",
" september 22, 2017",
" amplitude modulation",
" stefan persson",
" april 20, 2018",
" the 1970s",
" 7.1. 2",
" 13",
]
self.assertListEqual(outputs, EXPECTED_OUTPUTS)
@require_torch
@require_retrieval
class RagModelSaveLoadTests(unittest.TestCase):
def tearDown(self):
super().tearDown()
        # clean up as much GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
def get_rag_config(self):
question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_config = AutoConfig.from_pretrained("facebook/bart-large-cnn")
return RagConfig.from_question_encoder_generator_configs(
question_encoder_config,
generator_config,
bos_token_id=0,
decoder_start_token_id=2,
eos_token_id=2,
is_encoder_decoder=True,
pad_token_id=1,
vocab_size=50264,
title_sep=" / ",
doc_sep=" // ",
n_docs=5,
max_combined_length=300,
dataset="wiki_dpr",
dataset_split="train",
index_name="exact",
index_path=None,
use_dummy_dataset=True,
retrieval_vector_size=768,
retrieval_batch_size=8,
)
@slow
def test_rag_sequence_from_pretrained(self):
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="pt"
).input_ids
decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="pt").input_ids
input_ids = input_ids.to(torch_device)
decoder_input_ids = decoder_input_ids.to(torch_device)
with tempfile.TemporaryDirectory() as tmp_dirname:
rag_sequence = RagSequenceForGeneration.from_pretrained_question_encoder_generator(
"facebook/dpr-question_encoder-single-nq-base",
"facebook/bart-large-cnn",
retriever=rag_retriever,
config=rag_config,
).to(torch_device)
            # check that the save_pretrained / from_pretrained round-trip works
            rag_sequence.save_pretrained(tmp_dirname)
            rag_sequence = RagSequenceForGeneration.from_pretrained(tmp_dirname, retriever=rag_retriever)
rag_sequence.to(torch_device)
with torch.no_grad():
output = rag_sequence(
input_ids,
labels=decoder_input_ids,
)
loss_pretrained = output.loss
del rag_sequence
question_encoder = AutoModel.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-large-cnn")
rag_sequence = RagSequenceForGeneration(
config=rag_config, question_encoder=question_encoder, generator=generator, retriever=rag_retriever
)
rag_sequence.to(torch_device)
with torch.no_grad():
output = rag_sequence(
input_ids,
labels=decoder_input_ids,
)
loss_init = output.loss
self.assertAlmostEqual(loss_pretrained.item(), loss_init.item(), places=4)
@slow
def test_rag_token_from_pretrained(self):
rag_config = self.get_rag_config()
rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
"facebook/dpr-question_encoder-single-nq-base"
)
rag_retriever = RagRetriever(
rag_config,
question_encoder_tokenizer=rag_question_encoder_tokenizer,
generator_tokenizer=rag_decoder_tokenizer,
)
input_ids = rag_question_encoder_tokenizer(
"who sings does he love me with reba", return_tensors="pt"
).input_ids
decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="pt").input_ids
input_ids = input_ids.to(torch_device)
decoder_input_ids = decoder_input_ids.to(torch_device)
with tempfile.TemporaryDirectory() as tmp_dirname:
rag_token = RagTokenForGeneration.from_pretrained_question_encoder_generator(
"facebook/dpr-question_encoder-single-nq-base",
"facebook/bart-large-cnn",
retriever=rag_retriever,
config=rag_config,
question_encoder_max_length=200,
generator_max_length=200,
).to(torch_device)
            # check that the save_pretrained / from_pretrained round-trip works
            rag_token.save_pretrained(tmp_dirname)
            rag_token = RagTokenForGeneration.from_pretrained(tmp_dirname, retriever=rag_retriever)
rag_token.to(torch_device)
            self.assertEqual(rag_token.question_encoder.config.max_length, 200)
            self.assertEqual(rag_token.generator.config.max_length, 200)
with torch.no_grad():
output = rag_token(
input_ids,
labels=decoder_input_ids,
)
loss_pretrained = output.loss
del rag_token
question_encoder = AutoModel.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-large-cnn")
rag_token = RagTokenForGeneration(
config=rag_config, question_encoder=question_encoder, generator=generator, retriever=rag_retriever
)
rag_token.to(torch_device)
with torch.no_grad():
output = rag_token(
input_ids,
labels=decoder_input_ids,
)
loss_init = output.loss
self.assertAlmostEqual(loss_pretrained.item(), loss_init.item(), places=4)
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/ctrl/test_modeling_tf_ctrl.py
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import unittest
from transformers import CTRLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.ctrl.modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
)
class TFCTRLModelTester:
def __init__(
self,
parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_token_type_ids = True
self.use_input_mask = True
self.use_labels = True
self.use_mc_token_ids = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 2
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
self.pad_token_id = self.vocab_size - 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
mc_token_ids = None
if self.use_mc_token_ids:
mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
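        # CTRLConfig uses GPT-style argument names (n_embd, n_layer, n_head, dff) rather
        # than the BERT-style names used elsewhere in this tester.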
config = CTRLConfig(
vocab_size=self.vocab_size,
n_embd=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
dff=self.intermediate_size,
# hidden_act=self.hidden_act,
# hidden_dropout_prob=self.hidden_dropout_prob,
# attention_probs_dropout_prob=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
# type_vocab_size=self.type_vocab_size,
# initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
)
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = TFCTRLModel(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
inputs = [input_ids, None, input_mask] # None is the input for 'past'
result = model(inputs)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_ctrl_lm_head(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = TFCTRLLMHeadModel(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_ctrl_for_sequence_classification(
self, config, input_ids, input_mask, head_mask, token_type_ids, *args
):
config.num_labels = self.num_labels
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
inputs = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"labels": sequence_labels,
}
model = TFCTRLForSequenceClassification(config)
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFCTRLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (TFCTRLModel, TFCTRLLMHeadModel, TFCTRLForSequenceClassification) if is_tf_available() else ()
all_generative_model_classes = (TFCTRLLMHeadModel,) if is_tf_available() else ()
pipeline_model_mapping = (
{
"feature-extraction": TFCTRLModel,
"text-classification": TFCTRLForSequenceClassification,
"text-generation": TFCTRLLMHeadModel,
"zero-shot": TFCTRLForSequenceClassification,
}
if is_tf_available()
else {}
)
test_head_masking = False
test_onnx = False
    # TODO: fix the failing tests
def is_pipeline_test_to_skip(
self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def setUp(self):
self.model_tester = TFCTRLModelTester(self)
self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_ctrl_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*config_and_inputs)
def test_ctrl_lm_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_lm_head(*config_and_inputs)
def test_ctrl_sequence_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_for_sequence_classification(*config_and_inputs)
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
list_lm_models = [TFCTRLLMHeadModel]
list_other_models_with_output_ebd = [TFCTRLForSequenceClassification]
for model_class in self.all_model_classes:
model = model_class(config)
model.build() # may be needed for the get_bias() call below
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
if model_class in list_lm_models:
x = model.get_output_embeddings()
assert isinstance(x, tf.keras.layers.Layer)
name = model.get_bias()
assert isinstance(name, dict)
for k, v in name.items():
assert isinstance(v, tf.Variable)
elif model_class in list_other_models_with_output_ebd:
x = model.get_output_embeddings()
assert isinstance(x, tf.keras.layers.Layer)
name = model.get_bias()
assert name is None
else:
x = model.get_output_embeddings()
assert x is None
name = model.get_bias()
assert name is None
@slow
def test_model_from_pretrained(self):
for model_name in TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFCTRLModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_tf
class TFCTRLModelLanguageGenerationTest(unittest.TestCase):
@slow
def test_lm_generate_ctrl(self):
model = TFCTRLLMHeadModel.from_pretrained("Salesforce/ctrl")
input_ids = tf.convert_to_tensor([[11859, 0, 1611, 8]], dtype=tf.int32) # Legal the president is
expected_output_ids = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
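        # Greedy decoding (do_sample=False) is deterministic, so the continuation can be
        # compared token-for-token against the ids above.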
output_ids = model.generate(input_ids, do_sample=False)
self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
hf_public_repos/transformers/tests/models
hf_public_repos/transformers/tests/models/ctrl/test_modeling_ctrl.py
# coding=utf-8
# Copyright 2018 Salesforce and HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import backend_empty_cache, require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
def __init__(
self,
parent,
batch_size=14,
seq_length=7,
is_training=True,
use_token_type_ids=True,
use_input_mask=True,
use_labels=True,
use_mc_token_ids=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_token_type_ids = use_token_type_ids
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.use_mc_token_ids = use_mc_token_ids
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.pad_token_id = self.vocab_size - 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
mc_token_ids = None
if self.use_mc_token_ids:
mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def get_config(self):
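        # Map the tester's generic sizes onto CTRL's GPT-style config names
        # (hidden_size -> n_embd, num_hidden_layers -> n_layer, intermediate_size -> dff).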
return CTRLConfig(
vocab_size=self.vocab_size,
n_embd=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
dff=self.intermediate_size,
# hidden_act=self.hidden_act,
# hidden_dropout_prob=self.hidden_dropout_prob,
# attention_probs_dropout_prob=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
# type_vocab_size=self.type_vocab_size,
# initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
)
def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = CTRLModel(config=config)
model.to(torch_device)
model.eval()
model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.past_key_values), config.n_layer)
def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = CTRLLMHeadModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
return config, inputs_dict
def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
config.num_labels = self.num_labels
model = CTRLForSequenceClassification(config)
model.to(torch_device)
model.eval()
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
pipeline_model_mapping = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
test_pruning = True
test_resize_embeddings = False
test_head_masking = False
    # TODO: fix the failing tests
def is_pipeline_test_to_skip(
self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def setUp(self):
self.model_tester = CTRLModelTester(self)
self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
def tearDown(self):
super().tearDown()
        # clean up as much GPU memory occupied by PyTorch as possible
gc.collect()
backend_empty_cache(torch_device)
def test_config(self):
self.config_tester.run_common_tests()
def test_ctrl_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*config_and_inputs)
def test_ctrl_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = CTRLModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@unittest.skip("The model doesn't support left padding") # and it's not used enough to be worth fixing :)
def test_left_padding_compatibility(self):
pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
backend_empty_cache(torch_device)
@slow
def test_lm_generate_ctrl(self):
model = CTRLLMHeadModel.from_pretrained("Salesforce/ctrl")
model.to(torch_device)
input_ids = torch.tensor(
[[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
) # Legal the president is
expected_output_ids = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
output_ids = model.generate(input_ids, do_sample=False)
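# do_sample=False selects greedy decoding, which is deterministic, so an exact
# token-by-token match against the expected ids is a valid check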
self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/ctrl/test_tokenization_ctrl.py
|
# coding=utf-8
# Copyright 2018 Salesforce and HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = CTRLTokenizer
test_rust_tokenizer = False
test_seq2seq = False
def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
def get_tokenizer(self, **kwargs):
kwargs.update(self.special_tokens_map)
return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
input_text = "adapt react readapt apt"
output_text = "adapt react readapt apt"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
text = "adapt react readapt apt"
bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
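# the expected ids follow the vocab order from setUp:
# adapt=0, re@@=1, a@@=2, apt=3, c@@=4, t=5, <unk>=6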
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/mt5/test_modeling_flax_mt5.py
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
@slow
def test_small_integration_test(self):
"""
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_mt5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_mt5_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_mt5_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_mt5_spm_model_path)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
input_ids = tokenizer("Hello there", return_tensors="np").input_ids
labels = tokenizer("Hi I am", return_tensors="np").input_ids
decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
mtf_score = -(labels.shape[-1] * loss.item())
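# loss is the mean per-token cross-entropy, so scaling by the target length gives the
# total negative log-likelihood; negating it matches the log-likelihood convention of t5's score()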
EXPECTED_SCORE = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/mt5/test_modeling_mt5.py
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
@slow
def test_small_integration_test(self):
"""
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_mt5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_mt5_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_mt5_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_mt5_spm_model_path)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
labels = tokenizer("Hi I am", return_tensors="pt").input_ids
loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
mtf_score = -(labels.shape[-1] * loss.item())
EXPECTED_SCORE = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 0
|
hf_public_repos/transformers/tests/models
|
hf_public_repos/transformers/tests/models/mt5/test_modeling_tf_mt5.py
|
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
@slow
def test_small_integration_test(self):
"""
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_mt5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_mt5_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_mt5_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_mt5_spm_model_path, extra_ids=100)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
labels = tokenizer("Hi I am", return_tensors="tf").input_ids
loss = model(input_ids, labels=labels).loss
mtf_score = -tf.math.reduce_mean(loss).numpy()
EXPECTED_SCORE = -21.228168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/deepspeed/test_model_zoo.py
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import subprocess
from os.path import dirname
from parameterized import parameterized
from tests.trainer.test_trainer import TrainerIntegrationCommon # noqa
from transformers import is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_tests_dir,
require_deepspeed,
require_torch_gpu,
slow,
)
from transformers.trainer_utils import set_seed
if is_torch_available():
from tests.trainer.test_trainer import ( # noqa
RegressionModelConfig,
RegressionPreTrainedModel,
get_regression_trainer,
)
set_seed(42)
FIXTURE_DIRECTORY = get_tests_dir("fixtures")
ROOT_DIRECTORY = os.path.join(dirname(get_tests_dir()))
DS_TESTS_DIRECTORY = dirname(os.path.abspath(__file__))
# default torch.distributed port
DEFAULT_MASTER_PORT = "10999"
T5_SMALL = "t5-small"
# *** Working Models ***
ALBERT_TINY = "hf-internal-testing/tiny-albert"
BART_TINY = "sshleifer/bart-tiny-random"
BERT_TINY = "hf-internal-testing/tiny-bert"
BIGBIRD_PEGASUS_TINY = "hf-internal-testing/tiny-random-bigbird_pegasus"
BIG_BIRD_TINY = "hf-internal-testing/tiny-random-big_bird"
BLENDERBOT_TINY = "hf-internal-testing/tiny-random-blenderbot"
BLOOM_TINY = "bigscience/bigscience-small-testing"
DEBERTA_TINY = "hf-internal-testing/tiny-random-deberta"
DEBERTA_V2_TINY = "hf-internal-testing/tiny-random-deberta-v2"
DISTILBERT_TINY = "sshleifer/tiny-distilbert-base-cased"
ELECTRA_TINY = "hf-internal-testing/tiny-electra"
FLAUBERT_TINY = "hf-internal-testing/tiny-random-flaubert"
FSMT_TINY = "stas/tiny-wmt19-en-de"
FUNNEL_TINY = "hf-internal-testing/tiny-random-funnel"
GPT2_TINY = "sshleifer/tiny-gpt2"
GPTJ_TINY = "hf-internal-testing/tiny-random-gptj"
GPT_NEO_TINY = "hf-internal-testing/tiny-random-gpt_neo"
LAYOUTLM_TINY = "hf-internal-testing/tiny-layoutlm"
LED_TINY = "hf-internal-testing/tiny-random-led"
LONGFORMER_TINY = "hf-internal-testing/tiny-random-longformer"
M2M_100_TINY = "stas/tiny-m2m_100" # hf tiny model is unsuitable
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
MBART_TINY = "sshleifer/tiny-mbart"
MOBILEBERT_TINY = "hf-internal-testing/tiny-random-mobilebert"
MPNET_TINY = "hf-internal-testing/tiny-random-mpnet"
PEGASUS_TINY = "stas/pegasus-cnn_dailymail-tiny-random"
PROPHETNET_TINY = "hf-internal-testing/tiny-random-prophetnet"
ROBERTA_TINY = "sshleifer/tiny-distilroberta-base"
SQUEEZEBERT_TINY = "hf-internal-testing/tiny-random-squeezebert"
T5_TINY = "patrickvonplaten/t5-tiny-random"
T5_V1_TINY = "hf-internal-testing/tiny-random-t5-v1.1"
VIT_TINY = "hf-internal-testing/tiny-random-vit"
XLM_ROBERTA_TINY = "hf-internal-testing/tiny-xlm-roberta"
XLNET_TINY = "sshleifer/tiny-xlnet-base-cased"
# *** To Fix ***
# *** tiny model issues ***
# missing model files:
MT5_TINY = "hf-internal-testing/tiny-random-mt5"
CAMEMBERT_TINY = "hf-internal-testing/tiny-random-camembert"
OPENAI_GPT_TINY = "hf-internal-testing/tiny-random-openai-gpt"
# missing tokenizer files
CONVBERT_TINY = "hf-internal-testing/tiny-random-convbert"
LAYOUTLMV2_TINY = "hf-internal-testing/tiny-random-layoutlmv2"
HUBERT_TINY = "hf-internal-testing/tiny-random-hubert"
# issues with tokenizer
CTRL_TINY = "hf-internal-testing/tiny-random-ctrl"
TRANSFO_XL_TINY = "hf-internal-testing/tiny-random-transfo-xl" # same as ctrl
# other issues with tiny models
IBERT_TINY = "hf-internal-testing/tiny-random-ibert" # multiple issues with either mlm/qa/clas
REFORMER_TINY = "hf-internal-testing/tiny-random-reformer" # multiple issues with either mlm/qa/clas
# *** Lacking official examples to test with ***
# or not working with examples
DPR_TINY = "hf-internal-testing/tiny-random-dpr"
# - "dpr" examples/research_projects/rag-end2end-retriever/
RAG_TINY = "hf-internal-testing/tiny-random-rag"
# - "rag" research_projects
LUKE_TINY = ""
# - "luke" Entities classes - no plan to make such example
LXMERT_TINY = "hf-internal-testing/tiny-random-lxmert"
# - "lxmert" doesn't work with run_qa.py
CLIP_TINY = "hf-internal-testing/tiny-random-clip"
# - "clip" nothing under pytorch examples - XXX: Suraj is working on adding some - check by end of Sep
SPEECH_TO_TEXT_TINY = "hf-internal-testing/tiny-random-speech_to_text"
# - "speech_to_text", nothing under pytorch examples
# *** Reactive mode ***
# models with low usage, unstable API, things about to change - do nothing about the following until someone runs into a problem
TAPAS_TINY = "hf-internal-testing/tiny-random-tapas"
# additional notes on tapas
# 1. "Table must be of type pd.DataFrame" failure
# TODO: new models to add:
#
def get_launcher(distributed=False):
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with more gpus because we use very little data)
num_gpus = min(2, get_gpu_count()) if distributed else 1
master_port = os.environ.get("DS_TEST_PORT", DEFAULT_MASTER_PORT)
return f"deepspeed --num_nodes 1 --num_gpus {num_gpus} --master_port {master_port}".split()
def make_task_cmds():
data_dir_samples = f"{FIXTURE_DIRECTORY}/tests_samples"
data_dir_wmt = f"{data_dir_samples}/wmt_en_ro"
data_dir_xsum = f"{data_dir_samples}/xsum"
args_main = """
--do_train
--max_train_samples 4
--per_device_train_batch_size 2
--num_train_epochs 1
--fp16
--report_to none
--overwrite_output_dir
""".split()
# try to cover as many models as possible once (it's enough to run on one task per model)
# but need a tiny model for each
#
# should have "{model_type.upper()}_TINY" corresponding vars defined, e.g., T5_TINY, etc.
tasks2models = {
"trans": [
"bart",
"fsmt",
"m2m_100",
"marian",
"mbart",
"t5",
"t5_v1",
# "mt5", missing model files
],
"sum": [
"pegasus",
],
"clm": [
"big_bird",
"bigbird_pegasus",
"blenderbot",
"bloom",
"gpt2",
"gpt_neo",
"gptj",
"xlm-roberta",
"prophetnet",
# "camembert", missing model files
],
"mlm": [
"albert",
"deberta",
"deberta-v2",
"distilbert",
"electra",
"flaubert",
"funnel",
"layoutlm",
# "reformer", # multiple issues with either mlm/qa/clas
],
"qa": [
"led",
"longformer",
"mobilebert",
"mpnet",
"roberta",
"squeezebert",
# "convbert", # missing tokenizer files
# "layoutlmv2", missing model files
],
"clas": [
"bert",
"xlnet",
# "hubert", # missing tokenizer files
# "ibert", # multiple issues with either mlm/qa/clas
# "transfo-xl", # tokenizer issues as ctrl
# "ctrl", # tokenizer issues
# "openai-gpt", missing model files
# "tapas", multiple issues
],
"img_clas": [
"vit",
],
}
scripts_dir = f"{ROOT_DIRECTORY}/examples/pytorch"
tasks = {
"trans": f"""
{scripts_dir}/translation/run_translation.py
--train_file {data_dir_wmt}/train.json
--source_lang en
--target_lang ro
""",
"sum": f"""
{scripts_dir}/summarization/run_summarization.py
--train_file {data_dir_xsum}/sample.json
--max_source_length 12
--max_target_length 12
--lang en
""",
"clm": f"""
{scripts_dir}/language-modeling/run_clm.py
--train_file {FIXTURE_DIRECTORY}/sample_text.txt
--block_size 8
""",
"mlm": f"""
{scripts_dir}/language-modeling/run_mlm.py
--train_file {FIXTURE_DIRECTORY}/sample_text.txt
""",
"qa": f"""
{scripts_dir}/question-answering/run_qa.py
--train_file {data_dir_samples}/SQUAD/sample.json
""",
"clas": f"""
{scripts_dir}/text-classification/run_glue.py
--train_file {data_dir_samples}/MRPC/train.csv
--max_seq_length 12
--task_name MRPC
""",
"img_clas": f"""
{scripts_dir}/image-classification/run_image_classification.py
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--remove_unused_columns False
--max_steps 10
--image_processor_name {DS_TESTS_DIRECTORY}/vit_feature_extractor.json
""",
}
launcher = get_launcher(distributed=True)
cmds = {}
for task, args in tasks.items():
args = args.split()
for model in tasks2models[task]:
model_name = globals()[f"{model.upper().replace('-', '_')}_TINY"]
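# e.g. "t5_v1" -> T5_V1_TINY and "xlm-roberta" -> XLM_ROBERTA_TINY (dashes become underscores)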
args_model = f"--model_name_or_path {model_name}".split()
cmds[f"{task}_{model}"] = launcher + args + args_model + args_main
# # generation special case
# if task == "gen":
# launcher = f"deepspeed --num_nodes 1 --num_gpus 1".split()
# args_model += f"--model_type {model}".split()
# cmds[f"{task}_{model}"] = launcher + args + args_model
# else:
return cmds
task_cmds = make_task_cmds()
ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]
# future preparation:
# for now test just fp16, as these tests are quite slow
# FP16 = "fp16"
# BF16 = "bf16"
#
# dtypes = [FP16]
# so just hardcoding --fp16 for now
# if is_torch_bf16_gpu_available():
# dtypes += [BF16]
def parameterized_custom_name_func(func, param_num, param):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
return f"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, task_cmds.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedModelZoo(TestCasePlus):
"""This class is for testing via an external script - can do multiple gpus"""
def get_task_cmd(self, task, stage):
# return a ready to run train cmd
if task not in task_cmds:
raise ValueError(f"don't know of task {task}, have {task_cmds.keys()}")
cmd = task_cmds[task]
args_ds = f"--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json".split()
output_dir = self.get_auto_remove_tmp_dir()
args_out = f"--output_dir {output_dir}".split()
cmd += args_ds + args_out
return cmd, output_dir
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_zero_to_fp32(self, stage, task):
# testing the ability to do a run followed by recovery of full fp32 weights
cmd, output_dir = self.get_task_cmd(task, stage)
# 1. generate the checkpoint
cmd += "--save_steps 1".split()
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
execute_subprocess_async(cmd, env=self.get_env())
# 2. test that the fp32 weights get reconsolidated
chkpt_dir = f"{output_dir}/checkpoint-1"
recovered_model_path = f"{chkpt_dir}/out.bin"
cmd = f"{chkpt_dir}/zero_to_fp32.py {chkpt_dir} {recovered_model_path}"
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
subprocess.check_call(cmd, shell=True)
assert os.path.exists(recovered_model_path), f"{recovered_model_path} was not found"
# possibly could also test that the resulting saved model is usable but given that we use
# random models we won't know if it's any good
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/deepspeed/test_deepspeed.py
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import io
import itertools
import json
import os
import unittest
from copy import deepcopy
from functools import partial
import datasets
from parameterized import parameterized
import tests.trainer.test_trainer
from tests.trainer.test_trainer import TrainerIntegrationCommon # noqa
from transformers import AutoModel, TrainingArguments, is_torch_available, logging
from transformers.integrations.deepspeed import (
HfDeepSpeedConfig,
is_deepspeed_available,
unset_hf_deepspeed_config,
)
from transformers.testing_utils import (
CaptureLogger,
CaptureStd,
CaptureStderr,
LoggingLevel,
TestCasePlus,
backend_device_count,
execute_subprocess_async,
mockenv_context,
require_deepspeed,
require_optuna,
require_torch_accelerator,
require_torch_multi_accelerator,
slow,
torch_device,
)
from transformers.trainer_utils import get_last_checkpoint, set_seed
from transformers.utils import SAFE_WEIGHTS_NAME, is_torch_bf16_available_on_device
if is_torch_available():
from tests.trainer.test_trainer import ( # noqa
RegressionModelConfig,
RegressionPreTrainedModel,
)
# hack to restore original logging level pre #21700
get_regression_trainer = partial(tests.trainer.test_trainer.get_regression_trainer, log_level="info")
set_seed(42)
# default torch.distributed port
DEFAULT_MASTER_PORT = "10999"
T5_SMALL = "t5-small"
T5_TINY = "patrickvonplaten/t5-tiny-random"
GPT2_TINY = "sshleifer/tiny-gpt2"
def load_json(path):
with open(path) as f:
return json.load(f)
def get_master_port(real_launcher=False):
"""
When using a single gpu launcher emulation (i.e. not deepspeed or python -m torch.distributed)
the issue is that once the port is tied it can't be used anywhere else outside of this process,
since torch.dist doesn't free the port until the process exits. Therefore for the sake of being
able to run both emulated launcher and normal launcher tests we need 2 distinct ports.
This function will give the right port in the right context. For real launcher it'll give the
base port, for emulated launcher it'll give the base port + 1. In both cases a string is
returned.
Args:
`real_launcher`: whether a real launcher is going to be used, or the emulated one
"""
master_port_base = os.environ.get("DS_TEST_PORT", DEFAULT_MASTER_PORT)
if not real_launcher:
master_port_base = str(int(master_port_base) + 1)
return master_port_base
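# e.g. with the default base "10999" (DS_TEST_PORT unset):
# get_master_port(real_launcher=True) -> "10999", get_master_port(real_launcher=False) -> "11000"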
def require_deepspeed_aio(test_case):
"""
Decorator marking a test that requires deepspeed aio (nvme)
"""
if not is_deepspeed_available():
return unittest.skip("test requires deepspeed")(test_case)
import deepspeed
from deepspeed.ops.aio import AsyncIOBuilder
if not deepspeed.ops.__compatible_ops__[AsyncIOBuilder.NAME]:
return unittest.skip("test requires deepspeed async-io")(test_case)
else:
return test_case
if is_deepspeed_available():
from deepspeed.utils import logger as deepspeed_logger # noqa
from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
from transformers.integrations.deepspeed import deepspeed_config, is_deepspeed_zero3_enabled # noqa
def get_launcher(distributed=False):
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with more gpus because we use very little data)
num_gpus = min(2, backend_device_count(torch_device)) if distributed else 1
master_port = get_master_port(real_launcher=True)
return f"deepspeed --num_nodes 1 --num_gpus {num_gpus} --master_port {master_port}".split()
ZERO2 = "zero2"
ZERO3 = "zero3"
FP16 = "fp16"
BF16 = "bf16"
HF_OPTIM = "hf_optim"
HF_SCHEDULER = "hf_scheduler"
DS_OPTIM = "ds_optim"
DS_SCHEDULER = "ds_scheduler"
optims = [HF_OPTIM, DS_OPTIM]
schedulers = [HF_SCHEDULER, DS_SCHEDULER]
stages = [ZERO2, ZERO3]
if is_torch_bf16_available_on_device(torch_device):
dtypes = [FP16, BF16]
else:
dtypes = [FP16]
def parameterized_custom_name_func(func, param_num, param):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
return f"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with dtypes to test
params = list(itertools.product(stages, dtypes))
params_with_optims_and_schedulers = list(itertools.product(stages, dtypes, optims, schedulers))
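# e.g. when bf16 is available (dtypes == [FP16, BF16]) this yields
# 2 stages * 2 dtypes * 2 optims * 2 schedulers = 16 combinations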
@require_deepspeed
@require_torch_accelerator
class CoreIntegrationDeepSpeed(TestCasePlus, TrainerIntegrationCommon):
"""
Testing non-Trainer DeepSpeed integration
"""
def setUp(self):
super().setUp()
master_port = get_master_port(real_launcher=False)
self.dist_env_1_gpu = {
"MASTER_ADDR": "localhost",
"MASTER_PORT": master_port,
"RANK": "0",
"LOCAL_RANK": "0",
"WORLD_SIZE": "1",
}
def tearDown(self):
super().tearDown()
# reset the ds config global so that tests state doesn't leak
unset_hf_deepspeed_config()
def test_init_zero3_fp16(self):
# test that zero.Init() works correctly under zero3/fp16
ds_config = {
"train_batch_size": 1,
"zero_optimization": {
"stage": 3,
},
}
dschf = HfDeepSpeedConfig(ds_config)
self.assertTrue(dschf.is_zero3())
self.assertTrue(is_deepspeed_zero3_enabled())
with LoggingLevel(logging.INFO):
with mockenv_context(**self.dist_env_1_gpu):
logger = logging.get_logger("transformers.modeling_utils")
with CaptureLogger(logger) as cl:
AutoModel.from_pretrained(T5_TINY)
self.assertIn("Detected DeepSpeed ZeRO-3", cl.out)
# now remove zero optimization
del ds_config["zero_optimization"]
dschf = HfDeepSpeedConfig(ds_config)
self.assertFalse(dschf.is_zero3())
self.assertFalse(is_deepspeed_zero3_enabled())
with LoggingLevel(logging.INFO):
with mockenv_context(**self.dist_env_1_gpu):
logger = logging.get_logger("transformers.modeling_utils")
with CaptureLogger(logger) as cl:
AutoModel.from_pretrained(T5_TINY)
self.assertNotIn("Detected DeepSpeed ZeRO-3", cl.out)
class TrainerIntegrationDeepSpeedWithCustomConfig(TestCasePlus):
def setUp(self):
super().setUp()
args = TrainingArguments(".")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
master_port = get_master_port(real_launcher=False)
self.dist_env_1_gpu = {
"MASTER_ADDR": "localhost",
"MASTER_PORT": master_port,
"RANK": "0",
"LOCAL_RANK": "0",
"WORLD_SIZE": "1",
}
self.ds_config_file = {
"zero2": f"{self.test_file_dir_str}/ds_config_zero2.json",
"zero3": f"{self.test_file_dir_str}/ds_config_zero3.json",
}
# use self.get_config_dict(stage) to use these to ensure the original is not modified
with io.open(self.ds_config_file[ZERO2], "r", encoding="utf-8") as f:
config_zero2 = json.load(f)
with io.open(self.ds_config_file[ZERO3], "r", encoding="utf-8") as f:
config_zero3 = json.load(f)
# The following setting slows things down, so don't enable it by default unless needed by a test.
# It's in the file as a demo for users since we want everything to work out of the box even if slower.
config_zero3["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = False
self.ds_config_dict = {
"zero2": config_zero2,
"zero3": config_zero3,
}
def tearDown(self):
super().tearDown()
# reset the ds config global so that tests state doesn't leak
unset_hf_deepspeed_config()
def get_config_dict(self, stage):
# As some tests modify the dict, always make a copy
return deepcopy(self.ds_config_dict[stage])
@require_deepspeed
@require_torch_accelerator
class TrainerIntegrationDeepSpeed(TrainerIntegrationDeepSpeedWithCustomConfig, TrainerIntegrationCommon):
"""
This class is for testing directly via get_regression_trainer
It mixes in `TrainerIntegrationCommon` which already has a lot of helper validation methods
which we can re-use here.
Important: this class' setup can only work with a single gpu because it runs within the current
pytest worker. For multi-gpu tests use TestDeepSpeedWithLauncher.
Note: if any of the tests of this class get run there will be at least one gpu occupied by them
until this pytest worker exits. This is because the gpu memory allocated by the cuda-kernels
won't be released until this pytest worker exits.
This may appear as some run-away tests if you watch `nvidia-smi` while other tests that fork new
processes are run. So there will be one or two "stale" processes reported in `nvidia-smi`. This
is not a bug.
"""
# --- These tests are enough to run on one of zero stages --- #
def test_hf_ds_config_mismatch(self):
ds_config = self.get_config_dict(ZERO2)
# Purposefully configure these values to mismatch TrainingArguments values.
# This currently doesn't cover all keys (but it could)
per_device_train_batch_size = 2
ds_config["train_micro_batch_size_per_gpu"] = per_device_train_batch_size + 2
ds_config["train_batch_size"] = 1000
gradient_accumulation_steps = 2
ds_config["gradient_accumulation_steps"] = gradient_accumulation_steps + 2
max_grad_norm = 1.0
ds_config["gradient_clipping"] = max_grad_norm + 0.1
adam_beta1, adam_beta2 = 0.9, 0.99
ds_config["optimizer"]["params"]["betas"] = [adam_beta1 - 0.1, adam_beta2 - 0.1]
fp16 = True
ds_config["fp16"]["enabled"] = not fp16
keys = [
"per_device_train_batch_size",
"train_batch_size",
"gradient_accumulation_steps",
"max_grad_norm",
"betas",
"fp16",
]
with mockenv_context(**self.dist_env_1_gpu):
trainer = get_regression_trainer(
local_rank=0,
fp16=fp16,
deepspeed=ds_config,
per_device_train_batch_size=per_device_train_batch_size,
gradient_accumulation_steps=gradient_accumulation_steps,
max_grad_norm=max_grad_norm,
adam_beta1=adam_beta1,
adam_beta2=adam_beta2,
)
with self.assertRaises(Exception) as context:
trainer.train()
for key in keys:
self.assertTrue(
key in str(context.exception),
f"{key} is not in the exception message:\n{context.exception}",
)
# Test various combos
# 1. DS scheduler + DS optimizer: this is already tested by most other tests
# 2. HF scheduler + HF optimizer:
# 3. DS scheduler + HF optimizer:
# 4. HF scheduler + DS optimizer:
def test_hf_scheduler_hf_optimizer(self):
a = 0
with mockenv_context(**self.dist_env_1_gpu):
ds_config_zero2_dict = self.get_config_dict(ZERO2)
del ds_config_zero2_dict["optimizer"] # force default HF Trainer optimizer
del ds_config_zero2_dict["scheduler"] # force default HF Trainer scheduler
ds_config_zero2_dict["zero_optimization"]["offload_optimizer"]["device"] = "none"
ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1 # force optimizer on the first step
trainer = get_regression_trainer(a=a, local_rank=0, fp16=True, deepspeed=ds_config_zero2_dict)
trainer.train()
new_a = trainer.model.a.item()
self.assertNotEqual(new_a, a)
def test_ds_scheduler_hf_optimizer(self):
a = 0
with mockenv_context(**self.dist_env_1_gpu):
ds_config_zero2_dict = self.get_config_dict(ZERO2)
del ds_config_zero2_dict["optimizer"] # force default HF Trainer optimizer
ds_config_zero2_dict["zero_optimization"]["offload_optimizer"]["device"] = "none"
ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1 # force optimizer on the first step
trainer = get_regression_trainer(a=a, local_rank=0, fp16=True, deepspeed=ds_config_zero2_dict)
trainer.train()
new_a = trainer.model.a.item()
self.assertNotEqual(new_a, a)
def test_hf_scheduler_ds_optimizer(self):
a = 0
with mockenv_context(**self.dist_env_1_gpu):
ds_config_zero2_dict = self.get_config_dict(ZERO2)
del ds_config_zero2_dict["scheduler"] # force default HF Trainer scheduler
ds_config_zero2_dict["zero_optimization"]["offload_optimizer"]["device"] = "none"
ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1 # force optimizer on the first step
trainer = get_regression_trainer(a=a, local_rank=0, fp16=True, deepspeed=ds_config_zero2_dict)
trainer.train()
new_a = trainer.model.a.item()
self.assertNotEqual(new_a, a)
@require_deepspeed_aio
def test_stage3_nvme_offload(self):
with mockenv_context(**self.dist_env_1_gpu):
# this actually doesn't have to be on NVMe, any storage will do since this test only
# runs a simple check that we can use some directory as if it were NVMe
nvme_path = self.get_auto_remove_tmp_dir()
nvme_config = {"device": "nvme", "nvme_path": nvme_path}
ds_config_zero3_dict = self.get_config_dict(ZERO3)
ds_config_zero3_dict["zero_optimization"]["offload_optimizer"] = nvme_config
ds_config_zero3_dict["zero_optimization"]["offload_param"] = nvme_config
trainer = get_regression_trainer(local_rank=0, fp16=True, deepspeed=ds_config_zero3_dict)
with CaptureLogger(deepspeed_logger) as cl:
trainer.train()
self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none")
@require_optuna
def test_hyperparameter_search(self):
with mockenv_context(**self.dist_env_1_gpu):
ds_config_zero3_dict = self.get_config_dict(ZERO3)
# hyperparameter_search requires model_init() to recreate the model for each trial
def model_init():
config = RegressionModelConfig(a=0, b=0, double_output=False)
model = RegressionPreTrainedModel(config)
return model
trainer = get_regression_trainer(
local_rank=0,
fp16=True,
model_init=model_init,
deepspeed=ds_config_zero3_dict,
)
n_trials = 3
with CaptureLogger(deepspeed_logger) as cl:
with CaptureStd() as cs:
trainer.hyperparameter_search(direction="maximize", n_trials=n_trials)
self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none")
self.assertIn(f"Trial {n_trials-1} finished with value", cs.err, "expected hyperparameter_search output")
self.assertIn("Best is trial", cs.err, "expected hyperparameter_search output")
# --- These tests need to run on both zero stages --- #
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_hf_optimizer_with_offload(self, stage, dtype):
# non-DS optimizers can be used with ZERO-offload (as long as they have both CPU and GPU implementation (except LAMB))
ds_config_dict = self.get_config_dict(stage)
del ds_config_dict["optimizer"] # force default HF Trainer optimizer
# force cpu offload
ds_config_dict["zero_optimization"]["offload_optimizer"]["device"] = "cpu"
ds_config_dict["zero_force_ds_cpu_optimizer"] = False # offload is not efficient w/o CPUAdam
with mockenv_context(**self.dist_env_1_gpu):
kwargs = {"local_rank": 0, "deepspeed": ds_config_dict}
kwargs[dtype] = True
trainer = get_regression_trainer(**kwargs)
with CaptureLogger(deepspeed_logger) as cl:
trainer.train()
self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none")
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_fake_notebook_no_launcher(self, stage, dtype):
# this setup emulates a notebook where a launcher needs to be emulated by hand
# note that unittest resets sys.stdout each test, so `CaptureStd` will work here to capture
# DeepSpeed log if this test happens to run first in this pytest worker. But it will fail if
# it's run not as a first test as `sys.stdout` will no longer be the same. So we either have
# to reset `deepspeed_logger.handlers[0].setStream(sys.stdout)` or directly capture from the deepspeed_logger.
with mockenv_context(**self.dist_env_1_gpu):
kwargs = {"local_rank": 0, "deepspeed": self.get_config_dict(stage)}
kwargs[dtype] = True
trainer = get_regression_trainer(**kwargs)
with CaptureLogger(deepspeed_logger) as cl:
trainer.train()
self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none")
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_early_get_last_lr(self, stage, dtype):
# with deepspeed's fp16 and dynamic loss scale enabled the optimizer/scheduler steps may
# not run for the first few dozen steps while the loss scale is too large, and thus
# `get_last_lr` will fail if called during that warm-up stage,
#
# setting `logging_steps=1` forces an early `trainer._maybe_log_save_evaluate()` which calls
# `self.lr_scheduler.get_last_lr()` and originally it'd fail on the very first step.
with mockenv_context(**self.dist_env_1_gpu):
a = b = 0.0
kwargs = {
"a": a,
"b": b,
"local_rank": 0,
"train_len": 8,
"deepspeed": self.get_config_dict(stage),
"per_device_train_batch_size": 8,
"logging_steps": 1,
}
kwargs[dtype] = True
trainer = get_regression_trainer(**kwargs)
trainer.train()
post_train_a = trainer.model.a.item()
# XXX: for some reason the following check fails with zero3/fp16 and any/bf16 - not
# broken, but a different qualitative outcome - as if the optimizer did run
# oddly getting 1.0 for both a and b from 0.0 - there is a bug somewhere
# print(trainer.model.a.item())
# print(trainer.model.b.item())
# need to investigate at some point
if (stage == ZERO3 and dtype == FP16) or (dtype == BF16):
return
# it's enough that train didn't fail for this test, but we must check that
# optimizer/scheduler didn't run (since if it did this test isn't testing the right thing)
self.assertEqual(post_train_a, a)
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_gradient_accumulation(self, stage, dtype):
# this test measures that we get identical weights and similar loss with:
# 1. per_device_train_batch_size=16, gradient_accumulation_steps=1
# 2. per_device_train_batch_size=4, gradient_accumulation_steps=4
# since the 2nd should produce the effective batch of 1st, with the same results
#
# I can get an identical loss for a small train_len=32, plus the power of the initial
# dynamic loss scale value set to:
# "fp16.initial_scale_power": 1
# plus having the same WarmupLR's warmup_min_lr == warmup_max_lr in the config file
# but for some reason when going to train_len=64 the weights start to mismatch with this setup.
# the culprit seems to be `initial_scale_power` - putting it back to its default 32 keeps the weights identical
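# effective batch per optimizer step is identical in both runs below: 16 * 1 == 4 * 4 == 16 samples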
train_len = 64
a = b = 0.0
kwargs = {
"a": a,
"b": b,
"local_rank": 0,
"train_len": train_len,
"deepspeed": self.get_config_dict(stage),
}
kwargs[dtype] = True
with mockenv_context(**self.dist_env_1_gpu):
no_grad_accum_trainer = get_regression_trainer(
**kwargs,
per_device_train_batch_size=16,
gradient_accumulation_steps=1,
)
no_grad_accum_result = no_grad_accum_trainer.train()
no_grad_accum_loss = no_grad_accum_result.training_loss
no_grad_accum_a = no_grad_accum_trainer.model.a.item()
no_grad_accum_b = no_grad_accum_trainer.model.b.item()
# make sure the optimizer kicked in - if it hasn't changed from the original value of a then make train_len bigger
self.assertNotEqual(no_grad_accum_a, a)
with mockenv_context(**self.dist_env_1_gpu):
yes_grad_accum_trainer = get_regression_trainer(
**kwargs,
per_device_train_batch_size=4,
gradient_accumulation_steps=4,
)
yes_grad_accum_result = yes_grad_accum_trainer.train()
yes_grad_accum_loss = yes_grad_accum_result.training_loss
yes_grad_accum_a = yes_grad_accum_trainer.model.a.item()
yes_grad_accum_b = yes_grad_accum_trainer.model.b.item()
self.assertNotEqual(yes_grad_accum_a, a)
# training with half the batch size but accumulation steps as 2 should give the same
# weights, but sometimes get a slight difference still of 1e-6
self.assertAlmostEqual(no_grad_accum_a, yes_grad_accum_a, places=5)
self.assertAlmostEqual(no_grad_accum_b, yes_grad_accum_b, places=5)
# see the note above how to get identical loss on a small bs
self.assertAlmostEqual(no_grad_accum_loss, yes_grad_accum_loss, places=2)
def check_saved_checkpoints_deepspeed(self, output_dir, freq, total, stage, dtype):
# adapted from TrainerIntegrationCommon.check_saved_checkpoints
file_list = [SAFE_WEIGHTS_NAME, "training_args.bin", "trainer_state.json", "config.json"]
if stage == ZERO2:
ds_file_list = ["mp_rank_00_model_states.pt"]
elif stage == ZERO3:
ds_file_list = ["zero_pp_rank_0_mp_rank_00_model_states.pt"]
else:
raise ValueError(f"unknown stage {stage}")
if dtype == "bf16":
ds_file_list.append("bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt")
for step in range(freq, total, freq):
checkpoint = os.path.join(output_dir, f"checkpoint-{step}")
self.assertTrue(os.path.isdir(checkpoint), f"[{stage}] {checkpoint} dir is not found")
# common files
for filename in file_list:
path = os.path.join(checkpoint, filename)
self.assertTrue(os.path.isfile(path), f"[{stage}] {path} is not found")
# ds files
ds_path = os.path.join(checkpoint, f"global_step{step}")
for filename in ds_file_list:
# filename = os.path.join(path, filename)
# print(filename)
path = os.path.join(ds_path, filename)
self.assertTrue(os.path.isfile(path), f"[{stage}] {path} is not found")
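# expected on-disk layout (a sketch, for e.g. stage=zero2, dtype=bf16, step=5):
#   checkpoint-5/model.safetensors, config.json, training_args.bin, trainer_state.json
#   checkpoint-5/global_step5/mp_rank_00_model_states.pt
#   checkpoint-5/global_step5/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt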
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_save_checkpoints(self, stage, dtype):
# adapted from TrainerIntegrationTest.test_save_checkpoints
freq = 5
output_dir = self.get_auto_remove_tmp_dir()
ds_config_dict = self.get_config_dict(stage)
if dtype == FP16:
ds_config_dict["fp16"]["initial_scale_power"] = 1 # force optimizer on the first step
# XXX:
if stage == ZERO3:
ds_config_dict["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = True
# save checkpoints
with mockenv_context(**self.dist_env_1_gpu):
kwargs = {
"output_dir": output_dir,
"save_steps": freq,
"deepspeed": ds_config_dict,
}
kwargs[dtype] = True
trainer = get_regression_trainer(**kwargs)
trainer.train()
total = int(self.n_epochs * 64 / self.batch_size)
self.check_saved_checkpoints_deepspeed(output_dir, freq, total, stage, dtype)
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_can_resume_training_errors(self, stage, dtype):
with mockenv_context(**self.dist_env_1_gpu):
ds_config_dict = self.get_config_dict(stage)
output_dir = self.get_auto_remove_tmp_dir()
kwargs = {"output_dir": output_dir, "deepspeed": ds_config_dict}
kwargs[dtype] = True
trainer = get_regression_trainer(**kwargs)
# 1. fail to find any checkpoint - due a fresh output_dir
with self.assertRaises(Exception) as context:
trainer.train(resume_from_checkpoint=True)
self.assertTrue(
"No valid checkpoint found in output directory" in str(context.exception),
f"got exception: {context.exception}",
)
# 2. fail to find a bogus checkpoint
with self.assertRaises(Exception) as context:
checkpoint = os.path.join(output_dir, "checkpoint-5")
trainer.train(resume_from_checkpoint=f"{checkpoint}-bogus")
self.assertTrue(
"Can't find a valid checkpoint at" in str(context.exception), f"got exception: {context.exception}"
)
@parameterized.expand(params_with_optims_and_schedulers, name_func=parameterized_custom_name_func)
def test_can_resume_training_normal(self, stage, dtype, optim, scheduler):
# adapted from TrainerIntegrationTest.test_can_resume_training
# test normal resume for each stage separately, error-handling is tested in a different test
# ToDo: Currently, hf_optim + hf_scheduler resumes with the correct states and
# also has same losses for few steps but then slowly diverges. Need to figure it out.
if optim == HF_OPTIM and scheduler == HF_SCHEDULER:
return
output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
ds_config_dict = self.get_config_dict(stage)
if dtype == FP16:
ds_config_dict["fp16"]["initial_scale_power"] = 1 # force optimizer on the first step
# XXX:
if stage == ZERO3:
ds_config_dict["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = True
if optim == HF_OPTIM:
del ds_config_dict["optimizer"]
if scheduler == HF_SCHEDULER:
del ds_config_dict["scheduler"]
kwargs = {
"output_dir": output_dir,
"train_len": 128,
"save_steps": 5,
"learning_rate": 0.1,
"deepspeed": ds_config_dict,
}
kwargs[dtype] = True
with mockenv_context(**self.dist_env_1_gpu):
trainer = get_regression_trainer(**kwargs)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint = os.path.join(output_dir, "checkpoint-5")
# Reinitialize trainer
trainer = get_regression_trainer(**kwargs)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
# Now check with a later checkpoint that it also works when we span over one epoch
checkpoint = os.path.join(output_dir, "checkpoint-15")
# Reinitialize trainer and load model
trainer = get_regression_trainer(**kwargs)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
# Finally, should be able to resume with the same trainer/same deepspeed engine instance
# XXX: but currently this is not possible due to a DS bug: https://github.com/microsoft/DeepSpeed/issues/1612
# trainer.train(resume_from_checkpoint=checkpoint)
# a workaround needs to be used that re-creates the deepspeed engine
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_load_state_dict_from_zero_checkpoint(self, stage, dtype):
# test that we can load fp32 weights directly from the zero checkpoint into the current model
output_dir = self.get_auto_remove_tmp_dir() # "./xxx", after=False, before=False)
ds_config_dict = self.get_config_dict(stage)
kwargs = {
"output_dir": output_dir,
"train_len": 4,
"per_device_train_batch_size": 4,
"num_train_epochs": 1,
"save_strategy": "steps",
"save_steps": 1,
"learning_rate": 0.1,
"deepspeed": ds_config_dict,
}
kwargs[dtype] = True
with mockenv_context(**self.dist_env_1_gpu):
trainer = get_regression_trainer(**kwargs)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint_dir = get_last_checkpoint(output_dir)
model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
(a1, b1) = model.a.item(), model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
def test_config_object(self):
# test that we can switch from zero2 to zero3 in the same process for example
# test is_zero, etc.
output_dir = self.get_auto_remove_tmp_dir()
kwargs = {"output_dir": output_dir, "train_len": 8, "fp16": True}
ds_config_zero3_dict = self.get_config_dict(ZERO3)
ds_config_zero2_dict = self.get_config_dict(ZERO2)
with mockenv_context(**self.dist_env_1_gpu):
trainer = get_regression_trainer(deepspeed=ds_config_zero3_dict, **kwargs)
self.assertTrue(is_deepspeed_zero3_enabled())
# test we can repeat that and with train this time
trainer = get_regression_trainer(deepspeed=ds_config_zero3_dict, **kwargs)
trainer.train()
self.assertTrue(is_deepspeed_zero3_enabled())
# test zero3 is disabled
trainer = get_regression_trainer(deepspeed=ds_config_zero2_dict, **kwargs)
self.assertFalse(is_deepspeed_zero3_enabled())
# check config obj
config = deepspeed_config()
self.assertTrue(bool(config), "Deepspeed config should be accessible")
# with accelerate integration below line is additionally required for this test to pass
trainer.accelerator.state._reset_state()
del trainer
# now weakref should gc the global and we shouldn't get anything here
config = deepspeed_config()
self.assertFalse(is_deepspeed_zero3_enabled())
self.assertFalse(bool(config), "Deepspeed config should not be accessible")
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_load_best_model(self, stage, dtype):
# Test that forced deepspeed reinit doesn't break the model. the forced re-init after
# loading the best model in Trainer is there to workaround this bug in Deepspeed
# https://github.com/microsoft/DeepSpeed/issues/1612
#
# The test is derived from a repro script submitted in this Issue:
# https://github.com/huggingface/transformers/issues/17114
#
# One additional feature of this test is that we use a non-AdamW optimizer to test that
# deepspeed doesn't fallback to AdamW, which would prevent the optimizer states from loading
# correctly
from transformers import T5ForConditionalGeneration, T5Tokenizer, Trainer # noqa
output_dir = self.get_auto_remove_tmp_dir() # "./xxx", after=False, before=False)
ds_config_dict = self.get_config_dict(stage)
del ds_config_dict["optimizer"] # will use HF Trainer optimizer
del ds_config_dict["scheduler"] # will use HF Trainer scheduler
ds_config_dict["zero_force_ds_cpu_optimizer"] = False # offload is not efficient w/o CPUAdam
# must use this setting to get the reload path exercised
ds_config_dict["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = True
with mockenv_context(**self.dist_env_1_gpu):
args_dict = {
"per_device_train_batch_size": 1,
"per_device_eval_batch_size": 1,
"gradient_accumulation_steps": 1,
"learning_rate": 1e-4,
"num_train_epochs": 1,
"do_train": True,
"do_eval": True,
"optim": "adafactor",
"evaluation_strategy": "steps",
"eval_steps": 1,
"save_strategy": "steps",
"save_steps": 1,
"load_best_model_at_end": True,
"max_steps": 1,
"deepspeed": ds_config_dict,
"report_to": "none",
}
training_args = TrainingArguments(output_dir, **args_dict)
tokenizer = T5Tokenizer.from_pretrained(T5_TINY)
model = T5ForConditionalGeneration.from_pretrained(T5_TINY)
def _add_eos_to_examples(example):
example["input_text"] = f"question: {example['question']} context: {example['context']}"
example["target_text"] = example["answers"]["text"][0] if len(example["answers"]["text"]) > 0 else ""
return example
def _convert_to_features(example_batch):
input_encodings = tokenizer.batch_encode_plus(
example_batch["input_text"], pad_to_max_length=True, max_length=512, truncation=True
)
target_encodings = tokenizer.batch_encode_plus(
example_batch["target_text"], pad_to_max_length=True, max_length=16, truncation=True
)
encodings = {
"input_ids": input_encodings["input_ids"],
"attention_mask": input_encodings["attention_mask"],
"labels": target_encodings["input_ids"],
}
return encodings
def get_dataset():
data_file = str(self.tests_dir / "fixtures/tests_samples/SQUAD/sample.json")
data_files = {"train": data_file, "validation": data_file}
raw_datasets = datasets.load_dataset("json", data_files=data_files, field="data")
train_dataset = raw_datasets["train"].map(_add_eos_to_examples).map(_convert_to_features, batched=True)
valid_dataset = deepcopy(train_dataset)
return train_dataset, valid_dataset
train_dataset, eval_dataset = get_dataset()
trainer = Trainer(
model=model,
tokenizer=tokenizer,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
)
trainer.train() # crash 1 was here
trainer.evaluate() # crash 2 was here
@slow
@require_deepspeed
@require_torch_accelerator
class TestDeepSpeedWithLauncher(TestCasePlus):
"""This class is for testing via an external script - can do multiple gpus"""
# Tests to devise #
#
# 1. predict_with_generate on multigpu - need to figure out how to give input sequences so that
# the 2 gpus will generate prediction sequences that aren't of the same length - this is because
# we had to code a special feature to sync the gpus when the predicted sequences aren't of the
# same length. In general this will be tested as a side-effect through a variety of other tests -
# it'll simply hang trying to synchronize with other gpus if this problem is encountered. So as
# long as we have a few full tests running on zero3 + predict_with_generate this should be
# mostly covered.
#
# but there are 5 variations on beam search in `generate` - with identical code branched with `if
# synced_gpus`
#
# 2. most tests should probably be run on both: zero2 and zero3 configs
#
@parameterized.expand(params, name_func=parameterized_custom_name_func)
@require_torch_multi_accelerator
def test_basic_distributed(self, stage, dtype):
self.run_and_check(stage=stage, dtype=dtype, distributed=True)
def test_do_eval_no_train(self):
# testing only zero3 since zero2 makes no sense with inference
self.run_and_check(
stage=ZERO3,
dtype=FP16,
eval_steps=1,
distributed=False,
do_train=False,
do_eval=True,
)
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_fp32_non_distributed(self, stage, dtype):
# real model needs too much GPU memory under stage2+fp32, so using tiny random model here -
# therefore no quality checks, just basic completion checks are done
self.run_and_check(
stage=stage,
dtype=dtype,
model_name=T5_TINY,
distributed=False,
do_train=True,
do_eval=True,
quality_checks=False,
fp32=True,
)
@parameterized.expand(params, name_func=parameterized_custom_name_func)
@require_torch_multi_accelerator
def test_fp32_distributed(self, stage, dtype):
# real model needs too much GPU memory under stage2+fp32, so using tiny random model here -
# therefore no quality checks, just basic completion checks are done
self.run_and_check(
stage=stage,
dtype=dtype,
model_name=T5_TINY,
distributed=True,
do_train=True,
do_eval=True,
quality_checks=False,
fp32=True,
)
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_resume_train_not_from_ds_checkpoint(self, stage, dtype):
# do normal training and then resume not from the deepspeed checkpoint but explicitly from
# the saved model dir
do_train = True
do_eval = False
kwargs = {
"stage": stage,
"dtype": dtype,
"eval_steps": 1,
"distributed": True,
"do_train": do_train,
"do_eval": do_eval,
}
# 1. normal training
output_dir = self.run_and_check(**kwargs)
# 2. now resume explicitly from the saved weights, by passing --model_name_or_path output_dir
# - i.e. the same path the model was saved to in step 1
output_dir = self.run_trainer(**kwargs, model_name=output_dir)
self.do_checks(output_dir, do_train=do_train, do_eval=do_eval)
@parameterized.expand(["bf16", "fp16", "fp32"])
@require_torch_multi_accelerator
def test_inference(self, dtype):
if dtype == "bf16" and not is_torch_bf16_available_on_device(torch_device):
self.skipTest("test requires bfloat16 hardware support")
# this is just inference, so no optimizer should be loaded
# it only works for z3 (makes no sense with z1-z2)
fp32 = dtype == "fp32"
self.run_and_check(
stage=ZERO3,
dtype=dtype,
model_name=T5_TINY,
distributed=True,
do_train=False,
do_eval=True,
quality_checks=False,
fp32=fp32,
)
def do_checks(self, output_dir, do_train=True, do_eval=True, quality_checks=True):
if do_train:
train_metrics = load_json(os.path.join(output_dir, "train_results.json"))
self.assertIn("train_samples_per_second", train_metrics)
if quality_checks:
self.assertGreater(train_metrics["train_samples_per_second"], 0.5)
if do_eval:
eval_metrics = load_json(os.path.join(output_dir, "eval_results.json"))
self.assertIn("eval_bleu", eval_metrics)
if quality_checks:
self.assertGreater(eval_metrics["eval_bleu"], 1)
# XXX: need to do better validation beyond just that the run was successful
def run_and_check(
self,
stage,
dtype,
model_name: str = T5_SMALL,
eval_steps: int = 10,
distributed: bool = True,
do_train: bool = True,
do_eval: bool = True,
quality_checks: bool = True,
fp32: bool = False,
extra_args_str: str = None,
remove_args_str: str = None,
):
# we are doing quality testing so using a small real model
output_dir = self.run_trainer(
stage=stage,
dtype=dtype,
model_name=model_name,
eval_steps=eval_steps,
num_train_epochs=1,
do_train=do_train,
do_eval=do_eval,
distributed=distributed,
fp32=fp32,
extra_args_str=extra_args_str,
remove_args_str=remove_args_str,
)
self.do_checks(output_dir, do_train=do_train, do_eval=do_eval, quality_checks=quality_checks)
return output_dir
def run_trainer(
self,
stage: str,
dtype: str,
model_name: str,
eval_steps: int = 10,
num_train_epochs: int = 1,
do_train: bool = False,
do_eval: bool = True,
distributed: bool = True,
fp32: bool = False,
extra_args_str: str = None,
remove_args_str: str = None,
):
max_len = 32
data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
output_dir = self.get_auto_remove_tmp_dir()
args = f"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--output_dir {output_dir}
--overwrite_output_dir
--max_source_length {max_len}
--max_target_length {max_len}
--val_max_target_length {max_len}
--warmup_steps 8
--predict_with_generate
--save_steps 0
--eval_steps {eval_steps}
--group_by_length
--label_smoothing_factor 0.1
--source_lang en
--target_lang ro
--report_to none
""".split()
args.extend(["--source_prefix", '"translate English to Romanian: "'])
if not fp32:
args.extend([f"--{dtype}"])
actions = 0
if do_train:
actions += 1
args.extend(
f"""
--do_train
--num_train_epochs {str(num_train_epochs)}
--max_train_samples 16
--per_device_train_batch_size 2
--learning_rate 3e-3
""".split()
)
if do_eval:
actions += 1
args.extend(
"""
--do_eval
--max_eval_samples 16
--per_device_eval_batch_size 2
""".split()
)
assert actions > 0, "need at least do_train or do_eval for the test to run"
if extra_args_str is not None:
args.extend(extra_args_str.split())
# currently only works for bool args
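# (a flag that takes a value would leave its orphaned value behind in `args`,
# so only standalone boolean switches can be removed this way)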
if remove_args_str is not None:
remove_args = remove_args_str.split()
args = [x for x in args if x not in remove_args]
ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json".split()
script = [f"{self.examples_dir_str}/pytorch/translation/run_translation.py"]
launcher = get_launcher(distributed)
cmd = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(cmd, env=self.get_env())
return output_dir
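# For reference, a minimal sketch of what `get_launcher` is assumed to provide
# (hypothetical - the actual helper lives in the deepspeed test utilities):
#
#   def get_launcher(distributed=False):
#       # cap at 2 gpus - the quality expectations are tuned for tiny data
#       num_gpus = min(2, get_gpu_count()) if distributed else 1
#       return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()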
@parameterized.expand(params, name_func=parameterized_custom_name_func)
def test_clm(self, stage, dtype):
# this test exercises model.resize_token_embeddings() which requires param gathering outside
# of forward - it's not used by `run_translation.py`, but it is in `run_clm.py`
data_dir = self.tests_dir / "fixtures"
output_dir = self.get_auto_remove_tmp_dir()
args = f"""
--model_name_or_path {GPT2_TINY}
--train_file {data_dir}/sample_text.txt
--validation_file {data_dir}/sample_text.txt
--output_dir {output_dir}
--overwrite_output_dir
--do_train
--do_eval
--max_train_samples 16
--max_eval_samples 16
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--num_train_epochs 1
--warmup_steps 8
--block_size 64
--report_to none
""".split()
args.extend([f"--{dtype}"])
ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json".split()
script = [f"{self.examples_dir_str}/pytorch/language-modeling/run_clm.py"]
launcher = get_launcher(distributed=True)
cmd = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(cmd, env=self.get_env())
def test_clm_from_config_zero3_fp16(self):
# this test exercises AutoModel.from_config(config) - to ensure zero.Init is called
data_dir = self.tests_dir / "fixtures"
output_dir = self.get_auto_remove_tmp_dir()
args = f"""
--model_type gpt2
--tokenizer_name {GPT2_TINY}
--train_file {data_dir}/sample_text.txt
--validation_file {data_dir}/sample_text.txt
--output_dir {output_dir}
--overwrite_output_dir
--do_train
--max_train_samples 4
--per_device_train_batch_size 2
--num_train_epochs 1
--warmup_steps 8
--block_size 8
--fp16
--report_to none
""".split()
ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_zero3.json".split()
script = [f"{self.examples_dir_str}/pytorch/language-modeling/run_clm.py"]
launcher = get_launcher(distributed=True)
cmd = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
with CaptureStderr() as cs:
execute_subprocess_async(cmd, env=self.get_env())
self.assertIn("Detected DeepSpeed ZeRO-3", cs.err)
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/deepspeed/vit_feature_extractor.json
|
{
"feature_extractor_type": "ViTFeatureExtractor",
"size": 30
}
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/deepspeed/ds_config_zero3.json
|
{
"fp16": {
"enabled": "auto",
"loss_scale": 0,
"loss_scale_window": 1000,
"initial_scale_power": 16,
"hysteresis": 2,
"min_loss_scale": 1
},
"bf16": {
"enabled": "auto"
},
"optimizer": {
"type": "AdamW",
"params": {
"lr": "auto",
"betas": "auto",
"eps": "auto",
"weight_decay": "auto"
}
},
"scheduler": {
"type": "WarmupLR",
"params": {
"warmup_min_lr": "auto",
"warmup_max_lr": "auto",
"warmup_num_steps": "auto"
}
},
"zero_optimization": {
"stage": 3,
"offload_optimizer": {
"device": "cpu",
"pin_memory": true
},
"offload_param": {
"device": "cpu",
"pin_memory": true
},
"overlap_comm": true,
"contiguous_gradients": true,
"sub_group_size": 1e9,
"reduce_bucket_size": "auto",
"stage3_prefetch_bucket_size": "auto",
"stage3_param_persistence_threshold": "auto",
"stage3_max_live_parameters": 1e9,
"stage3_max_reuse_distance": 1e9,
"stage3_gather_16bit_weights_on_model_save": true
},
"gradient_accumulation_steps": "auto",
"gradient_clipping": "auto",
"steps_per_print": 2000,
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
"wall_clock_breakdown": false
}
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/deepspeed/ds_config_zero2.json
|
{
"fp16": {
"enabled": "auto",
"loss_scale": 0,
"loss_scale_window": 1000,
"initial_scale_power": 16,
"hysteresis": 2,
"min_loss_scale": 1
},
"bf16": {
"enabled": "auto"
},
"optimizer": {
"type": "AdamW",
"params": {
"lr": "auto",
"betas": "auto",
"eps": "auto",
"weight_decay": "auto"
}
},
"scheduler": {
"type": "WarmupLR",
"params": {
"warmup_min_lr": "auto",
"warmup_max_lr": "auto",
"warmup_num_steps": "auto"
}
},
"zero_optimization": {
"stage": 2,
"offload_optimizer": {
"device": "cpu",
"pin_memory": true
},
"allgather_partitions": true,
"allgather_bucket_size": 2e8,
"overlap_comm": true,
"reduce_scatter": true,
"reduce_bucket_size": 2e8,
"contiguous_gradients": true
},
"gradient_accumulation_steps": "auto",
"gradient_clipping": "auto",
"steps_per_print": 2000,
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
"wall_clock_breakdown": false
}
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/benchmark/test_benchmark.py
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
def check_results_dict_not_empty(self, results):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
result = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(result)
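# The results are nested as results[model]["result"][batch_size][seq_length];
# an illustrative (not measured) example of the shape traversed above:
#
#   {"sshleifer/tiny-gpt2": {"bs": [1], "ss": [8], "result": {1: {8: 0.0012}}}}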
def test_inference_no_configs(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_no_configs_only_pretrain(self):
MODEL_ID = "sgugger/tiny-distilbert-classification"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
only_pretrain_model=True,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_torchscript(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
torchscript=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == "cpu", "Can't do half precision")
def test_inference_fp16(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
fp16=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_no_model_no_architectures(self):
MODEL_ID = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID)
# set architectures equal to `None`
config.architectures = None
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_train_no_configs(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=False,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == "cpu", "Can't do half precision")
def test_train_no_configs_fp16(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=False,
sequence_lengths=[8],
batch_sizes=[1],
fp16=True,
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def test_inference_with_configs(self):
MODEL_ID = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_encoder_decoder_with_configs(self):
MODEL_ID = "sshleifer/tinier_bart"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_train_with_configs(self):
MODEL_ID = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=False,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def test_train_encoder_decoder_with_configs(self):
MODEL_ID = "sshleifer/tinier_bart"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def test_save_csv_files(self):
MODEL_ID = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=True,
save_to_csv=True,
sequence_lengths=[8],
batch_sizes=[1],
inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args)
benchmark.run()
self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())
def test_trace_memory(self):
MODEL_ID = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(summary):
self.assertTrue(hasattr(summary, "sequential"))
self.assertTrue(hasattr(summary, "cumulative"))
self.assertTrue(hasattr(summary, "current"))
self.assertTrue(hasattr(summary, "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
log_filename=os.path.join(tmp_dir, "log.txt"),
log_print=True,
trace_memory_line_by_line=True,
multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args)
result = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/benchmark/test_benchmark_tf.py
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
def check_results_dict_not_empty(self, results):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
result = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(result)
def test_inference_no_configs_eager(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
eager_mode=True,
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_no_configs_only_pretrain(self):
MODEL_ID = "sgugger/tiny-distilbert-classification"
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
only_pretrain_model=True,
)
benchmark = TensorFlowBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_no_configs_graph(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_with_configs_eager(self):
MODEL_ID = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
eager_mode=True,
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args, [config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_with_configs_graph(self):
MODEL_ID = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args, [config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_train_no_configs(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=False,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def test_train_with_configs(self):
MODEL_ID = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=False,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args, [config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def test_inference_encoder_decoder_with_configs(self):
MODEL_ID = "patrickvonplaten/t5-tiny-random"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
def test_inference_no_configs_xla(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
use_xla=True,
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_save_csv_files(self):
MODEL_ID = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
inference=True,
save_to_csv=True,
sequence_lengths=[8],
batch_sizes=[1],
inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args)
benchmark.run()
self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())
def test_trace_memory(self):
MODEL_ID = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(summary):
self.assertTrue(hasattr(summary, "sequential"))
self.assertTrue(hasattr(summary, "cumulative"))
self.assertTrue(hasattr(summary, "current"))
self.assertTrue(hasattr(summary, "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
log_filename=os.path.join(tmp_dir, "log.txt"),
log_print=True,
trace_memory_line_by_line=True,
eager_mode=True,
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args)
result = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/bettertransformer/test_integration.py
|
# coding=utf-8
# Copyright 2023 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
# refer to the full test suite in Optimum library:
# https://github.com/huggingface/optimum/tree/main/tests/bettertransformer
def test_transform_and_reverse(self):
r"""
Classic tests to simply check if the conversion has been successful.
"""
model_id = "hf-internal-testing/tiny-random-t5"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
inp = tokenizer("This is me", return_tensors="pt")
model = model.to_bettertransformer()
self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
output = model.generate(**inp)
model = model.reverse_bettertransformer()
self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
self.assertFalse(
any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
)
output_from_pretrained = model_reloaded.generate(**inp)
self.assertTrue(torch.allclose(output, output_from_pretrained))
def test_error_save_pretrained(self):
r"""
The save_pretrained method should raise a ValueError if the model is in BetterTransformer mode.
All should be good if the model is reversed.
"""
model_id = "hf-internal-testing/tiny-random-t5"
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
model = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(ValueError):
model.save_pretrained(tmpdirname)
model = model.reverse_bettertransformer()
model.save_pretrained(tmpdirname)
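# Minimal usage sketch of the API exercised above (same tiny checkpoint; the
# ValueError on saving an unreversed model is what test_error_save_pretrained
# asserts):
#
#   model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
#   model = model.to_bettertransformer()       # convert
#   model = model.reverse_bettertransformer()  # must be reversed before saving
#   model.save_pretrained("out_dir")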
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/trainer/test_trainer_tpu.py
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This test is meant to be run on an instance with TPUs like this:
#
# python examples/pytorch/xla_spawn.py --num_cores=8 tests/test_trainer_tpu.py
#
# Replace 8 with the number of TPU cores you have.
#
import sys
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
def __init__(self, length: int = 101):
self.length = length
def __len__(self):
return self.length
def __getitem__(self, i) -> int:
return i
class DummyDataCollator:
def __call__(self, features):
return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
def __init__(self):
super().__init__()
# Add some (unused) params otherwise DDP will complain.
self.fc = nn.Linear(120, 80)
def forward(self, input_ids, labels=None):
if labels is not None:
return torch.tensor(0.0, device=input_ids.device), input_ids
else:
return input_ids
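# The model echoes its input ids back as predictions (with a dummy zero loss),
# so a distributed evaluate/predict should return exactly the dataset indices,
# in order - which is what compute_metrics below verifies.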
def main():
parser = HfArgumentParser((TrainingArguments,))
sys.argv += ["--output_dir", "./examples"]
training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, "
f"tpu_num_cores: {training_args.tpu_num_cores}",
)
# Essentially, what we want to verify in the distributed case is
# that we get all samples back, in the right order.
# (this is crucial for prediction for instance)
for dataset_length in [1001, 256, 15]:
dataset = DummyDataset(dataset_length)
def compute_metrics(p: EvalPrediction) -> Dict:
sequential = list(range(len(dataset)))
success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
return {"success": success}
trainer = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
trainer.args.eval_accumulation_steps = 2
metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
trainer.args.eval_accumulation_steps = None
logger.info("🔥 All distributed tests successful")
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/trainer/test_trainer_distributed.py
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import numpy as np
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_multi_xpu,
require_torch_neuroncore,
require_torch_npu,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset, IterableDataset
from transformers import Trainer
class DummyDataset(Dataset):
def __init__(self, length: int = 101):
self.length = length
def __len__(self):
return self.length
def __getitem__(self, i) -> int:
return i
class DummyDataCollator:
def __call__(self, features):
return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
def __init__(self):
super().__init__()
# Add some (unused) params otherwise DDP will complain.
self.fc = nn.Linear(120, 80)
def forward(self, input_ids, labels=None):
if labels is not None:
return torch.tensor(0.0, device=input_ids.device), input_ids
else:
return input_ids
class RegressionModel(nn.Module):
def __init__(self, a=0, b=0, double_output=False):
super().__init__()
self.a = nn.Parameter(torch.tensor(a).float())
self.b = nn.Parameter(torch.tensor(b).float())
self.double_output = double_output
self.config = None
def forward(self, input_x, labels=None, **kwargs):
y = input_x * self.a + self.b
if labels is None:
return (y, y) if self.double_output else (y,)
loss = nn.functional.mse_loss(y, labels)
return (loss, y, y) if self.double_output else (loss, y)
class SampleIterableDataset(IterableDataset):
def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
self.dataset = RegressionDataset(a=a, b=b, length=length, seed=seed, label_names=label_names)
def __iter__(self):
for i in range(len(self.dataset)):
yield self.dataset[i]
class FiniteIterableDataset(SampleIterableDataset):
def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
super().__init__(a, b, length, seed, label_names)
self.current_sample = 0
def __iter__(self):
while self.current_sample < len(self.dataset):
yield self.dataset[self.current_sample]
self.current_sample += 1
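# Unlike SampleIterableDataset, which restarts from the beginning on every
# __iter__ call, this dataset keeps a cursor across iterations and is exhausted
# after a single pass - the property the dispatch_batches=False check below
# relies on.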
class RegressionDataset:
def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
np.random.seed(seed)
self.label_names = ["labels"] if label_names is None else label_names
self.length = length
self.x = np.random.normal(size=(length,)).astype(np.float32)
self.ys = [a * self.x + b + np.random.normal(scale=0.1, size=(length,)) for _ in self.label_names]
self.ys = [y.astype(np.float32) for y in self.ys]
def __len__(self):
return self.length
def __getitem__(self, i):
result = {name: y[i] for name, y in zip(self.label_names, self.ys)}
result["input_x"] = self.x[i]
return result
class TestTrainerDistributedNeuronCore(TestCasePlus):
@require_torch_neuroncore
def test_trainer(self):
distributed_args = f"""--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
output_dir = self.get_auto_remove_tmp_dir()
args = f"--output_dir {output_dir}".split()
cmd = ["torchrun"] + distributed_args + args
execute_subprocess_async(cmd, env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributedNPU(TestCasePlus):
@require_torch_npu
def test_trainer(self):
distributed_args = f"""--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
output_dir = self.get_auto_remove_tmp_dir()
args = f"--output_dir {output_dir}".split()
cmd = ["torchrun"] + distributed_args + args
execute_subprocess_async(cmd, env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
@require_torch_multi_gpu
def test_trainer(self):
distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
output_dir = self.get_auto_remove_tmp_dir()
args = f"--output_dir {output_dir}".split()
cmd = ["torchrun"] + distributed_args + args
execute_subprocess_async(cmd, env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
@require_torch_multi_xpu
class TestTrainerDistributedXPU(TestCasePlus):
def test_trainer(self):
distributed_args = f"""--nproc_per_node={torch.xpu.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
output_dir = self.get_auto_remove_tmp_dir()
args = f"--output_dir {output_dir}".split()
cmd = ["torchrun"] + distributed_args + args
execute_subprocess_async(cmd, env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
parser = HfArgumentParser((TrainingArguments,))
training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
dataset = DummyDataset(dataset_length)
def compute_metrics(p: EvalPrediction) -> Dict:
sequential = list(range(len(dataset)))
success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"Predictions and/or labels do not match expected results:\n - predictions: "
f"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}"
)
return {"success": success}
trainer = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
trainer.args.eval_accumulation_steps = 2
metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
trainer.args.eval_accumulation_steps = None
# Check that `dispatch_batches=False` will work on a finite iterable dataset
train_dataset = FiniteIterableDataset(label_names=["labels", "extra"], length=1)
model = RegressionModel()
training_args.per_device_train_batch_size = 1
training_args.max_steps = 1
training_args.dispatch_batches = False
trainer = Trainer(model, training_args, train_dataset=train_dataset)
trainer.train()
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/trainer/test_trainer_utils.py
|
# coding=utf-8
# Copyright 2018 the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import unittest
import numpy as np
from transformers.data.data_collator import default_data_collator
from transformers.testing_utils import require_accelerate, require_torch
from transformers.trainer_utils import RemoveColumnsCollator, find_executable_batch_size
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import IterableDataset
from transformers.modeling_outputs import SequenceClassifierOutput
from transformers.tokenization_utils_base import BatchEncoding
from transformers.trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
IterableDatasetShard,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
ShardSampler,
get_parameter_names,
numpy_pad_and_concatenate,
torch_pad_and_concatenate,
)
class TstLayer(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.linear1 = nn.Linear(hidden_size, hidden_size)
self.ln1 = nn.LayerNorm(hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.ln2 = nn.LayerNorm(hidden_size)
self.bias = nn.Parameter(torch.zeros(hidden_size))
def forward(self, x):
h = self.ln1(nn.functional.relu(self.linear1(x)))
h = nn.functional.relu(self.linear2(h))
return self.ln2(x + h + self.bias)
class RandomIterableDataset(IterableDataset):
# For testing, an iterable dataset of random length
def __init__(self, p_stop=0.01, max_length=1000):
self.p_stop = p_stop
self.max_length = max_length
self.generator = torch.Generator()
def __iter__(self):
count = 0
stop = False
while not stop and count < self.max_length:
yield count
count += 1
number = torch.rand(1, generator=self.generator).item()
stop = number < self.p_stop
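# Each step stops with probability p_stop, so the stream length is roughly
# geometric with mean ~1/p_stop, capped at max_length; seeding self.generator
# (as check_iterable_dataset_shard does) makes the length reproducible.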
@require_torch
class TrainerUtilsTest(unittest.TestCase):
def test_distributed_tensor_gatherer(self):
# Simulate a result with a dataset of size 21, 4 processes and chunks of lengths 2, 3, 1
world_size = 4
num_samples = 21
input_indices = [
[0, 1, 6, 7, 12, 13, 18, 19],
[2, 3, 4, 8, 9, 10, 14, 15, 16, 20, 0, 1],
[5, 11, 17, 2],
]
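# Each sub-list above is one add_arrays call holding world_size concatenated
# chunks; the trailing 0, 1 and 2 wrap around because the sampler pads the
# 21-sample dataset up to 24, a round multiple of the 4 processes (the gatherer
# drops that padding in finalize()).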
predictions = np.random.normal(size=(num_samples, 13))
gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples)
for indices in input_indices:
gatherer.add_arrays(predictions[indices])
result = gatherer.finalize()
self.assertTrue(np.array_equal(result, predictions))
# With nested tensors
gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples)
for indices in input_indices:
gatherer.add_arrays([predictions[indices], [predictions[indices], predictions[indices]]])
result = gatherer.finalize()
self.assertTrue(isinstance(result, list))
self.assertEqual(len(result), 2)
self.assertTrue(isinstance(result[1], list))
self.assertEqual(len(result[1]), 2)
self.assertTrue(np.array_equal(result[0], predictions))
self.assertTrue(np.array_equal(result[1][0], predictions))
self.assertTrue(np.array_equal(result[1][1], predictions))
def test_distributed_tensor_gatherer_different_shapes(self):
# Simulate a result with a dataset of size 21, 4 processes and chunks of lengths 2, 3, 1
world_size = 4
num_samples = 21
input_indices = [
[0, 1, 6, 7, 12, 13, 18, 19],
[2, 3, 4, 8, 9, 10, 14, 15, 16, 20, 0, 1],
[5, 11, 17, 2],
]
sequence_lengths = [8, 10, 13]
predictions = np.random.normal(size=(num_samples, 13))
gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples)
for indices, seq_length in zip(input_indices, sequence_lengths):
gatherer.add_arrays(predictions[indices, :seq_length])
result = gatherer.finalize()
# Remove the extra samples added at the end for a round multiple of num processes.
actual_indices = [input_indices[0], input_indices[1][:-2], input_indices[2][:-1]]
for indices, seq_length in zip(actual_indices, sequence_lengths):
self.assertTrue(np.array_equal(result[indices, :seq_length], predictions[indices, :seq_length]))
# With nested tensors
predictions = np.random.normal(size=(num_samples, 13))
gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples)
for indices, seq_length in zip(input_indices, sequence_lengths):
gatherer.add_arrays([predictions[indices, :seq_length], predictions[indices]])
result = gatherer.finalize()
for indices, seq_length in zip(actual_indices, sequence_lengths):
self.assertTrue(np.array_equal(result[0][indices, :seq_length], predictions[indices, :seq_length]))
self.assertTrue(np.array_equal(result[1], predictions))
# Check if works if varying seq_length is second
gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples)
for indices, seq_length in zip(input_indices, sequence_lengths):
gatherer.add_arrays([predictions[indices], predictions[indices, :seq_length]])
result = gatherer.finalize()
self.assertTrue(np.array_equal(result[0], predictions))
for indices, seq_length in zip(actual_indices, sequence_lengths):
self.assertTrue(np.array_equal(result[1][indices, :seq_length], predictions[indices, :seq_length]))
def test_label_smoothing(self):
epsilon = 0.1
num_labels = 12
random_logits = torch.randn(4, 5, num_labels)
random_labels = torch.randint(0, num_labels, (4, 5))
loss = nn.functional.cross_entropy(random_logits.view(-1, num_labels), random_labels.view(-1))
model_output = SequenceClassifierOutput(logits=random_logits)
label_smoothed_loss = LabelSmoother(0.1)(model_output, random_labels)
log_probs = -nn.functional.log_softmax(random_logits, dim=-1)
expected_loss = (1 - epsilon) * loss + epsilon * log_probs.mean()
self.assertTrue(torch.allclose(label_smoothed_loss, expected_loss))
# With a few -100 labels
random_labels[0, 1] = -100
random_labels[2, 1] = -100
random_labels[2, 3] = -100
loss = nn.functional.cross_entropy(random_logits.view(-1, num_labels), random_labels.view(-1))
model_output = SequenceClassifierOutput(logits=random_logits)
label_smoothed_loss = LabelSmoother(0.1)(model_output, random_labels)
log_probs = -nn.functional.log_softmax(random_logits, dim=-1)
# Mask the log probs with the -100 labels
log_probs[0, 1] = 0.0
log_probs[2, 1] = 0.0
log_probs[2, 3] = 0.0
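# 4 x 5 = 20 positions with 3 masked leaves 17 tokens; the smoothing mass is
# spread over num_labels classes per kept token, hence the num_labels * 17
# normalizer below.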
expected_loss = (1 - epsilon) * loss + epsilon * log_probs.sum() / (num_labels * 17)
self.assertTrue(torch.allclose(label_smoothed_loss, expected_loss))
def test_group_by_length(self):
# Get some inputs of random lengths
lengths = torch.randint(0, 25, (100,)).tolist()
# Put one bigger than the others to check it ends up in first position
lengths[32] = 50
indices = list(LengthGroupedSampler(4, lengths=lengths))
# The biggest element should be first
self.assertEqual(lengths[indices[0]], 50)
# The indices should be a permutation of range(100)
self.assertEqual(sorted(indices), list(range(100)))
def test_group_by_length_with_dict(self):
# Get some inputs of random lengths
data = []
for _ in range(6):
input_ids = torch.randint(0, 25, (100,)).tolist()
data.append({"input_ids": input_ids})
# Put one bigger than the others to check it ends up in first position
data[3]["input_ids"] = torch.randint(0, 25, (105,)).tolist()
indices = list(LengthGroupedSampler(4, dataset=data))
# The biggest element should be first
self.assertEqual(len(data[indices[0]]["input_ids"]), 105)
# The indices should be a permutation of range(6)
self.assertEqual(sorted(indices), list(range(6)))
def test_group_by_length_with_batch_encoding(self):
# Get some inputs of random lengths
data = []
for _ in range(6):
input_ids = torch.randint(0, 25, (100,)).tolist()
data.append(BatchEncoding({"input_ids": input_ids}))
# Put one bigger than the others to check it ends up in first position
data[3]["input_ids"] = torch.randint(0, 25, (105,)).tolist()
indices = list(LengthGroupedSampler(4, dataset=data))
# The biggest element should be first
self.assertEqual(len(data[indices[0]]["input_ids"]), 105)
# The indices should be a permutation of range(6)
self.assertEqual(sorted(indices), list(range(6)))
def test_distributed_length_grouped(self):
# Get some inputs of random lengths
lengths = torch.randint(0, 25, (100,)).tolist()
# Put one bigger than the others to check it ends up in first position
lengths[32] = 50
indices_process_0 = list(DistributedLengthGroupedSampler(4, num_replicas=2, rank=0, lengths=lengths))
indices_process_1 = list(DistributedLengthGroupedSampler(4, num_replicas=2, rank=1, lengths=lengths))
# The biggest element should be first
self.assertEqual(lengths[indices_process_0[0]], 50)
# The indices should be a permutation of range(100)
self.assertEqual(sorted(indices_process_0 + indices_process_1), list(range(100)))
def test_get_parameter_names(self):
model = nn.Sequential(TstLayer(128), nn.ModuleList([TstLayer(128), TstLayer(128)]))
# fmt: off
self.assertEqual(
get_parameter_names(model, [nn.LayerNorm]),
['0.linear1.weight', '0.linear1.bias', '0.linear2.weight', '0.linear2.bias', '0.bias', '1.0.linear1.weight', '1.0.linear1.bias', '1.0.linear2.weight', '1.0.linear2.bias', '1.0.bias', '1.1.linear1.weight', '1.1.linear1.bias', '1.1.linear2.weight', '1.1.linear2.bias', '1.1.bias']
)
# fmt: on
def test_distributed_sampler_with_loop(self):
batch_size = 16
for length in [23, 64, 123]:
dataset = list(range(length))
shard1 = DistributedSamplerWithLoop(dataset, batch_size, num_replicas=2, rank=0)
shard2 = DistributedSamplerWithLoop(dataset, batch_size, num_replicas=2, rank=1)
# Set seeds
shard1.set_epoch(0)
shard2.set_epoch(0)
# Sample
samples1 = list(shard1)
samples2 = list(shard2)
self.assertTrue(len(samples1) % batch_size == 0)
self.assertTrue(len(samples2) % batch_size == 0)
total = []
for sample1, sample2 in zip(samples1, samples2):
total += [sample1, sample2]
self.assertEqual(set(total[:length]), set(dataset))
self.assertEqual(set(total[length:]), set(total[: (len(total) - length)]))
def test_sequential_distributed_sampler(self):
batch_size = 16
for length in [23, 64, 123]:
dataset = list(range(length))
shard1 = SequentialDistributedSampler(dataset, num_replicas=2, rank=0)
shard2 = SequentialDistributedSampler(dataset, num_replicas=2, rank=1)
# Sample
samples1 = list(shard1)
samples2 = list(shard2)
total = samples1 + samples2
self.assertListEqual(total[:length], dataset)
self.assertListEqual(total[length:], dataset[: (len(total) - length)])
# With a batch_size passed
shard1 = SequentialDistributedSampler(dataset, num_replicas=2, rank=0, batch_size=batch_size)
shard2 = SequentialDistributedSampler(dataset, num_replicas=2, rank=1, batch_size=batch_size)
# Sample
samples1 = list(shard1)
samples2 = list(shard2)
self.assertTrue(len(samples1) % batch_size == 0)
self.assertTrue(len(samples2) % batch_size == 0)
total = samples1 + samples2
self.assertListEqual(total[:length], dataset)
self.assertListEqual(total[length:], dataset[: (len(total) - length)])
def check_iterable_dataset_shard(self, dataset, batch_size, drop_last, num_processes=2, epoch=0):
# Set the seed for the base dataset to get the proper reference.
dataset.generator.manual_seed(epoch)
reference = list(dataset)
shards = [
IterableDatasetShard(
dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i
)
for i in range(num_processes)
]
for shard in shards:
shard.set_epoch(epoch)
shard_lists = [list(shard) for shard in shards]
for shard in shard_lists:
# All shards have a number of samples that is a round multiple of batch size
self.assertTrue(len(shard) % batch_size == 0)
# All shards have the same number of samples
self.assertEqual(len(shard), len(shard_lists[0]))
for shard in shards:
# All shards know the total number of samples
self.assertEqual(shard.num_examples, len(reference))
observed = []
for idx in range(0, len(shard_lists[0]), batch_size):
for shard in shard_lists:
observed += shard[idx : idx + batch_size]
# If drop_last is False we loop through samples at the beginning to have a size that is a round multiple of
# batch_size
if not drop_last:
while len(reference) < len(observed):
reference += reference
self.assertListEqual(observed, reference[: len(observed)])
# Check equivalence between IterableDataset and ShardSampler
dataset.generator.manual_seed(epoch)
reference = list(dataset)
sampler_shards = [
ShardSampler(
reference, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i
)
for i in range(num_processes)
]
for shard, sampler_shard in zip(shard_lists, sampler_shards):
self.assertListEqual(shard, list(sampler_shard))
def test_iterable_dataset_shard(self):
dataset = RandomIterableDataset()
self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=2, epoch=0)
self.check_iterable_dataset_shard(dataset, 4, drop_last=False, num_processes=2, epoch=0)
self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=3, epoch=42)
self.check_iterable_dataset_shard(dataset, 4, drop_last=False, num_processes=3, epoch=42)
def test_iterable_dataset_shard_with_length(self):
sampler_shards = [
IterableDatasetShard(list(range(100)), batch_size=4, drop_last=True, num_processes=2, process_index=i)
for i in range(2)
]
# Build expected shards: each process will have batches of size 4 until there are not enough elements left
# to form two more full batches (together the shards consume 8 per round, so we stop at 96 = (100 // 8) * 8)
expected_shards = [[], []]
current_shard = 0
for i in range(0, 96, 4):
expected_shards[current_shard].extend(list(range(i, i + 4)))
current_shard = 1 - current_shard
self.assertListEqual([list(shard) for shard in sampler_shards], expected_shards)
self.assertListEqual([len(shard) for shard in sampler_shards], [len(shard) for shard in expected_shards])
sampler_shards = [
IterableDatasetShard(list(range(100)), batch_size=4, drop_last=False, num_processes=2, process_index=i)
for i in range(2)
]
# When drop_last=False, we get two last full batches by looping back to the beginning.
expected_shards[0].extend(list(range(96, 100)))
expected_shards[1].extend(list(range(0, 4)))
self.assertListEqual([list(shard) for shard in sampler_shards], expected_shards)
self.assertListEqual([len(shard) for shard in sampler_shards], [len(shard) for shard in expected_shards])
def check_shard_sampler(self, dataset, batch_size, drop_last, num_processes=2):
shards = [
ShardSampler(
dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i
)
for i in range(num_processes)
]
shard_lists = [list(shard) for shard in shards]
for shard in shard_lists:
# All shards have a number of samples that is a round multiple of batch size
self.assertTrue(len(shard) % batch_size == 0)
# All shards have the same number of samples
self.assertEqual(len(shard), len(shard_lists[0]))
observed = []
for idx in range(0, len(shard_lists[0]), batch_size):
for shard in shard_lists:
observed += shard[idx : idx + batch_size]
# If drop_last is False we loop through samples at the beginning to have a size that is a round multiple of
# batch_size
reference = copy.copy(dataset)
if not drop_last:
while len(reference) < len(observed):
reference += reference
self.assertListEqual(observed, reference[: len(observed)])
def test_shard_sampler(self):
for n_elements in [64, 123]:
dataset = list(range(n_elements))
self.check_shard_sampler(dataset, 4, drop_last=True, num_processes=2)
self.check_shard_sampler(dataset, 4, drop_last=False, num_processes=2)
self.check_shard_sampler(dataset, 4, drop_last=True, num_processes=3)
self.check_shard_sampler(dataset, 4, drop_last=False, num_processes=3)
@require_accelerate
def test_executable_batch_size(self):
batch_sizes = []
@find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=True)
def mock_training_loop_function(batch_size):
nonlocal batch_sizes
batch_sizes.append(batch_size)
if batch_size > 16:
raise RuntimeError("CUDA out of memory.")
mock_training_loop_function()
self.assertEqual(batch_sizes, [64, 32, 16])
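# find_executable_batch_size re-runs the decorated function after a CUDA OOM,
# halving the batch size each time: 64 -> 32 -> 16, where it finally succeeds.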
@require_accelerate
def test_executable_batch_size_no_search(self):
batch_sizes = []
@find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=False)
def mock_training_loop_function(batch_size):
nonlocal batch_sizes
batch_sizes.append(batch_size)
mock_training_loop_function()
self.assertEqual(batch_sizes, [64])
@require_accelerate
def test_executable_batch_size_with_error(self):
@find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=False)
def mock_training_loop_function(batch_size):
raise RuntimeError("CUDA out of memory.")
with self.assertRaises(RuntimeError) as cm:
mock_training_loop_function()
self.assertEqual("CUDA out of memory", cm.args[0])
def test_pad_and_concatenate_with_1d(self):
"""Tests whether pad_and_concatenate works with scalars."""
array1 = 1.0
array2 = 2.0
result = numpy_pad_and_concatenate(array1, array2)
self.assertTrue(np.array_equal(np.array([1.0, 2.0]), result))
tensor1 = torch.tensor(1.0)
tensor2 = torch.tensor(2.0)
result = torch_pad_and_concatenate(tensor1, tensor2)
self.assertTrue(torch.equal(result, torch.Tensor([1.0, 2.0])))
def test_remove_columns_collator(self):
class MockLogger:
def __init__(self) -> None:
self.called = 0
def info(self, msg):
self.called += 1
self.last_msg = msg
data_batch = [
{"col1": 1, "col2": 2, "col3": 3},
{"col1": 1, "col2": 2, "col3": 3},
]
logger = MockLogger()
remove_columns_collator = RemoveColumnsCollator(
default_data_collator, ["col1", "col2"], logger, "model", "training"
)
self.assertNotIn("col3", remove_columns_collator(data_batch))
# check that the logging message is printed out only once
remove_columns_collator(data_batch)
remove_columns_collator(data_batch)
self.assertEqual(logger.called, 1)
self.assertIn("col3", logger.last_msg)
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/trainer/test_trainer_callback.py
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
"A callback that registers the events that goes through."
def __init__(self):
self.events = []
def on_init_end(self, args, state, control, **kwargs):
self.events.append("on_init_end")
def on_train_begin(self, args, state, control, **kwargs):
self.events.append("on_train_begin")
def on_train_end(self, args, state, control, **kwargs):
self.events.append("on_train_end")
def on_epoch_begin(self, args, state, control, **kwargs):
self.events.append("on_epoch_begin")
def on_epoch_end(self, args, state, control, **kwargs):
self.events.append("on_epoch_end")
def on_step_begin(self, args, state, control, **kwargs):
self.events.append("on_step_begin")
def on_step_end(self, args, state, control, **kwargs):
self.events.append("on_step_end")
def on_evaluate(self, args, state, control, **kwargs):
self.events.append("on_evaluate")
def on_predict(self, args, state, control, **kwargs):
self.events.append("on_predict")
def on_save(self, args, state, control, **kwargs):
self.events.append("on_save")
def on_log(self, args, state, control, **kwargs):
self.events.append("on_log")
def on_prediction_step(self, args, state, control, **kwargs):
self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
def setUp(self):
self.output_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.output_dir)
def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
train_dataset = RegressionDataset(length=train_len)
eval_dataset = RegressionDataset(length=eval_len)
config = RegressionModelConfig(a=a, b=b)
model = RegressionPreTrainedModel(config)
args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
return Trainer(
model,
args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
callbacks=callbacks,
)
def check_callbacks_equality(self, cbs1, cbs2):
self.assertEqual(len(cbs1), len(cbs2))
# Order doesn't matter
cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
for cb1, cb2 in zip(cbs1, cbs2):
if isinstance(cb1, type) and isinstance(cb2, type):
self.assertEqual(cb1, cb2)
elif isinstance(cb1, type) and not isinstance(cb2, type):
self.assertEqual(cb1, cb2.__class__)
elif not isinstance(cb1, type) and isinstance(cb2, type):
self.assertEqual(cb1.__class__, cb2)
else:
self.assertEqual(cb1, cb2)
def get_expected_events(self, trainer):
expected_events = ["on_init_end", "on_train_begin"]
step = 0
        train_dl_len = len(trainer.get_train_dataloader())
evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
for _ in range(trainer.state.num_train_epochs):
expected_events.append("on_epoch_begin")
for _ in range(train_dl_len):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("on_log")
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("on_save")
expected_events.append("on_epoch_end")
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
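    # Worked example (illustrative): with the defaults used by get_trainer() above
    # (train_len=64, per_device_train_batch_size=8, num_train_epochs=3, and logging/save/
    # eval steps larger than the 24 total steps), the expected sequence reduces to
    #   ["on_init_end", "on_train_begin"]
    #   + 3 * (["on_epoch_begin"] + 8 * ["on_step_begin", "on_step_end"] + ["on_epoch_end"])
    #   + ["on_log", "on_train_end"]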
def test_init_callback(self):
trainer = self.get_trainer()
expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
# Callbacks passed at init are added to the default callbacks
trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
expected_callbacks.append(MyTestTrainerCallback)
self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
trainer = self.get_trainer(disable_tqdm=True)
expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
def test_add_remove_callback(self):
expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
trainer = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(DefaultFlowCallback)
expected_callbacks.remove(DefaultFlowCallback)
self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
trainer = self.get_trainer()
cb = trainer.pop_callback(DefaultFlowCallback)
self.assertEqual(cb.__class__, DefaultFlowCallback)
self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
trainer.add_callback(DefaultFlowCallback)
expected_callbacks.insert(0, DefaultFlowCallback)
self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
# We can also add, pop, or remove by instance
trainer = self.get_trainer()
cb = trainer.callback_handler.callbacks[0]
trainer.remove_callback(cb)
expected_callbacks.remove(DefaultFlowCallback)
self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
trainer = self.get_trainer()
cb1 = trainer.callback_handler.callbacks[0]
cb2 = trainer.pop_callback(cb1)
self.assertEqual(cb1, cb2)
self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
trainer.add_callback(cb1)
expected_callbacks.insert(0, DefaultFlowCallback)
self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
def test_event_flow(self):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="ignore", category=UserWarning)
trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
trainer.train()
events = trainer.callback_handler.callbacks[-2].events
self.assertEqual(events, self.get_expected_events(trainer))
# Independent log/save/eval
trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
trainer.train()
events = trainer.callback_handler.callbacks[-2].events
self.assertEqual(events, self.get_expected_events(trainer))
trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
trainer.train()
events = trainer.callback_handler.callbacks[-2].events
self.assertEqual(events, self.get_expected_events(trainer))
trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
trainer.train()
events = trainer.callback_handler.callbacks[-2].events
self.assertEqual(events, self.get_expected_events(trainer))
trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
trainer.train()
events = trainer.callback_handler.callbacks[-2].events
self.assertEqual(events, self.get_expected_events(trainer))
# A bit of everything
trainer = self.get_trainer(
callbacks=[MyTestTrainerCallback],
logging_steps=3,
save_steps=10,
eval_steps=5,
evaluation_strategy="steps",
)
trainer.train()
events = trainer.callback_handler.callbacks[-2].events
self.assertEqual(events, self.get_expected_events(trainer))
# warning should be emitted for duplicated callbacks
with patch("transformers.trainer_callback.logger.warning") as warn_mock:
trainer = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
)
assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
repo_id: hf_public_repos/transformers/tests
file_path: hf_public_repos/transformers/tests/trainer/test_trainer.py
# coding=utf-8
# Copyright 2018 the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import gc
import json
import math
import os
import random
import re
import subprocess
import sys
import tempfile
import unittest
from itertools import product
from pathlib import Path
from typing import Dict, List
from unittest.mock import Mock, patch
import numpy as np
from huggingface_hub import HfFolder, delete_repo, list_repo_commits, list_repo_files
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import (
AutoTokenizer,
IntervalStrategy,
PretrainedConfig,
TrainingArguments,
get_polynomial_decay_schedule_with_warmup,
is_torch_available,
logging,
)
from transformers.hyperparameter_search import ALL_HYPERPARAMETER_SEARCH_BACKENDS
from transformers.testing_utils import (
ENDPOINT_STAGING,
TOKEN,
USER,
CaptureLogger,
TestCasePlus,
backend_device_count,
execute_subprocess_async,
get_gpu_count,
get_tests_dir,
is_staging_test,
require_accelerate,
require_intel_extension_for_pytorch,
require_optuna,
require_ray,
require_safetensors,
require_sentencepiece,
require_sigopt,
require_tensorboard,
require_tokenizers,
require_torch,
require_torch_accelerator,
require_torch_bf16,
require_torch_gpu,
require_torch_multi_accelerator,
require_torch_non_multi_accelerator,
require_torch_non_multi_gpu,
require_torch_tensorrt_fx,
require_torch_tf32,
require_torch_up_to_2_accelerators,
require_torchdynamo,
require_wandb,
slow,
torch_device,
)
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR, HPSearchBackend
from transformers.training_args import OptimizerNames
from transformers.utils import (
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
is_apex_available,
is_bitsandbytes_available,
is_safetensors_available,
is_torchdistx_available,
)
from transformers.utils.hp_naming import TrialShortNamer
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import IterableDataset
import transformers.optimization
from transformers import (
AutoModelForSequenceClassification,
EarlyStoppingCallback,
GlueDataset,
GlueDataTrainingArguments,
GPT2Config,
GPT2LMHeadModel,
LineByLineTextDataset,
PreTrainedModel,
Trainer,
TrainerState,
)
from transformers.modeling_utils import unwrap_model
if is_safetensors_available():
import safetensors.torch
PATH_SAMPLE_TEXT = f"{get_tests_dir()}/fixtures/sample_text.txt"
class RegressionDataset:
def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
np.random.seed(seed)
self.label_names = ["labels"] if label_names is None else label_names
self.length = length
self.x = np.random.normal(size=(length,)).astype(np.float32)
self.ys = [a * self.x + b + np.random.normal(scale=0.1, size=(length,)) for _ in self.label_names]
self.ys = [y.astype(np.float32) for y in self.ys]
def __len__(self):
return self.length
def __getitem__(self, i):
result = {name: y[i] for name, y in zip(self.label_names, self.ys)}
result["input_x"] = self.x[i]
return result
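# A quick illustration of one sample (hedged; shown as a comment rather than run at
# import time): each item pairs a scalar float32 input with one scalar label per name,
# e.g. RegressionDataset(a=2, b=3, length=4)[0] -> {"labels": <float32>, "input_x": <float32>}.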
@dataclasses.dataclass
class RegressionTrainingArguments(TrainingArguments):
a: float = 0.0
b: float = 0.0
keep_report_to: bool = False
def __post_init__(self):
super().__post_init__()
# save resources not dealing with reporting unless specified (also avoids the warning when it's not set)
# can be explicitly disabled via `keep_report_to`
if not self.keep_report_to:
self.report_to = []
class RepeatDataset:
def __init__(self, x, length=64):
self.x = x
self.length = length
def __len__(self):
return self.length
def __getitem__(self, i):
return {"input_ids": self.x, "labels": self.x}
class DynamicShapesDataset:
def __init__(self, length=64, seed=42, batch_size=8):
self.length = length
np.random.seed(seed)
sizes = np.random.randint(1, 20, (length // batch_size,))
# For easy batching, we make every batch_size consecutive samples the same size.
self.xs = [np.random.normal(size=(s,)) for s in sizes.repeat(batch_size)]
self.ys = [np.random.normal(size=(s,)) for s in sizes.repeat(batch_size)]
def __len__(self):
return self.length
def __getitem__(self, i):
return {"input_x": self.xs[i], "labels": self.ys[i]}
class AlmostAccuracy:
def __init__(self, thresh=0.25):
self.thresh = thresh
def __call__(self, eval_pred):
predictions, labels = eval_pred
true = np.abs(predictions - labels) <= self.thresh
return {"accuracy": true.astype(np.float32).mean().item()}
class RegressionModelConfig(PretrainedConfig):
def __init__(self, a=0, b=0, double_output=False, random_torch=True, **kwargs):
super().__init__(**kwargs)
self.a = a
self.b = b
self.double_output = double_output
self.random_torch = random_torch
self.hidden_size = 1
if is_torch_available():
class SampleIterableDataset(IterableDataset):
def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
self.dataset = RegressionDataset(a=a, b=b, length=length, seed=seed, label_names=label_names)
def __iter__(self):
for i in range(len(self.dataset)):
yield self.dataset[i]
class FiniteIterableDataset(SampleIterableDataset):
def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
super().__init__(a, b, length, seed, label_names)
self.current_sample = 0
def __iter__(self):
while self.current_sample < len(self.dataset):
yield self.dataset[self.current_sample]
self.current_sample += 1
class MultiLoader:
def __init__(self, loaders):
self.loaders = loaders
def __len__(self):
return sum(len(loader) for loader in self.loaders)
def __iter__(self):
for loader in self.loaders:
yield from loader
class CustomDataloaderTrainer(Trainer):
def get_train_dataloader(self):
dataloaders = [super().get_train_dataloader(), super().get_train_dataloader()]
return MultiLoader(dataloaders)
def get_eval_dataloader(self, eval_dataset):
dataloaders = [super().get_eval_dataloader(eval_dataset), super().get_eval_dataloader(eval_dataset)]
return MultiLoader(dataloaders)
class RegressionModel(nn.Module):
def __init__(self, a=0, b=0, double_output=False):
super().__init__()
self.a = nn.Parameter(torch.tensor(a).float())
self.b = nn.Parameter(torch.tensor(b).float())
self.double_output = double_output
self.config = None
def forward(self, input_x, labels=None, **kwargs):
y = input_x * self.a + self.b
if labels is None:
return (y, y) if self.double_output else (y,)
loss = nn.functional.mse_loss(y, labels)
return (loss, y, y) if self.double_output else (loss, y)
class RegressionDictModel(nn.Module):
def __init__(self, a=0, b=0):
super().__init__()
self.a = nn.Parameter(torch.tensor(a).float())
self.b = nn.Parameter(torch.tensor(b).float())
self.config = None
def forward(self, input_x, labels=None, **kwargs):
y = input_x * self.a + self.b
result = {"output": y}
if labels is not None:
result["loss"] = nn.functional.mse_loss(y, labels)
return result
class RegressionPreTrainedModel(PreTrainedModel):
config_class = RegressionModelConfig
base_model_prefix = "regression"
def __init__(self, config):
super().__init__(config)
self.a = nn.Parameter(torch.tensor(config.a).float())
self.b = nn.Parameter(torch.tensor(config.b).float())
self.double_output = config.double_output
def forward(self, input_x, labels=None, **kwargs):
y = input_x * self.a + self.b
if labels is None:
return (y, y) if self.double_output else (y,)
loss = nn.functional.mse_loss(y, labels)
return (loss, y, y) if self.double_output else (loss, y)
class RegressionPreTrainedModelWithGradientCheckpointing(PreTrainedModel):
config_class = RegressionModelConfig
base_model_prefix = "regression"
supports_gradient_checkpointing = True
def __init__(self, config):
super().__init__(config)
self.layers = nn.ModuleList([nn.Linear(config.hidden_size, config.hidden_size) for _ in range(4)])
self.head = nn.Linear(config.hidden_size, 1)
self.gradient_checkpointing = False
self.double_output = config.double_output
def forward(self, input_x, labels=None, **kwargs):
y = input_x.unsqueeze(0)
for layer in self.layers:
if self.training and self.gradient_checkpointing:
outputs = self._gradient_checkpointing_func(layer.__call__, y)
else:
outputs = layer(y)
y = outputs * 3
logits = self.head(y)
if labels is None:
return (logits, logits) if self.double_output else (logits,)
loss = nn.functional.mse_loss(logits, labels)
return (loss, y, y) if self.double_output else (loss, y)
class RegressionRandomPreTrainedModel(PreTrainedModel):
config_class = RegressionModelConfig
base_model_prefix = "regression"
def __init__(self, config):
super().__init__(config)
self.a = nn.Parameter(torch.tensor(config.a).float())
self.b = nn.Parameter(torch.tensor(config.b).float())
self.random_torch = config.random_torch
def forward(self, input_x, labels=None, **kwargs):
y = input_x * self.a + self.b
if self.random_torch:
torch_rand = torch.randn(1).squeeze()
np_rand = np.random.rand()
rand_rand = random.random()
if self.random_torch:
y += 0.05 * torch_rand
y += 0.05 * torch.tensor(np_rand + rand_rand)
if labels is None:
return (y,)
loss = nn.functional.mse_loss(y, labels)
return (loss, y)
class TstLayer(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.linear1 = nn.Linear(hidden_size, hidden_size)
self.ln1 = nn.LayerNorm(hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.ln2 = nn.LayerNorm(hidden_size)
self.bias = nn.Parameter(torch.zeros(hidden_size))
def forward(self, x):
h = self.ln1(nn.functional.relu(self.linear1(x)))
        h = nn.functional.relu(self.linear2(h))
return self.ln2(x + h + self.bias)
def get_regression_trainer(
a=0, b=0, double_output=False, train_len=64, eval_len=64, pretrained=True, keep_report_to=False, **kwargs
):
label_names = kwargs.get("label_names", None)
gradient_checkpointing = kwargs.get("gradient_checkpointing", False)
train_dataset = RegressionDataset(length=train_len, label_names=label_names)
eval_dataset = RegressionDataset(length=eval_len, label_names=label_names)
model_init = kwargs.pop("model_init", None)
if model_init is not None:
model = None
else:
if pretrained:
config = RegressionModelConfig(a=a, b=b, double_output=double_output)
            # We infer the correct model class depending on whether gradient_checkpointing is used
target_cls = (
RegressionPreTrainedModel
if not gradient_checkpointing
else RegressionPreTrainedModelWithGradientCheckpointing
)
model = target_cls(config)
else:
model = RegressionModel(a=a, b=b, double_output=double_output)
compute_metrics = kwargs.pop("compute_metrics", None)
data_collator = kwargs.pop("data_collator", None)
optimizers = kwargs.pop("optimizers", (None, None))
output_dir = kwargs.pop("output_dir", "./regression")
preprocess_logits_for_metrics = kwargs.pop("preprocess_logits_for_metrics", None)
args = RegressionTrainingArguments(output_dir, a=a, b=b, keep_report_to=keep_report_to, **kwargs)
return Trainer(
model,
args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=compute_metrics,
optimizers=optimizers,
model_init=model_init,
preprocess_logits_for_metrics=preprocess_logits_for_metrics,
)
class TrainerIntegrationCommon:
def check_saved_checkpoints(self, output_dir, freq, total, is_pretrained=True, safe_weights=True):
weights_file = WEIGHTS_NAME if not safe_weights else SAFE_WEIGHTS_NAME
file_list = [weights_file, "training_args.bin", "optimizer.pt", "scheduler.pt", "trainer_state.json"]
if is_pretrained:
file_list.append("config.json")
for step in range(freq, total, freq):
checkpoint = os.path.join(output_dir, f"checkpoint-{step}")
self.assertTrue(os.path.isdir(checkpoint))
for filename in file_list:
self.assertTrue(os.path.isfile(os.path.join(checkpoint, filename)))
def check_best_model_has_been_loaded(
self, output_dir, freq, total, trainer, metric, greater_is_better=False, is_pretrained=True, safe_weights=True
):
checkpoint = os.path.join(output_dir, f"checkpoint-{(total // freq) * freq}")
log_history = TrainerState.load_from_json(os.path.join(checkpoint, "trainer_state.json")).log_history
values = [d[metric] for d in log_history]
best_value = max(values) if greater_is_better else min(values)
best_checkpoint = (values.index(best_value) + 1) * freq
checkpoint = os.path.join(output_dir, f"checkpoint-{best_checkpoint}")
if is_pretrained:
best_model = RegressionPreTrainedModel.from_pretrained(checkpoint)
best_model.to(trainer.args.device)
else:
best_model = RegressionModel()
if not safe_weights:
state_dict = torch.load(os.path.join(checkpoint, WEIGHTS_NAME))
else:
state_dict = safetensors.torch.load_file(os.path.join(checkpoint, SAFE_WEIGHTS_NAME))
best_model.load_state_dict(state_dict)
best_model.to(trainer.args.device)
self.assertTrue(torch.allclose(best_model.a, trainer.model.a))
self.assertTrue(torch.allclose(best_model.b, trainer.model.b))
metrics = trainer.evaluate()
self.assertEqual(metrics[metric], best_value)
def check_trainer_state_are_the_same(self, trainer_state, trainer_state1):
# We'll pop things so operate on copies.
state = trainer_state.copy()
state1 = trainer_state1.copy()
        # Log history may contain different logs for the time metrics (after resuming a training).
log_history = state.pop("log_history", None)
log_history1 = state1.pop("log_history", None)
self.assertEqual(state, state1)
skip_log_keys = ["train_runtime", "train_samples_per_second", "train_steps_per_second", "train_loss"]
for log, log1 in zip(log_history, log_history1):
for key in skip_log_keys:
_ = log.pop(key, None)
_ = log1.pop(key, None)
self.assertEqual(log, log1)
def convert_to_sharded_checkpoint(self, folder, save_safe=True, load_safe=True):
# Converts a checkpoint of a regression model to a sharded checkpoint.
if load_safe:
loader = safetensors.torch.load_file
weights_file = os.path.join(folder, SAFE_WEIGHTS_NAME)
else:
loader = torch.load
weights_file = os.path.join(folder, WEIGHTS_NAME)
if save_safe:
extension = "safetensors"
saver = safetensors.torch.save_file
index_file = os.path.join(folder, SAFE_WEIGHTS_INDEX_NAME)
shard_name = SAFE_WEIGHTS_NAME
else:
extension = "bin"
saver = torch.save
index_file = os.path.join(folder, WEIGHTS_INDEX_NAME)
shard_name = WEIGHTS_NAME
state_dict = loader(weights_file)
os.remove(weights_file)
keys = list(state_dict.keys())
shard_files = [
shard_name.replace(f".{extension}", f"-{idx+1:05d}-of-{len(keys):05d}.{extension}")
for idx in range(len(keys))
]
index = {"metadata": {}, "weight_map": {key: shard_files[i] for i, key in enumerate(keys)}}
with open(index_file, "w", encoding="utf-8") as f:
content = json.dumps(index, indent=2, sort_keys=True) + "\n"
f.write(content)
for param_name, shard_file in zip(keys, shard_files):
saver({param_name: state_dict[param_name]}, os.path.join(folder, shard_file))
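# For reference, the index file written above for the two-parameter regression model
# (parameters "a" and "b") looks like the following in the safetensors case; the shard
# names are derived from SAFE_WEIGHTS_NAME exactly as done in `convert_to_sharded_checkpoint`:
#
#     {
#       "metadata": {},
#       "weight_map": {
#         "a": "model-00001-of-00002.safetensors",
#         "b": "model-00002-of-00002.safetensors"
#       }
#     }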
@require_torch
@require_sentencepiece
@require_tokenizers
class TrainerIntegrationPrerunTest(TestCasePlus, TrainerIntegrationCommon):
"""
    Only tests that want to tap into the two auto-pre-run trainings:
- self.default_trained_model
- self.alternate_trained_model
directly, or via check_trained_model
"""
def setUp(self):
super().setUp()
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
trainer = get_regression_trainer(learning_rate=0.1)
trainer.train()
self.default_trained_model = (trainer.model.a, trainer.model.b)
trainer = get_regression_trainer(learning_rate=0.1, seed=314)
trainer.train()
self.alternate_trained_model = (trainer.model.a, trainer.model.b)
def check_trained_model(self, model, alternate_seed=False):
# Checks a training seeded with learning_rate = 0.1
(a, b) = self.alternate_trained_model if alternate_seed else self.default_trained_model
self.assertTrue(torch.allclose(model.a, a))
self.assertTrue(torch.allclose(model.b, b))
def test_reproducible_training(self):
# Checks that training worked, model trained and seed made a reproducible training.
trainer = get_regression_trainer(learning_rate=0.1)
trainer.train()
self.check_trained_model(trainer.model)
# Checks that a different seed gets different (reproducible) results.
trainer = get_regression_trainer(learning_rate=0.1, seed=314)
trainer.train()
self.check_trained_model(trainer.model, alternate_seed=True)
def test_trainer_with_datasets(self):
import datasets
np.random.seed(42)
x = np.random.normal(size=(64,)).astype(np.float32)
y = 2.0 * x + 3.0 + np.random.normal(scale=0.1, size=(64,))
train_dataset = datasets.Dataset.from_dict({"input_x": x, "label": y})
# Base training. Should have the same results as test_reproducible_training
model = RegressionModel()
args = TrainingArguments("./regression", learning_rate=0.1)
trainer = Trainer(model, args, train_dataset=train_dataset)
trainer.train()
self.check_trained_model(trainer.model)
# Can return tensors.
train_dataset.set_format(type="torch", dtype=torch.float32)
model = RegressionModel()
trainer = Trainer(model, args, train_dataset=train_dataset)
trainer.train()
self.check_trained_model(trainer.model)
# Adding one column not used by the model should have no impact
z = np.random.normal(size=(64,)).astype(np.float32)
train_dataset = datasets.Dataset.from_dict({"input_x": x, "label": y, "extra": z})
model = RegressionModel()
trainer = Trainer(model, args, train_dataset=train_dataset)
trainer.train()
self.check_trained_model(trainer.model)
def test_model_init(self):
train_dataset = RegressionDataset()
args = TrainingArguments("./regression", learning_rate=0.1)
trainer = Trainer(args=args, train_dataset=train_dataset, model_init=lambda: RegressionModel())
trainer.train()
self.check_trained_model(trainer.model)
        # Re-training should restart from scratch, thus lead to the same results.
trainer.train()
self.check_trained_model(trainer.model)
        # Re-training after changing the seed should also restart from scratch and give the
        # alternate (but still reproducible) results.
trainer.args.seed = 314
trainer.train()
self.check_trained_model(trainer.model, alternate_seed=True)
def test_gradient_accumulation(self):
# Training with half the batch size but accumulation steps as 2 should give the same results.
trainer = get_regression_trainer(
gradient_accumulation_steps=2, per_device_train_batch_size=4, learning_rate=0.1
)
trainer.train()
self.check_trained_model(trainer.model)
def test_gradient_checkpointing(self):
trainer = get_regression_trainer(
per_device_train_batch_size=1,
learning_rate=0.1,
gradient_checkpointing=True,
gradient_checkpointing_kwargs={"use_reentrant": False},
)
previous_params = {k: v.detach().clone() for k, v in trainer.model.named_parameters()}
trainer.train()
# Check if model weights have been updated
for k, v in trainer.model.named_parameters():
self.assertFalse(
torch.allclose(previous_params[k], v, rtol=1e-4, atol=1e-4),
f"Model weights for {k} have not been updated",
)
def test_training_loss(self):
n_gpus = max(1, backend_device_count(torch_device))
# With even logs
trainer = get_regression_trainer(logging_steps=64 / (8 * n_gpus))
trainer.train()
log_history = trainer.state.log_history
losses = [log["loss"] for log in log_history if "loss" in log]
train_loss = log_history[-1]["train_loss"]
self.assertAlmostEqual(sum(losses) / len(losses), train_loss, places=4)
# With uneven logs
trainer = get_regression_trainer(logging_steps=5)
trainer.train()
log_history = trainer.state.log_history
# Training loss should be the same as before
new_train_loss = log_history[-1]["train_loss"]
self.assertAlmostEqual(train_loss, new_train_loss, places=4)
def test_custom_optimizer(self):
train_dataset = RegressionDataset()
args = TrainingArguments("./regression")
model = RegressionModel()
optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda x: 1.0)
trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler))
trainer.train()
(a, b) = self.default_trained_model
self.assertFalse(torch.allclose(trainer.model.a, a))
self.assertFalse(torch.allclose(trainer.model.b, b))
self.assertEqual(trainer.optimizer.state_dict()["param_groups"][0]["lr"], 1.0)
def test_lr_scheduler_kwargs(self):
# test scheduler kwargs passed via TrainingArguments
train_dataset = RegressionDataset()
model = RegressionModel()
num_steps, num_warmup_steps = 10, 2
extra_kwargs = {"power": 5.0, "lr_end": 1e-5} # Non-default arguments
args = TrainingArguments(
"./regression",
lr_scheduler_type="polynomial",
lr_scheduler_kwargs=extra_kwargs,
learning_rate=0.2,
warmup_steps=num_warmup_steps,
)
trainer = Trainer(model, args, train_dataset=train_dataset)
trainer.create_optimizer_and_scheduler(num_training_steps=num_steps)
# Checking that the scheduler was created
self.assertIsNotNone(trainer.lr_scheduler)
# Checking that the correct args were passed
sched1 = trainer.lr_scheduler
sched2 = get_polynomial_decay_schedule_with_warmup(
trainer.optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_steps, **extra_kwargs
)
self.assertEqual(sched1.lr_lambdas[0].args, sched2.lr_lambdas[0].args)
self.assertEqual(sched1.lr_lambdas[0].keywords, sched2.lr_lambdas[0].keywords)
def test_reduce_lr_on_plateau_args(self):
# test passed arguments for a custom ReduceLROnPlateau scheduler
train_dataset = RegressionDataset(length=64)
eval_dataset = RegressionDataset(length=64)
args = TrainingArguments(
"./regression",
evaluation_strategy="epoch",
metric_for_best_model="eval_loss",
)
model = RegressionModel()
optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.2, patience=5, cooldown=2)
trainer = Trainer(
model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, optimizers=(optimizer, lr_scheduler)
)
trainer.train()
self.assertIsInstance(trainer.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau)
self.assertEqual(trainer.lr_scheduler.factor, 0.2)
self.assertEqual(trainer.lr_scheduler.patience, 5)
self.assertEqual(trainer.lr_scheduler.cooldown, 2)
def test_reduce_lr_on_plateau(self):
# test the ReduceLROnPlateau scheduler
class TrainerWithLRLogs(Trainer):
def log(self, logs):
# the LR is computed after metrics and does not exist for the first epoch
if hasattr(self.lr_scheduler, "_last_lr"):
logs["learning_rate"] = self.lr_scheduler._last_lr[0]
super().log(logs)
train_dataset = RegressionDataset(length=64)
eval_dataset = RegressionDataset(length=64)
args = TrainingArguments(
"./regression",
lr_scheduler_type="reduce_lr_on_plateau",
evaluation_strategy="epoch",
metric_for_best_model="eval_loss",
num_train_epochs=10,
learning_rate=0.2,
)
model = RegressionModel()
trainer = TrainerWithLRLogs(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
trainer.train()
self.assertIsInstance(trainer.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau)
patience = trainer.lr_scheduler.patience
logs = trainer.state.log_history[1:]
best_loss = logs[0]["eval_loss"]
bad_epochs = 0
for i, log in enumerate(logs[:-1]): # Compare learning rate to next epoch's
loss = log["eval_loss"]
just_decreased = False
if loss > best_loss:
bad_epochs += 1
if bad_epochs > patience:
self.assertLess(logs[i + 1]["learning_rate"], log["learning_rate"])
just_decreased = True
bad_epochs = 0
else:
best_loss = loss
bad_epochs = 0
if not just_decreased:
self.assertEqual(logs[i + 1]["learning_rate"], log["learning_rate"])
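        # In words: ReduceLROnPlateau keeps the learning rate constant while eval_loss
        # improves, counts "bad" epochs while it does not, and multiplies the rate by
        # `factor` once the count exceeds `patience` -- exactly the bookkeeping verified
        # by the loop above.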
def test_adafactor_lr_none(self):
        # test the special case where lr=None, since the Trainer always needs an lr_scheduler
from transformers.optimization import Adafactor, AdafactorSchedule
train_dataset = RegressionDataset()
args = TrainingArguments("./regression")
model = RegressionModel()
optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
lr_scheduler = AdafactorSchedule(optimizer)
trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler))
trainer.train()
(a, b) = self.default_trained_model
self.assertFalse(torch.allclose(trainer.model.a, a))
self.assertFalse(torch.allclose(trainer.model.b, b))
self.assertGreater(trainer.optimizer.state_dict()["param_groups"][0]["lr"], 0)
@require_torch_accelerator
@require_torch_bf16
def test_mixed_bf16(self):
# very basic test
trainer = get_regression_trainer(learning_rate=0.1, bf16=True)
trainer.train()
self.check_trained_model(trainer.model)
# --bf16 --half_precision_backend apex can't be used together
with self.assertRaises(ValueError):
trainer = get_regression_trainer(learning_rate=0.1, bf16=True, half_precision_backend="apex")
# will add more specific tests once there are some bugs to fix
@require_torch_gpu
@require_torch_tf32
def test_tf32(self):
# very basic test
trainer = get_regression_trainer(learning_rate=0.1, tf32=True)
trainer.train()
self.check_trained_model(trainer.model)
@require_torch
@require_sentencepiece
@require_tokenizers
class TrainerIntegrationTest(TestCasePlus, TrainerIntegrationCommon):
def setUp(self):
super().setUp()
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def test_trainer_works_with_dict(self):
# Edge case because Apex with mode O2 will change our models to return dicts. This test checks it doesn't break
# anything.
train_dataset = RegressionDataset()
eval_dataset = RegressionDataset()
model = RegressionDictModel()
args = TrainingArguments("./regression")
trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
trainer.train()
_ = trainer.evaluate()
_ = trainer.predict(eval_dataset)
def test_evaluation_with_keys_to_drop(self):
config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4)
tiny_gpt2 = GPT2LMHeadModel(config)
x = torch.randint(0, 100, (128,))
eval_dataset = RepeatDataset(x)
args = TrainingArguments("./test")
trainer = Trainer(tiny_gpt2, args, eval_dataset=eval_dataset)
# By default the past_key_values are removed
result = trainer.predict(eval_dataset)
self.assertTrue(isinstance(result.predictions, np.ndarray))
# We can still get them by setting ignore_keys to []
result = trainer.predict(eval_dataset, ignore_keys=[])
self.assertTrue(isinstance(result.predictions, tuple))
self.assertEqual(len(result.predictions), 2)
def test_training_arguments_are_left_untouched(self):
trainer = get_regression_trainer()
trainer.train()
args = TrainingArguments("./regression", report_to=[])
dict1, dict2 = args.to_dict(), trainer.args.to_dict()
for key in dict1.keys():
# Logging dir can be slightly different as they default to something with the time.
if key != "logging_dir":
self.assertEqual(dict1[key], dict2[key])
def test_number_of_steps_in_training(self):
# Regular training has n_epochs * len(train_dl) steps
trainer = get_regression_trainer(learning_rate=0.1)
train_output = trainer.train()
self.assertEqual(train_output.global_step, self.n_epochs * 64 / self.batch_size)
# Check passing num_train_epochs works (and a float version too):
trainer = get_regression_trainer(learning_rate=0.1, num_train_epochs=1.5)
train_output = trainer.train()
self.assertEqual(train_output.global_step, int(1.5 * 64 / self.batch_size))
# If we pass a max_steps, num_train_epochs is ignored
trainer = get_regression_trainer(learning_rate=0.1, max_steps=10)
train_output = trainer.train()
self.assertEqual(train_output.global_step, 10)
@require_torch_bf16
@require_intel_extension_for_pytorch
def test_number_of_steps_in_training_with_ipex(self):
for mix_bf16 in [True, False]:
# Regular training has n_epochs * len(train_dl) steps
trainer = get_regression_trainer(learning_rate=0.1, use_ipex=True, bf16=mix_bf16, use_cpu=True)
train_output = trainer.train()
self.assertEqual(train_output.global_step, self.n_epochs * 64 / trainer.args.train_batch_size)
# Check passing num_train_epochs works (and a float version too):
trainer = get_regression_trainer(
learning_rate=0.1, num_train_epochs=1.5, use_ipex=True, bf16=mix_bf16, use_cpu=True
)
train_output = trainer.train()
self.assertEqual(train_output.global_step, int(1.5 * 64 / trainer.args.train_batch_size))
# If we pass a max_steps, num_train_epochs is ignored
trainer = get_regression_trainer(
learning_rate=0.1, max_steps=10, use_ipex=True, bf16=mix_bf16, use_cpu=True
)
train_output = trainer.train()
self.assertEqual(train_output.global_step, 10)
def test_neftune(self):
config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4)
tiny_gpt2 = GPT2LMHeadModel(config)
x = torch.randint(0, 100, (128,))
train_dataset = RepeatDataset(x)
# Trainer without inf/nan filter
args = TrainingArguments(
"./test", learning_rate=1e-9, logging_steps=5, logging_nan_inf_filter=False, neftune_noise_alpha=0.4
)
trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset)
trainer.model = trainer._activate_neftune(trainer.model)
dummy_input = torch.LongTensor([[1, 0, 1]]).to(torch_device)
emb1 = trainer.model.get_input_embeddings()(dummy_input)
emb2 = trainer.model.get_input_embeddings()(dummy_input)
self.assertFalse(torch.allclose(emb1, emb2), "Neftune noise is not applied!")
# redefine the model
tiny_gpt2 = GPT2LMHeadModel(config)
# Trainer without inf/nan filter
args = TrainingArguments(
"./test", learning_rate=1e-9, logging_steps=5, logging_nan_inf_filter=False, neftune_noise_alpha=0.4
)
trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset)
# Check that it trains without errors
trainer.train()
# Make sure forward pass works fine
_ = trainer.model(dummy_input)
self.assertTrue(len(trainer.model.get_input_embeddings()._forward_hooks) == 0)
trainer.model.eval()
# Check that we get identical embeddings just in case
emb1 = trainer.model.get_input_embeddings()(dummy_input)
emb2 = trainer.model.get_input_embeddings()(dummy_input)
self.assertTrue(torch.allclose(emb1, emb2), "Neftune noise is still applied!")
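        # Background (hedged summary, not asserted by the test): NEFTune adds uniform
        # noise to the input embeddings during training only, scaled roughly by
        # neftune_noise_alpha / sqrt(seq_len * hidden_dim) via a forward hook, which is
        # why the hook count must drop back to 0 and the embeddings must be
        # deterministic once training has finished.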
def test_logging_inf_nan_filter(self):
config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4)
tiny_gpt2 = GPT2LMHeadModel(config)
x = torch.randint(0, 100, (128,))
train_dataset = RepeatDataset(x)
# Trainer without inf/nan filter
args = TrainingArguments("./test", learning_rate=1e9, logging_steps=5, logging_nan_inf_filter=False)
trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset)
trainer.train()
log_history_no_filter = trainer.state.log_history
# Trainer with inf/nan filter
args = TrainingArguments("./test", learning_rate=1e9, logging_steps=5, logging_nan_inf_filter=True)
trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset)
trainer.train()
log_history_filter = trainer.state.log_history
def is_any_loss_nan_or_inf(log_history):
losses = [l["loss"] for l in log_history[:-1]]
return any(math.isnan(x) for x in losses) or any(math.isinf(x) for x in losses)
self.assertTrue(is_any_loss_nan_or_inf(log_history_no_filter))
self.assertFalse(is_any_loss_nan_or_inf(log_history_filter))
def test_train_and_eval_dataloaders(self):
n_gpu = max(1, backend_device_count(torch_device))
trainer = get_regression_trainer(learning_rate=0.1, per_device_train_batch_size=16)
self.assertEqual(trainer.get_train_dataloader().total_batch_size, 16 * n_gpu)
trainer = get_regression_trainer(learning_rate=0.1, per_device_eval_batch_size=16)
self.assertEqual(trainer.get_eval_dataloader().total_batch_size, 16 * n_gpu)
# Check drop_last works
trainer = get_regression_trainer(
train_len=66, eval_len=74, learning_rate=0.1, per_device_train_batch_size=16, per_device_eval_batch_size=32
)
self.assertEqual(len(trainer.get_train_dataloader()), 66 // (16 * n_gpu) + 1)
self.assertEqual(len(trainer.get_eval_dataloader()), 74 // (32 * n_gpu) + 1)
trainer = get_regression_trainer(
train_len=66,
eval_len=74,
learning_rate=0.1,
per_device_train_batch_size=16,
per_device_eval_batch_size=32,
dataloader_drop_last=True,
)
self.assertEqual(len(trainer.get_train_dataloader()), 66 // (16 * n_gpu))
self.assertEqual(len(trainer.get_eval_dataloader()), 74 // (32 * n_gpu))
# Check passing a new dataset for evaluation works
new_eval_dataset = RegressionDataset(length=128)
self.assertEqual(len(trainer.get_eval_dataloader(new_eval_dataset)), 128 // (32 * n_gpu))
# tests that we do not require dataloader to have a .dataset attribute
def test_dataloader_without_dataset(self):
train_dataset = RegressionDataset(length=128)
trainer = CustomDataloaderTrainer(
model=RegressionModel(), train_dataset=train_dataset, eval_dataset=train_dataset
)
trainer.train()
trainer.evaluate()
@require_torch_multi_accelerator
def test_data_is_not_parallelized_when_model_is_parallel(self):
model = RegressionModel()
# Make the Trainer believe it's a parallelized model
model.is_parallelizable = True
model.model_parallel = True
args = TrainingArguments("./regression", per_device_train_batch_size=16, per_device_eval_batch_size=16)
trainer = Trainer(model, args, train_dataset=RegressionDataset(), eval_dataset=RegressionDataset())
# Check the Trainer was fooled
self.assertTrue(trainer.is_model_parallel)
self.assertEqual(trainer.args.n_gpu, 1)
# The batch size of the training and evaluation dataloaders should be 16, not 16 * n_gpu
self.assertEqual(trainer.get_train_dataloader().total_batch_size, 16)
self.assertEqual(len(trainer.get_train_dataloader()), 64 // 16)
self.assertEqual(trainer.get_eval_dataloader().total_batch_size, 16)
self.assertEqual(len(trainer.get_eval_dataloader()), 64 // 16)
def test_evaluate(self):
trainer = get_regression_trainer(a=1.5, b=2.5, compute_metrics=AlmostAccuracy())
results = trainer.evaluate()
x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
# With a number of elements not a round multiple of the batch size
trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, compute_metrics=AlmostAccuracy())
results = trainer.evaluate()
x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
# With logits preprocess
trainer = get_regression_trainer(
a=1.5,
b=2.5,
compute_metrics=AlmostAccuracy(),
preprocess_logits_for_metrics=lambda logits, labels: logits + 1,
)
results = trainer.evaluate()
x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred + 1, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
def test_evaluate_with_jit(self):
trainer = get_regression_trainer(a=1.5, b=2.5, compute_metrics=AlmostAccuracy(), jit_mode_eval=True)
results = trainer.evaluate()
x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
# With a number of elements not a round multiple of the batch size
trainer = get_regression_trainer(
a=1.5, b=2.5, eval_len=66, compute_metrics=AlmostAccuracy(), jit_mode_eval=True
)
results = trainer.evaluate()
x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
# With logits preprocess
trainer = get_regression_trainer(
a=1.5,
b=2.5,
compute_metrics=AlmostAccuracy(),
preprocess_logits_for_metrics=lambda logits, labels: logits + 1,
jit_mode_eval=True,
)
results = trainer.evaluate()
x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred + 1, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
@require_torch_bf16
@require_intel_extension_for_pytorch
def test_evaluate_with_ipex(self):
for mix_bf16 in [True, False]:
trainer = get_regression_trainer(
a=1.5, b=2.5, use_ipex=True, compute_metrics=AlmostAccuracy(), bf16=mix_bf16, use_cpu=True
)
results = trainer.evaluate()
x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
# With a number of elements not a round multiple of the batch size
trainer = get_regression_trainer(
a=1.5,
b=2.5,
use_ipex=True,
eval_len=66,
compute_metrics=AlmostAccuracy(),
bf16=mix_bf16,
use_cpu=True,
)
results = trainer.evaluate()
x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
# With logits preprocess
trainer = get_regression_trainer(
a=1.5,
b=2.5,
use_ipex=True,
compute_metrics=AlmostAccuracy(),
preprocess_logits_for_metrics=lambda logits, labels: logits + 1,
bf16=mix_bf16,
use_cpu=True,
)
results = trainer.evaluate()
x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred + 1, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
def test_predict(self):
trainer = get_regression_trainer(a=1.5, b=2.5)
preds = trainer.predict(trainer.eval_dataset).predictions
x = trainer.eval_dataset.x
self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))
# With a number of elements not a round multiple of the batch size
trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66)
preds = trainer.predict(trainer.eval_dataset).predictions
x = trainer.eval_dataset.x
self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))
# With more than one output of the model
trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True)
preds = trainer.predict(trainer.eval_dataset).predictions
x = trainer.eval_dataset.x
self.assertEqual(len(preds), 2)
self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))
self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))
# With more than one output/label of the model
trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True, label_names=["labels", "labels_2"])
outputs = trainer.predict(trainer.eval_dataset)
preds = outputs.predictions
labels = outputs.label_ids
x = trainer.eval_dataset.x
self.assertEqual(len(preds), 2)
self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))
self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))
self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0]))
self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1]))
def test_predict_with_jit(self):
trainer = get_regression_trainer(a=1.5, b=2.5, jit_mode_eval=True)
preds = trainer.predict(trainer.eval_dataset).predictions
x = trainer.eval_dataset.x
self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))
# With a number of elements not a round multiple of the batch size
trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, jit_mode_eval=True)
preds = trainer.predict(trainer.eval_dataset).predictions
x = trainer.eval_dataset.x
self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))
# With more than one output of the model
trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True, jit_mode_eval=True)
preds = trainer.predict(trainer.eval_dataset).predictions
x = trainer.eval_dataset.x
self.assertEqual(len(preds), 2)
self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))
self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))
# With more than one output/label of the model
trainer = get_regression_trainer(
a=1.5, b=2.5, double_output=True, label_names=["labels", "labels_2"], jit_mode_eval=True
)
outputs = trainer.predict(trainer.eval_dataset)
preds = outputs.predictions
labels = outputs.label_ids
x = trainer.eval_dataset.x
self.assertEqual(len(preds), 2)
self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))
self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))
self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0]))
self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1]))
@require_torch_bf16
@require_intel_extension_for_pytorch
def test_predict_with_ipex(self):
for mix_bf16 in [True, False]:
trainer = get_regression_trainer(a=1.5, b=2.5, use_ipex=True, bf16=mix_bf16, use_cpu=True)
preds = trainer.predict(trainer.eval_dataset).predictions
x = trainer.eval_dataset.x
self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))
# With a number of elements not a round multiple of the batch size
trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, use_ipex=True, bf16=mix_bf16, use_cpu=True)
preds = trainer.predict(trainer.eval_dataset).predictions
x = trainer.eval_dataset.x
self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))
# With more than one output of the model
trainer = get_regression_trainer(
a=1.5, b=2.5, double_output=True, use_ipex=True, bf16=mix_bf16, use_cpu=True
)
preds = trainer.predict(trainer.eval_dataset).predictions
x = trainer.eval_dataset.x
self.assertEqual(len(preds), 2)
self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))
self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))
# With more than one output/label of the model
trainer = get_regression_trainer(
a=1.5,
b=2.5,
double_output=True,
label_names=["labels", "labels_2"],
use_ipex=True,
bf16=mix_bf16,
use_cpu=True,
)
outputs = trainer.predict(trainer.eval_dataset)
preds = outputs.predictions
labels = outputs.label_ids
x = trainer.eval_dataset.x
self.assertEqual(len(preds), 2)
self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))
self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))
self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0]))
self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1]))
def test_dynamic_shapes(self):
eval_dataset = DynamicShapesDataset(batch_size=self.batch_size)
model = RegressionModel(a=2, b=1)
args = TrainingArguments("./regression")
trainer = Trainer(model, args, eval_dataset=eval_dataset)
# Check evaluation can run to completion
_ = trainer.evaluate()
# Check predictions
preds = trainer.predict(eval_dataset)
for expected, seen in zip(eval_dataset.ys, preds.label_ids):
self.assertTrue(np.array_equal(expected, seen[: expected.shape[0]]))
self.assertTrue(np.all(seen[expected.shape[0] :] == -100))
for expected, seen in zip(eval_dataset.xs, preds.predictions):
self.assertTrue(np.array_equal(2 * expected + 1, seen[: expected.shape[0]]))
self.assertTrue(np.all(seen[expected.shape[0] :] == -100))
# Same tests with eval accumulation
args = TrainingArguments("./regression", eval_accumulation_steps=2)
trainer = Trainer(model, args, eval_dataset=eval_dataset)
# Check evaluation can run to completion
_ = trainer.evaluate()
# Check predictions
preds = trainer.predict(eval_dataset)
for expected, seen in zip(eval_dataset.ys, preds.label_ids):
self.assertTrue(np.array_equal(expected, seen[: expected.shape[0]]))
self.assertTrue(np.all(seen[expected.shape[0] :] == -100))
for expected, seen in zip(eval_dataset.xs, preds.predictions):
self.assertTrue(np.array_equal(2 * expected + 1, seen[: expected.shape[0]]))
self.assertTrue(np.all(seen[expected.shape[0] :] == -100))
def test_log_level(self):
# testing only --log_level (--log_level_replica requires multiple gpus and DDP and is tested elsewhere)
logger = logging.get_logger()
log_info_string = "Running training"
# test with the default log_level - should be the same as before and thus we test depending on is_info
is_info = logging.get_verbosity() <= 20
with CaptureLogger(logger) as cl:
trainer = get_regression_trainer()
trainer.train()
if is_info:
self.assertIn(log_info_string, cl.out)
else:
self.assertNotIn(log_info_string, cl.out)
# test with low log_level - lower than info
with CaptureLogger(logger) as cl:
trainer = get_regression_trainer(log_level="debug")
trainer.train()
self.assertIn(log_info_string, cl.out)
# test with high log_level - should be quiet
with CaptureLogger(logger) as cl:
trainer = get_regression_trainer(log_level="error")
trainer.train()
self.assertNotIn(log_info_string, cl.out)
def test_save_checkpoints(self):
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5)
trainer.train()
self.check_saved_checkpoints(tmpdir, 5, int(self.n_epochs * 64 / self.batch_size))
# With a regular model that is not a PreTrainedModel
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5, pretrained=False)
trainer.train()
self.check_saved_checkpoints(tmpdir, 5, int(self.n_epochs * 64 / self.batch_size), False)
@require_safetensors
def test_safe_checkpoints(self):
for save_safetensors in [True, False]:
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5, save_safetensors=save_safetensors)
trainer.train()
self.check_saved_checkpoints(
tmpdir, 5, int(self.n_epochs * 64 / self.batch_size), safe_weights=save_safetensors
)
# With a regular model that is not a PreTrainedModel
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
output_dir=tmpdir, save_steps=5, pretrained=False, save_safetensors=save_safetensors
)
trainer.train()
self.check_saved_checkpoints(
tmpdir, 5, int(self.n_epochs * 64 / self.batch_size), False, safe_weights=save_safetensors
)
@require_torch_multi_accelerator
def test_run_seq2seq_double_train_wrap_once(self):
# test that we don't wrap the model more than once
# since wrapping primarily happens on multi-gpu setup we want multiple gpus to test for
# example DataParallel(DataParallel(model))
trainer = get_regression_trainer()
trainer.train()
model_wrapped_before = trainer.model_wrapped
trainer.train()
model_wrapped_after = trainer.model_wrapped
self.assertIs(model_wrapped_before, model_wrapped_after, "should be not wrapped twice")
@require_torch_up_to_2_accelerators
def test_can_resume_training(self):
# This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of
# save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model
# won't be the same since the training dataloader is shuffled).
with tempfile.TemporaryDirectory() as tmpdir:
kwargs = {
"output_dir": tmpdir,
"train_len": 128,
"save_steps": 5,
"learning_rate": 0.1,
"logging_steps": 5,
}
trainer = get_regression_trainer(**kwargs)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint = os.path.join(tmpdir, "checkpoint-5")
# Reinitialize trainer
trainer = get_regression_trainer(**kwargs)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
# Now check with a later checkpoint that it also works when we span over one epoch
checkpoint = os.path.join(tmpdir, "checkpoint-15")
# Reinitialize trainer and load model
trainer = get_regression_trainer(**kwargs)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
# With a regular model that is not a PreTrainedModel
with tempfile.TemporaryDirectory() as tmpdir:
kwargs = {
"output_dir": tmpdir,
"train_len": 128,
"save_steps": 5,
"learning_rate": 0.1,
"pretrained": False,
}
trainer = get_regression_trainer(**kwargs)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint = os.path.join(tmpdir, "checkpoint-5")
# Reinitialize trainer and load model
trainer = get_regression_trainer(**kwargs)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
# Now check with a later checkpoint that it also works when we span over one epoch
checkpoint = os.path.join(tmpdir, "checkpoint-15")
# Reinitialize trainer and load model
trainer = get_regression_trainer(**kwargs)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
# Now check failures
# 1. fail to find a bogus checkpoint
trainer = get_regression_trainer()
with self.assertRaises(Exception) as context:
trainer.train(resume_from_checkpoint=f"{checkpoint}-bogus")
self.assertTrue("Can't find a valid checkpoint at" in str(context.exception))
        # 2. fail to find any checkpoint - due to a fresh output_dir
output_dir2 = self.get_auto_remove_tmp_dir()
trainer = get_regression_trainer(output_dir=output_dir2)
with self.assertRaises(Exception) as context:
trainer.train(resume_from_checkpoint=True)
self.assertTrue("No valid checkpoint found in output directory" in str(context.exception))
def test_resume_training_with_randomness(self):
        # With more than 1 GPU, since the randomness is introduced in the model and DataParallel is
        # used, the calls to the torch RNG will happen in a random order (sometimes GPU 0 will call
        # first and sometimes GPU 1).
random_torch = not torch.cuda.is_available() or torch.cuda.device_count() <= 1
if torch.cuda.is_available():
torch.backends.cudnn.deterministic = True
train_dataset = RegressionDataset(length=128)
eval_dataset = RegressionDataset()
with self.subTest("Test every step"):
config = RegressionModelConfig(a=0, b=2, random_torch=random_torch)
model = RegressionRandomPreTrainedModel(config)
tmp_dir = self.get_auto_remove_tmp_dir()
args = RegressionTrainingArguments(tmp_dir, save_steps=5, learning_rate=0.1)
trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
model = RegressionRandomPreTrainedModel(config)
trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
trainer.train(resume_from_checkpoint=os.path.join(tmp_dir, "checkpoint-15"))
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
self.assertAlmostEqual(a, a1, delta=1e-5)
self.assertAlmostEqual(b, b1, delta=1e-5)
with self.subTest("Test every epoch"):
config = RegressionModelConfig(a=0, b=2, random_torch=random_torch)
model = RegressionRandomPreTrainedModel(config)
tmp_dir = self.get_auto_remove_tmp_dir()
args = RegressionTrainingArguments(tmp_dir, save_strategy="epoch", learning_rate=0.1)
trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
model = RegressionRandomPreTrainedModel(config)
trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
checkpoints = [d for d in os.listdir(tmp_dir) if d.startswith("checkpoint-")]
# There should be one checkpoint per epoch.
self.assertEqual(len(checkpoints), 3)
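            # (num_train_epochs defaults to 3, and with save_strategy="epoch" each epoch end
            # produces exactly one checkpoint, regardless of batch size.)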
checkpoint_dir = sorted(checkpoints, key=lambda x: int(x.replace("checkpoint-", "")))[0]
trainer.train(resume_from_checkpoint=os.path.join(tmp_dir, checkpoint_dir))
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
self.assertAlmostEqual(a, a1, delta=1e-5)
self.assertAlmostEqual(b, b1, delta=1e-5)
@slow
@require_accelerate
@require_torch_non_multi_accelerator
def test_auto_batch_size_finder(self):
if torch.cuda.is_available():
torch.backends.cudnn.deterministic = True
SRC_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "..", "examples", "pytorch", "text-classification")
)
sys.path.append(SRC_DIR)
import run_glue
with tempfile.TemporaryDirectory() as tmpdir:
testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--task_name mrpc
--do_train
--do_eval
--max_seq_len 128
--per_device_train_batch_size 4096
--learning_rate 2e-5
--num_train_epochs 1
--output_dir {tmpdir}
--auto_find_batch_size 0
""".split()
with self.assertRaises(RuntimeError):
with patch.object(sys, "argv", testargs):
run_glue.main()
testargs[-1] = "1"
with patch.object(sys, "argv", testargs):
run_glue.main()
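            # With `--auto_find_batch_size 0` the oversized batch of 4096 is expected to raise;
            # with the flag set to 1, the Trainer wraps the inner loop in accelerate's
            # `find_executable_batch_size`, which retries with a halved batch size until the
            # step fits in memory.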
# regression for this issue: https://github.com/huggingface/transformers/issues/12970
def test_training_with_resume_from_checkpoint_false(self):
train_dataset = RegressionDataset(length=128)
eval_dataset = RegressionDataset()
config = RegressionModelConfig(a=0, b=2)
model = RegressionRandomPreTrainedModel(config)
tmp_dir = self.get_auto_remove_tmp_dir()
args = RegressionTrainingArguments(tmp_dir, save_steps=5, learning_rate=0.1)
trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
trainer.train(resume_from_checkpoint=False)
@require_torch_up_to_2_accelerators
def test_resume_training_with_shard_checkpoint(self):
        # This test will fail with more than 2 GPUs, since the effective batch size gets bigger and,
        # given the number of save_steps, the checkpoint will resume training at epoch 2 or later (so
        # the data seen by the model won't be the same, since the training dataloader is shuffled).
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint = os.path.join(tmpdir, "checkpoint-5")
self.convert_to_sharded_checkpoint(checkpoint)
# Reinitialize trainer
trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
@require_safetensors
@require_torch_up_to_2_accelerators
def test_resume_training_with_safe_checkpoint(self):
        # This test will fail with more than 2 GPUs, since the effective batch size gets bigger and,
        # given the number of save_steps, the checkpoint will resume training at epoch 2 or later (so
        # the data seen by the model won't be the same, since the training dataloader is shuffled).
for initial_safe in [False, True]:
for loaded_safe in [False, True]:
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
output_dir=tmpdir,
train_len=128,
save_steps=5,
learning_rate=0.1,
save_safetensors=initial_safe,
)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint = os.path.join(tmpdir, "checkpoint-5")
self.convert_to_sharded_checkpoint(checkpoint, load_safe=initial_safe, save_safe=loaded_safe)
# Reinitialize trainer
trainer = get_regression_trainer(
output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1, save_safetensors=loaded_safe
)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
@require_torch_up_to_2_accelerators
def test_resume_training_with_gradient_accumulation(self):
        # This test will fail with more than 2 GPUs, since the effective batch size gets bigger and,
        # given the number of save_steps, the checkpoint will resume training at epoch 2 or later (so
        # the data seen by the model won't be the same, since the training dataloader is shuffled).
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
output_dir=tmpdir,
train_len=128,
gradient_accumulation_steps=2,
per_device_train_batch_size=4,
save_steps=5,
learning_rate=0.1,
)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint = os.path.join(tmpdir, "checkpoint-5")
# Reinitialize trainer
trainer = get_regression_trainer(
output_dir=tmpdir,
train_len=128,
gradient_accumulation_steps=2,
per_device_train_batch_size=4,
save_steps=5,
learning_rate=0.1,
)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
@require_torch_up_to_2_accelerators
def test_resume_training_with_frozen_params(self):
        # This test will fail with more than 2 GPUs, since the effective batch size gets bigger and,
        # given the number of save_steps, the checkpoint will resume training at epoch 2 or later (so
        # the data seen by the model won't be the same, since the training dataloader is shuffled).
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
output_dir=tmpdir,
train_len=128,
per_device_train_batch_size=4,
save_steps=5,
learning_rate=0.1,
)
trainer.model.a.requires_grad_(False)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint = os.path.join(tmpdir, "checkpoint-5")
# Reinitialize trainer
trainer = get_regression_trainer(
output_dir=tmpdir,
train_len=128,
per_device_train_batch_size=4,
save_steps=5,
learning_rate=0.1,
)
trainer.model.a.requires_grad_(False)
trainer.train(resume_from_checkpoint=checkpoint)
self.assertFalse(trainer.model.a.requires_grad)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
def test_load_best_model_at_end(self):
total = int(self.n_epochs * 64 / self.batch_size)
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
a=1.5,
b=2.5,
output_dir=tmpdir,
learning_rate=0.1,
eval_steps=5,
evaluation_strategy="steps",
save_steps=5,
load_best_model_at_end=True,
)
self.assertFalse(trainer.args.greater_is_better)
trainer.train()
self.check_saved_checkpoints(tmpdir, 5, total)
self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_loss")
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
a=1.5,
b=2.5,
output_dir=tmpdir,
learning_rate=0.1,
eval_steps=5,
evaluation_strategy="steps",
save_steps=5,
load_best_model_at_end=True,
metric_for_best_model="accuracy",
compute_metrics=AlmostAccuracy(),
)
self.assertTrue(trainer.args.greater_is_better)
trainer.train()
self.check_saved_checkpoints(tmpdir, 5, total)
self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_accuracy", greater_is_better=True)
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
a=1.5,
b=2.5,
output_dir=tmpdir,
learning_rate=0.1,
evaluation_strategy="epoch",
save_strategy="epoch",
load_best_model_at_end=True,
metric_for_best_model="accuracy",
compute_metrics=AlmostAccuracy(),
)
self.assertTrue(trainer.args.greater_is_better)
trainer.train()
self.check_saved_checkpoints(tmpdir, 64 // self.batch_size, total)
self.check_best_model_has_been_loaded(
tmpdir, 64 // self.batch_size, total, trainer, "eval_accuracy", greater_is_better=True
)
# Test this works with a non PreTrainedModel
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
output_dir=tmpdir,
learning_rate=0.1,
eval_steps=5,
evaluation_strategy="steps",
save_steps=5,
load_best_model_at_end=True,
pretrained=False,
)
self.assertFalse(trainer.args.greater_is_better)
trainer.train()
self.check_saved_checkpoints(tmpdir, 5, total, is_pretrained=False)
self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_loss", is_pretrained=False)
@require_safetensors
def test_load_best_model_from_safetensors(self):
total = int(self.n_epochs * 64 / self.batch_size)
for save_safetensors, pretrained in product([False, True], [False, True]):
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
a=1.5,
b=2.5,
output_dir=tmpdir,
learning_rate=0.1,
eval_steps=5,
evaluation_strategy="steps",
save_steps=5,
load_best_model_at_end=True,
save_safetensors=save_safetensors,
pretrained=pretrained,
)
self.assertFalse(trainer.args.greater_is_better)
trainer.train()
self.check_saved_checkpoints(tmpdir, 5, total, is_pretrained=pretrained, safe_weights=save_safetensors)
self.check_best_model_has_been_loaded(
tmpdir, 5, total, trainer, "eval_loss", is_pretrained=pretrained, safe_weights=save_safetensors
)
@slow
def test_trainer_eval_mrpc(self):
MODEL_ID = "bert-base-cased-finetuned-mrpc"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID)
data_args = GlueDataTrainingArguments(
task_name="mrpc", data_dir=f"{get_tests_dir()}/fixtures/tests_samples/MRPC", overwrite_cache=True
)
eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")
training_args = TrainingArguments(output_dir="./examples", use_cpu=True)
trainer = Trainer(model=model, args=training_args, eval_dataset=eval_dataset)
result = trainer.evaluate()
self.assertLess(result["eval_loss"], 0.2)
@slow
def test_trainer_eval_lm(self):
MODEL_ID = "distilroberta-base"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
dataset = LineByLineTextDataset(
tokenizer=tokenizer,
file_path=PATH_SAMPLE_TEXT,
block_size=tokenizer.max_len_single_sentence,
)
self.assertEqual(len(dataset), 31)
def test_training_iterable_dataset(self):
config = RegressionModelConfig()
model = RegressionPreTrainedModel(config)
# Adding one column not used by the model should have no impact
train_dataset = SampleIterableDataset(label_names=["labels", "extra"])
args = RegressionTrainingArguments(output_dir="./examples", max_steps=4)
trainer = Trainer(model=model, args=args, train_dataset=train_dataset)
trainer.train()
self.assertEqual(trainer.state.global_step, 4)
loader = trainer.get_train_dataloader()
self.assertIsInstance(loader, torch.utils.data.DataLoader)
self.assertIsInstance(loader.sampler, torch.utils.data.dataloader._InfiniteConstantSampler)
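        # Note: an IterableDataset exposes no length, so the Trainer must rely on `max_steps`
        # (4 above) to stop, and PyTorch falls back to the internal _InfiniteConstantSampler
        # instead of a regular (shuffling) sampler -- which is what the check above asserts.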
def test_evaluation_iterable_dataset(self):
config = RegressionModelConfig(a=1.5, b=2.5)
model = RegressionPreTrainedModel(config)
# Adding one column not used by the model should have no impact
eval_dataset = SampleIterableDataset(label_names=["labels", "extra"])
args = RegressionTrainingArguments(output_dir="./examples")
trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset, compute_metrics=AlmostAccuracy())
results = trainer.evaluate()
x, y = trainer.eval_dataset.dataset.x, trainer.eval_dataset.dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
# With a number of elements not a round multiple of the batch size
eval_dataset = SampleIterableDataset(length=66)
results = trainer.evaluate(eval_dataset)
x, y = eval_dataset.dataset.x, eval_dataset.dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
def test_predict_iterable_dataset(self):
config = RegressionModelConfig(a=1.5, b=2.5)
model = RegressionPreTrainedModel(config)
eval_dataset = SampleIterableDataset()
args = RegressionTrainingArguments(output_dir="./examples")
trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset, compute_metrics=AlmostAccuracy())
preds = trainer.predict(trainer.eval_dataset).predictions
x = eval_dataset.dataset.x
self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))
# With a number of elements not a round multiple of the batch size
# Adding one column not used by the model should have no impact
test_dataset = SampleIterableDataset(length=66, label_names=["labels", "extra"])
preds = trainer.predict(test_dataset).predictions
x = test_dataset.dataset.x
self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))
def test_num_train_epochs_in_training(self):
# len(train_dl) < gradient_accumulation_steps shouldn't give ``ZeroDivisionError`` when ``max_steps`` is given.
# It should give 1 update step for each epoch.
trainer = get_regression_trainer(
max_steps=3, train_len=64, per_device_train_batch_size=16, gradient_accumulation_steps=5
)
train_output = trainer.train()
self.assertEqual(train_output.global_step, 3)
        # Even if ``max_steps`` is not specified, we still expect 1 update step for each epoch if
        # len(train_dl) < gradient_accumulation_steps.
trainer = get_regression_trainer(train_len=64, per_device_train_batch_size=16, gradient_accumulation_steps=5)
train_output = trainer.train()
self.assertEqual(train_output.global_step, int(self.n_epochs))
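        # The arithmetic behind the expectation: train_len=64 with per_device_train_batch_size=16
        # gives 4 batches per epoch; since 4 < gradient_accumulation_steps=5, the incomplete
        # accumulation window is still flushed once per epoch, i.e. exactly 1 update step per
        # epoch and global_step == n_epochs at the end.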
def test_early_stopping_callback(self):
# early stopping stops training before num_training_epochs
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
num_train_epochs=20,
gradient_accumulation_steps=1,
per_device_train_batch_size=16,
load_best_model_at_end=True,
evaluation_strategy=IntervalStrategy.EPOCH,
save_strategy=IntervalStrategy.EPOCH,
compute_metrics=AlmostAccuracy(),
metric_for_best_model="accuracy",
)
trainer.add_callback(EarlyStoppingCallback(1, 0.0001))
train_output = trainer.train()
self.assertLess(train_output.global_step, 20 * 64 / 16)
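            # 20 * 64 / 16 == 80 is the number of update steps a full 20-epoch run would take
            # (64 samples at batch size 16 => 4 steps per epoch), so early stopping must have
            # ended training strictly before that.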
# Invalid inputs to trainer with early stopping callback result in assertion error
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
num_train_epochs=20,
gradient_accumulation_steps=1,
per_device_train_batch_size=16,
evaluation_strategy=IntervalStrategy.EPOCH,
compute_metrics=AlmostAccuracy(),
metric_for_best_model="accuracy",
)
trainer.add_callback(EarlyStoppingCallback(1))
self.assertEqual(trainer.state.global_step, 0)
try:
trainer.train()
except AssertionError:
self.assertEqual(trainer.state.global_step, 0)
def test_flos_extraction(self):
trainer = get_regression_trainer(learning_rate=0.1)
def assert_flos_extraction(trainer, wrapped_model_to_check):
self.assertEqual(trainer.model, unwrap_model(wrapped_model_to_check))
self.assertGreaterEqual(getattr(unwrap_model(wrapped_model_to_check).config, "total_flos", 0), 0)
# with plain model
assert_flos_extraction(trainer, trainer.model)
# with enforced DataParallel
assert_flos_extraction(trainer, nn.DataParallel(trainer.model))
trainer.train()
self.assertTrue(isinstance(trainer.state.total_flos, float))
def check_checkpoint_deletion(self, trainer, output_dir, expected):
# Make fake checkpoints
for n in [5, 10, 15, 20, 25]:
os.makedirs(os.path.join(output_dir, f"{PREFIX_CHECKPOINT_DIR}-{n}"), exist_ok=True)
trainer._rotate_checkpoints(output_dir=output_dir)
glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{PREFIX_CHECKPOINT_DIR}-*")]
values = [int(re.match(f".*{PREFIX_CHECKPOINT_DIR}-([0-9]+)", d).groups()[0]) for d in glob_checkpoints]
self.assertSetEqual(set(values), set(expected))
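    # With fake checkpoints at steps 5..25, `_rotate_checkpoints` should keep only the
    # `save_total_limit` most recent ones, except that the checkpoint recorded in
    # `state.best_model_checkpoint` is always preserved -- see the cases below.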
def test_checkpoint_rotation(self):
with tempfile.TemporaryDirectory() as tmp_dir:
# Without best model at end
trainer = get_regression_trainer(output_dir=tmp_dir, save_total_limit=2)
self.check_checkpoint_deletion(trainer, tmp_dir, [20, 25])
# With best model at end
trainer = get_regression_trainer(
output_dir=tmp_dir, evaluation_strategy="steps", load_best_model_at_end=True, save_total_limit=2
)
trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-5")
self.check_checkpoint_deletion(trainer, tmp_dir, [5, 25])
            # Edge case: when load_best_model_at_end=True we don't always honor save_total_limit=1,
            # so that we can still resume from a checkpoint.
trainer = get_regression_trainer(
output_dir=tmp_dir, evaluation_strategy="steps", load_best_model_at_end=True, save_total_limit=1
)
trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-25")
self.check_checkpoint_deletion(trainer, tmp_dir, [25])
trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-5")
self.check_checkpoint_deletion(trainer, tmp_dir, [5, 25])
def check_mem_metrics(self, trainer, check_func):
metrics = trainer.train().metrics
check_func("init_mem_cpu_alloc_delta", metrics)
check_func("train_mem_cpu_alloc_delta", metrics)
if backend_device_count(torch_device) > 0:
check_func("init_mem_gpu_alloc_delta", metrics)
check_func("train_mem_gpu_alloc_delta", metrics)
metrics = trainer.evaluate()
check_func("eval_mem_cpu_alloc_delta", metrics)
if backend_device_count(torch_device) > 0:
check_func("eval_mem_gpu_alloc_delta", metrics)
metrics = trainer.predict(RegressionDataset()).metrics
check_func("test_mem_cpu_alloc_delta", metrics)
if backend_device_count(torch_device) > 0:
check_func("test_mem_gpu_alloc_delta", metrics)
def test_mem_metrics(self):
# with mem metrics enabled
trainer = get_regression_trainer(skip_memory_metrics=False)
self.check_mem_metrics(trainer, self.assertIn)
# with mem metrics disabled
trainer = get_regression_trainer(skip_memory_metrics=True)
self.check_mem_metrics(trainer, self.assertNotIn)
@require_torch_accelerator
def test_fp16_full_eval(self):
# this is a sensitive test so let's keep debugging printouts in place for quick diagnosis.
# it's using pretty large safety margins, but small enough to detect broken functionality.
debug = 0
n_gpus = backend_device_count(torch_device)
bs = 8
eval_len = 16 * n_gpus
# make the params somewhat big so that there will be enough RAM consumed to be able to
# measure things. We should get about 64KB for a+b in fp32
a = torch.ones(1000, bs) + 0.001
b = torch.ones(1000, bs) - 0.001
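        # Back-of-the-envelope math for the margins below: each tensor holds 1000 * 8 = 8000
        # fp32 elements, i.e. ~32KB, so a+b together is ~64KB in fp32 and ~32KB once `half()`
        # is applied -- hence the 59_000 lower bound and the ~32K expectation further down.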
# 1. with fp16_full_eval disabled
trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, skip_memory_metrics=False)
metrics = trainer.evaluate()
del trainer
gc.collect()
fp32_init = metrics["init_mem_gpu_alloc_delta"]
fp32_eval = metrics["eval_mem_gpu_alloc_delta"]
if debug:
print(f"fp32_init {fp32_init}")
print(f"fp32_eval {fp32_eval}")
# here we expect the model to be preloaded in trainer.__init__ and consume around 64K gpu ram.
# perfect world: fp32_init == 64<<10
self.assertGreater(fp32_init, 59_000)
# after eval should be no extra memory allocated - with a small margin (other than the peak
# memory consumption for the forward calculation that gets recovered)
# perfect world: fp32_eval == close to zero
self.assertLess(fp32_eval, 5_000)
# 2. with fp16_full_eval enabled
trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, fp16_full_eval=True, skip_memory_metrics=False)
metrics = trainer.evaluate()
fp16_init = metrics["init_mem_gpu_alloc_delta"]
fp16_eval = metrics["eval_mem_gpu_alloc_delta"]
if debug:
print(f"fp16_init {fp16_init}")
print(f"fp16_eval {fp16_eval}")
# here we expect the model to not be preloaded in trainer.__init__, so with a small margin it should be close to 0
# perfect world: fp16_init == close to zero
self.assertLess(fp16_init, 5_000)
        # here we put the model on device in eval and only `half()` of it, i.e. about 32K (again we
        # ignore the peak margin which gets returned back)
        # perfect world: fp16_eval == 32<<10
self.assertGreater(fp16_eval, 27_000)
# 3. relative comparison fp32 vs full fp16
        # fp16_eval should be about half of fp32_init
# perfect world: fp32_init/2 == fp16_eval
self.assertAlmostEqual(fp16_eval, fp32_init / 2, delta=5_000)
@require_torch_non_multi_gpu
@require_torchdynamo
@require_torch_tensorrt_fx
def test_torchdynamo_full_eval(self):
import torchdynamo
        # torchdynamo at the moment doesn't support DP/DDP, therefore this test requires a single GPU
n_gpus = get_gpu_count()
bs = 8
eval_len = 16 * n_gpus
        # make the params somewhat big so that there will be enough RAM consumed to be able to
        # measure things. We should get about 64KB for a+b in fp32
a = torch.ones(1000, bs) + 0.001
b = torch.ones(1000, bs) - 0.001
# 1. Default - without TorchDynamo
trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len)
metrics = trainer.evaluate()
original_eval_loss = metrics["eval_loss"]
del trainer
# 2. TorchDynamo eager
trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, torchdynamo="eager")
metrics = trainer.evaluate()
self.assertAlmostEqual(metrics["eval_loss"], original_eval_loss)
del trainer
torchdynamo.reset()
# 3. TorchDynamo nvfuser
trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, torchdynamo="nvfuser")
metrics = trainer.evaluate()
self.assertAlmostEqual(metrics["eval_loss"], original_eval_loss)
torchdynamo.reset()
# 4. TorchDynamo fx2trt
trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, torchdynamo="fx2trt")
metrics = trainer.evaluate()
self.assertAlmostEqual(metrics["eval_loss"], original_eval_loss)
torchdynamo.reset()
@unittest.skip("torch 2.0.0 gives `ModuleNotFoundError: No module named 'torchdynamo'`.")
@require_torch_non_multi_gpu
@require_torchdynamo
def test_torchdynamo_memory(self):
# torchdynamo at the moment doesn't support DP/DDP, therefore require a single gpu
import torchdynamo
class CustomTrainer(Trainer):
def compute_loss(self, model, inputs, return_outputs=False):
x = inputs["x"]
output = model(x)
if self.args.n_gpu == 1:
return output.mean()
return output
class MyModule(torch.nn.Module):
"""Simple module that does aggressive fusion"""
def __init__(self):
super().__init__()
def forward(self, x):
for _ in range(20):
x = torch.cos(x)
return x
mod = MyModule()
# 1. without TorchDynamo (eager baseline)
a = torch.ones(1024, 1024, device="cuda", requires_grad=True)
a.grad = None
trainer = CustomTrainer(model=mod)
# warmup
for _ in range(10):
orig_loss = trainer.training_step(mod, {"x": a})
# resets
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
orig_loss = trainer.training_step(mod, {"x": a})
orig_peak_mem = torch.cuda.max_memory_allocated()
torchdynamo.reset()
del trainer
# 2. TorchDynamo nvfuser
a = torch.ones(1024, 1024, device="cuda", requires_grad=True)
a.grad = None
args = TrainingArguments(output_dir="None", torchdynamo="nvfuser")
trainer = CustomTrainer(model=mod, args=args)
# warmup
for _ in range(10):
loss = trainer.training_step(mod, {"x": a})
# resets
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
loss = trainer.training_step(mod, {"x": a})
peak_mem = torch.cuda.max_memory_allocated()
torchdynamo.reset()
del trainer
# Functional check
self.assertAlmostEqual(loss, orig_loss)
        # AOT Autograd recomputation and the nvfuser recomputation optimization aggressively fuse
        # the operations and reduce the memory footprint.
self.assertGreater(orig_peak_mem, peak_mem * 2)
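        # Why a 2x gap is plausible: in eager mode each of the 20 chained `cos` calls keeps its
        # 1024 x 1024 fp32 input (~4MB) alive for backward (~80MB total), while the fused /
        # recomputing path only materializes a handful of buffers.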
@require_torch_accelerator
@require_torch_bf16
def test_bf16_full_eval(self):
# note: most of the logic is the same as test_fp16_full_eval
# this is a sensitive test so let's keep debugging printouts in place for quick diagnosis.
# it's using pretty large safety margins, but small enough to detect broken functionality.
debug = 0
n_gpus = backend_device_count(torch_device)
bs = 8
eval_len = 16 * n_gpus
# make the params somewhat big so that there will be enough RAM consumed to be able to
# measure things. We should get about 64KB for a+b in fp32
a = torch.ones(1000, bs) + 0.001
b = torch.ones(1000, bs) - 0.001
# 1. with bf16_full_eval disabled
trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, skip_memory_metrics=False)
metrics = trainer.evaluate()
del trainer
gc.collect()
fp32_init = metrics["init_mem_gpu_alloc_delta"]
fp32_eval = metrics["eval_mem_gpu_alloc_delta"]
if debug:
print(f"fp32_init {fp32_init}")
print(f"fp32_eval {fp32_eval}")
# here we expect the model to be preloaded in trainer.__init__ and consume around 64K gpu ram.
# perfect world: fp32_init == 64<<10
self.assertGreater(fp32_init, 59_000)
# after eval should be no extra memory allocated - with a small margin (other than the peak
# memory consumption for the forward calculation that gets recovered)
# perfect world: fp32_eval == close to zero
self.assertLess(fp32_eval, 5_000)
# 2. with bf16_full_eval enabled
trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, bf16_full_eval=True, skip_memory_metrics=False)
metrics = trainer.evaluate()
bf16_init = metrics["init_mem_gpu_alloc_delta"]
bf16_eval = metrics["eval_mem_gpu_alloc_delta"]
if debug:
print(f"bf16_init {bf16_init}")
print(f"bf16_eval {bf16_eval}")
# here we expect the model to not be preloaded in trainer.__init__, so with a small margin it should be close to 0
# perfect world: bf16_init == close to zero
self.assertLess(bf16_init, 5_000)
        # here we put the model on device in eval and only `half()` of it, i.e. about 32K (again we
        # ignore the peak margin which gets returned back)
        # perfect world: bf16_eval == 32<<10
self.assertGreater(bf16_eval, 27_000)
# 3. relative comparison fp32 vs full bf16
        # bf16_eval should be about half of fp32_init
# perfect world: fp32_init/2 == bf16_eval
self.assertAlmostEqual(bf16_eval, fp32_init / 2, delta=5_000)
def test_no_wd_param_group(self):
model = nn.Sequential(TstLayer(128), nn.ModuleList([TstLayer(128), TstLayer(128)]))
trainer = Trainer(model=model)
trainer.create_optimizer_and_scheduler(10)
wd_names = ['0.linear1.weight', '0.linear2.weight', '1.0.linear1.weight', '1.0.linear2.weight', '1.1.linear1.weight', '1.1.linear2.weight'] # fmt: skip
wd_params = [p for n, p in model.named_parameters() if n in wd_names]
no_wd_params = [p for n, p in model.named_parameters() if n not in wd_names]
self.assertListEqual(trainer.optimizer.param_groups[0]["params"], wd_params)
self.assertListEqual(trainer.optimizer.param_groups[1]["params"], no_wd_params)
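        # This mirrors the Trainer's default grouping: weight decay is applied to the Linear
        # weights only, while biases and LayerNorm parameters land in the no-decay group
        # (param_groups[1]) -- which is why only `linearN.weight` names appear in wd_names.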
@slow
@require_torch_multi_accelerator
def test_end_to_end_example(self):
# Tests that `translation.py` will run without issues
script_path = os.path.abspath(
os.path.join(
os.path.dirname(__file__), "..", "..", "examples", "pytorch", "translation", "run_translation.py"
)
)
with tempfile.TemporaryDirectory() as tmpdir:
command = [
"accelerate",
"launch",
script_path,
"--model_name_or_path",
"t5-small",
"--per_device_train_batch_size",
"1",
"--output_dir",
tmpdir,
"--overwrite_output_dir",
"--do_train",
"--max_train_samples",
"64",
"--num_train_epochs",
"1",
"--dataset_name",
"wmt16",
"--dataset_config",
"ro-en",
"--source_lang",
"en",
"--target_lang",
"ro",
"--do_predict",
"--max_predict_samples",
"64",
"--predict_with_generate",
"--ddp_timeout",
"60",
]
execute_subprocess_async(command)
            # a successful return here means success - any failure would have raised an error or a
            # timeout in the sub-call
@require_torch
@is_staging_test
class TrainerIntegrationWithHubTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._token = TOKEN
HfFolder.save_token(TOKEN)
@classmethod
def tearDownClass(cls):
for model in ["test-trainer", "test-trainer-epoch", "test-trainer-step", "test-trainer-tensorboard"]:
try:
delete_repo(token=cls._token, repo_id=model)
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="valid_org/test-trainer-org")
except HTTPError:
pass
def test_push_to_hub(self):
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, "test-trainer"),
push_to_hub=True,
hub_token=self._token,
)
url = trainer.push_to_hub()
# Extract repo_name from the url
re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url)
self.assertTrue(re_search is not None)
repo_name = re_search.groups()[0]
self.assertEqual(repo_name, f"{USER}/test-trainer")
model = RegressionPreTrainedModel.from_pretrained(repo_name)
self.assertEqual(model.a.item(), trainer.model.a.item())
self.assertEqual(model.b.item(), trainer.model.b.item())
def test_push_to_hub_in_organization(self):
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(output_dir=tmp_dir)
trainer.save_model()
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, "test-trainer-org"),
push_to_hub=True,
hub_model_id="valid_org/test-trainer-org",
hub_token=self._token,
)
url = trainer.push_to_hub()
# Extract repo_name from the url
re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url)
self.assertTrue(re_search is not None)
repo_name = re_search.groups()[0]
self.assertEqual(repo_name, "valid_org/test-trainer-org")
model = RegressionPreTrainedModel.from_pretrained("valid_org/test-trainer-org")
self.assertEqual(model.a.item(), trainer.model.a.item())
self.assertEqual(model.b.item(), trainer.model.b.item())
def get_commit_history(self, repo):
commit_logs = subprocess.run(
"git log".split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
check=True,
encoding="utf-8",
cwd=repo,
).stdout
commits = commit_logs.split("\n\n")[1::2]
return [commit.strip() for commit in commits]
def test_push_to_hub_with_saves_each_epoch(self):
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, "test-trainer-epoch"),
push_to_hub=True,
hub_token=self._token,
# To avoid any flakiness if the training goes faster than the uploads.
hub_always_push=True,
save_strategy="epoch",
)
trainer.train()
commits = list_repo_commits(f"{USER}/test-trainer-epoch", token=self._token)
commits = [c.title for c in commits]
self.assertIn("initial commit", commits)
for i in range(1, 4):
self.assertIn(f"Training in progress, epoch {i}", commits)
def test_push_to_hub_with_saves_each_n_steps(self):
num_gpus = max(1, backend_device_count(torch_device))
if num_gpus > 2:
return
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, "test-trainer-step"),
push_to_hub=True,
hub_token=self._token,
# To avoid any flakiness if the training goes faster than the uploads.
hub_always_push=True,
save_strategy="steps",
save_steps=5,
)
trainer.train()
commits = list_repo_commits(f"{USER}/test-trainer-step", token=self._token)
commits = [c.title for c in commits]
self.assertIn("initial commit", commits)
            # max_steps depends on the number of available GPUs
max_steps = math.ceil(trainer.args.num_train_epochs * len(trainer.get_train_dataloader()))
for i in range(5, max_steps, 5):
self.assertIn(f"Training in progress, step {i}", commits)
@require_tensorboard
def test_push_to_hub_with_tensorboard_logs(self):
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, "test-trainer-tensorboard"),
hub_token=self._token,
save_strategy="epoch",
report_to=["tensorboard"],
keep_report_to=True,
)
trainer.train()
# Push the runs via `push_to_hub()`
trainer.push_to_hub()
files = list_repo_files(f"{USER}/test-trainer-tensorboard", token=self._token)
found_log = False
for f in files:
if len(f.split("runs")) > 1 and "events.out.tfevents" in f:
found_log = True
assert found_log is True, "No tensorboard log found in repo"
@require_torch
@require_optuna
class TrainerHyperParameterOptunaIntegrationTest(unittest.TestCase):
def setUp(self):
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def test_hyperparameter_search(self):
class MyTrialShortNamer(TrialShortNamer):
DEFAULTS = {"a": 0, "b": 0}
def hp_space(trial):
return {}
def model_init(trial):
if trial is not None:
a = trial.suggest_int("a", -4, 4)
b = trial.suggest_int("b", -4, 4)
else:
a = 0
b = 0
config = RegressionModelConfig(a=a, b=b, double_output=False)
return RegressionPreTrainedModel(config)
def hp_name(trial):
return MyTrialShortNamer.shortname(trial.params)
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
learning_rate=0.1,
logging_steps=1,
evaluation_strategy=IntervalStrategy.EPOCH,
save_strategy=IntervalStrategy.EPOCH,
num_train_epochs=4,
disable_tqdm=True,
load_best_model_at_end=True,
logging_dir="runs",
run_name="test",
model_init=model_init,
)
trainer.hyperparameter_search(direction="minimize", hp_space=hp_space, hp_name=hp_name, n_trials=4)
@require_torch
@require_optuna
class TrainerHyperParameterMultiObjectOptunaIntegrationTest(unittest.TestCase):
def setUp(self):
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def test_hyperparameter_search(self):
class MyTrialShortNamer(TrialShortNamer):
DEFAULTS = {"a": 0, "b": 0}
def hp_space(trial):
return {}
def model_init(trial):
if trial is not None:
a = trial.suggest_int("a", -4, 4)
b = trial.suggest_int("b", -4, 4)
else:
a = 0
b = 0
config = RegressionModelConfig(a=a, b=b, double_output=False)
return RegressionPreTrainedModel(config)
def hp_name(trial):
return MyTrialShortNamer.shortname(trial.params)
def compute_objective(metrics: Dict[str, float]) -> List[float]:
return metrics["eval_loss"], metrics["eval_accuracy"]
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
learning_rate=0.1,
logging_steps=1,
evaluation_strategy=IntervalStrategy.EPOCH,
save_strategy=IntervalStrategy.EPOCH,
num_train_epochs=10,
disable_tqdm=True,
load_best_model_at_end=True,
logging_dir="runs",
run_name="test",
model_init=model_init,
compute_metrics=AlmostAccuracy(),
)
trainer.hyperparameter_search(
direction=["minimize", "maximize"],
hp_space=hp_space,
hp_name=hp_name,
n_trials=4,
compute_objective=compute_objective,
)
@require_torch
@require_ray
class TrainerHyperParameterRayIntegrationTest(unittest.TestCase):
def setUp(self):
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def ray_hyperparameter_search(self):
class MyTrialShortNamer(TrialShortNamer):
DEFAULTS = {"a": 0, "b": 0}
def hp_space(trial):
from ray import tune
return {
"a": tune.randint(-4, 4),
"b": tune.randint(-4, 4),
}
def model_init(config):
if config is None:
a = 0
b = 0
else:
a = config["a"]
b = config["b"]
model_config = RegressionModelConfig(a=a, b=b, double_output=False)
return RegressionPreTrainedModel(model_config)
def hp_name(params):
return MyTrialShortNamer.shortname(params)
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
learning_rate=0.1,
logging_steps=1,
evaluation_strategy=IntervalStrategy.EPOCH,
save_strategy=IntervalStrategy.EPOCH,
num_train_epochs=4,
disable_tqdm=True,
load_best_model_at_end=True,
logging_dir="runs",
run_name="test",
model_init=model_init,
)
trainer.hyperparameter_search(
direction="minimize", hp_space=hp_space, hp_name=hp_name, backend="ray", n_trials=4
)
def test_hyperparameter_search(self):
self.ray_hyperparameter_search()
def test_hyperparameter_search_ray_client(self):
import ray
from ray.util.client.ray_client_helpers import ray_start_client_server
with ray_start_client_server():
assert ray.util.client.ray.is_connected()
self.ray_hyperparameter_search()
@slow
@require_torch
@require_sigopt
class TrainerHyperParameterSigOptIntegrationTest(unittest.TestCase):
def setUp(self):
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def test_hyperparameter_search(self):
class MyTrialShortNamer(TrialShortNamer):
DEFAULTS = {"a": 0, "b": 0}
def hp_space(trial):
return [
{"bounds": {"min": -4, "max": 4}, "name": "a", "type": "int"},
{"bounds": {"min": -4, "max": 4}, "name": "b", "type": "int"},
]
def model_init(trial):
if trial is not None:
a = trial.assignments["a"]
b = trial.assignments["b"]
else:
a = 0
b = 0
config = RegressionModelConfig(a=a, b=b, double_output=False)
return RegressionPreTrainedModel(config)
def hp_name(trial):
return MyTrialShortNamer.shortname(trial.assignments)
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
learning_rate=0.1,
logging_steps=1,
evaluation_strategy=IntervalStrategy.EPOCH,
save_strategy=IntervalStrategy.EPOCH,
num_train_epochs=4,
disable_tqdm=True,
load_best_model_at_end=True,
logging_dir="runs",
run_name="test",
model_init=model_init,
)
trainer.hyperparameter_search(
direction="minimize", hp_space=hp_space, hp_name=hp_name, backend="sigopt", n_trials=4
)
optim_test_params = []
if is_torch_available():
default_adam_kwargs = {
"betas": (TrainingArguments.adam_beta1, TrainingArguments.adam_beta2),
"eps": TrainingArguments.adam_epsilon,
"lr": TrainingArguments.learning_rate,
}
default_lion_kwargs = {
"betas": (TrainingArguments.adam_beta1, TrainingArguments.adam_beta2),
"lr": TrainingArguments.learning_rate,
}
default_anyprecision_kwargs = {
"use_kahan_summation": False,
"momentum_dtype": torch.float32,
"variance_dtype": torch.float32,
"compensation_buffer_dtype": torch.bfloat16,
}
optim_test_params = [
(
TrainingArguments(optim=OptimizerNames.ADAMW_HF, output_dir="None"),
transformers.optimization.AdamW,
default_adam_kwargs,
),
(
TrainingArguments(optim=OptimizerNames.ADAMW_HF.value, output_dir="None"),
transformers.optimization.AdamW,
default_adam_kwargs,
),
(
TrainingArguments(optim=OptimizerNames.ADAMW_TORCH, output_dir="None"),
torch.optim.AdamW,
default_adam_kwargs,
),
(
TrainingArguments(optim=OptimizerNames.ADAFACTOR, output_dir="None"),
transformers.optimization.Adafactor,
{
"scale_parameter": False,
"relative_step": False,
"lr": TrainingArguments.learning_rate,
},
),
]
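    # Each tuple is (TrainingArguments, expected optimizer class, expected kwargs). The blocks
    # below only extend the list when the corresponding optional backend is importable, so the
    # parameterized test never fails merely because apex/bitsandbytes/torchdistx is missing.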
if is_apex_available():
import apex
optim_test_params.append(
(
TrainingArguments(optim=OptimizerNames.ADAMW_APEX_FUSED, output_dir="None"),
apex.optimizers.FusedAdam,
default_adam_kwargs,
)
)
if is_bitsandbytes_available():
import bitsandbytes as bnb
optim_test_params.append(
(
TrainingArguments(optim=OptimizerNames.ADAMW_BNB, output_dir="None"),
bnb.optim.AdamW,
default_adam_kwargs,
)
)
optim_test_params.append(
(
TrainingArguments(optim=OptimizerNames.ADAMW_8BIT, output_dir="None"),
bnb.optim.AdamW,
default_adam_kwargs,
)
)
optim_test_params.append(
(
TrainingArguments(optim=OptimizerNames.PAGED_ADAMW, output_dir="None"),
bnb.optim.AdamW,
default_adam_kwargs,
)
)
optim_test_params.append(
(
TrainingArguments(optim=OptimizerNames.PAGED_ADAMW_8BIT, output_dir="None"),
bnb.optim.AdamW,
default_adam_kwargs,
)
)
optim_test_params.append(
(
TrainingArguments(optim=OptimizerNames.LION, output_dir="None"),
bnb.optim.Lion,
default_lion_kwargs,
)
)
optim_test_params.append(
(
TrainingArguments(optim=OptimizerNames.LION_8BIT, output_dir="None"),
bnb.optim.Lion,
default_lion_kwargs,
)
)
optim_test_params.append(
(
TrainingArguments(optim=OptimizerNames.PAGED_LION_8BIT, output_dir="None"),
bnb.optim.Lion,
default_lion_kwargs,
)
)
if is_torchdistx_available():
import torchdistx
optim_test_params.append(
(
TrainingArguments(optim=OptimizerNames.ADAMW_ANYPRECISION, output_dir="None"),
torchdistx.optimizers.AnyPrecisionAdamW,
dict(default_adam_kwargs, **default_anyprecision_kwargs),
)
)
@require_torch
class TrainerOptimizerChoiceTest(unittest.TestCase):
def check_optim_and_kwargs(self, training_args: TrainingArguments, expected_cls, expected_kwargs):
actual_cls, optim_kwargs = Trainer.get_optimizer_cls_and_kwargs(training_args)
self.assertEqual(expected_cls, actual_cls)
self.assertIsNotNone(optim_kwargs)
for p, v in expected_kwargs.items():
self.assertTrue(p in optim_kwargs)
actual_v = optim_kwargs[p]
self.assertTrue(actual_v == v, f"Failed check for {p}. Expected {v}, but got {actual_v}.")
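    # `Trainer.get_optimizer_cls_and_kwargs` is a static method, so the mapping from `--optim`
    # values to optimizer classes can be checked without instantiating a Trainer; the tests
    # below mock the optional backends so no real installation is required.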
@parameterized.expand(optim_test_params, skip_on_empty=True)
def test_optim_supported(self, training_args: TrainingArguments, expected_cls, expected_kwargs):
# exercises all the valid --optim options
self.check_optim_and_kwargs(training_args, expected_cls, expected_kwargs)
trainer = get_regression_trainer(**training_args.to_dict())
trainer.train()
def test_fused_adam(self):
# Pretend that apex is installed and mock apex.optimizers.FusedAdam exists.
# Trainer.get_optimizer_cls_and_kwargs does not use FusedAdam. It only has to return the
# class given, so mocking apex.optimizers.FusedAdam should be fine for testing and allow
# the test to run without requiring an apex installation.
mock = Mock()
modules = {
"apex": mock,
"apex.optimizers": mock.optimizers,
"apex.optimizers.FusedAdam": mock.optimizers.FusedAdam,
}
with patch.dict("sys.modules", modules):
self.check_optim_and_kwargs(
TrainingArguments(optim=OptimizerNames.ADAMW_APEX_FUSED, output_dir="None"),
mock.optimizers.FusedAdam,
default_adam_kwargs,
)
def test_fused_adam_no_apex(self):
args = TrainingArguments(optim=OptimizerNames.ADAMW_APEX_FUSED, output_dir="None")
# Pretend that apex does not exist, even if installed. By setting apex to None, importing
# apex will fail even if apex is installed.
with patch.dict("sys.modules", {"apex.optimizers": None}):
with self.assertRaises(ValueError):
Trainer.get_optimizer_cls_and_kwargs(args)
def test_bnb_adam8bit(self):
# Pretend that Bits and Bytes is installed and mock bnb.optim.Adam8bit exists.
# Trainer.get_optimizer_cls_and_kwargs does not use Adam8bit. It only has to return the
# class given, so mocking bnb.optim.Adam8bit should be fine for testing and allow
# the test to run without requiring a bnb installation.
mock = Mock()
modules = {
"bitsandbytes": mock,
"bitsandbytes.optim": mock.optim,
"bitsandbytes.optim.AdamW": mock.optim.AdamW,
}
with patch.dict("sys.modules", modules):
self.check_optim_and_kwargs(
TrainingArguments(optim=OptimizerNames.ADAMW_BNB, output_dir="None"),
mock.optim.AdamW,
default_adam_kwargs,
)
def test_bnb_paged_adam8bit_alias(self):
mock = Mock()
modules = {
"bitsandbytes": mock,
"bitsandbytes.optim": mock.optim,
"bitsandbytes.optim.AdamW": mock.optim.AdamW,
}
with patch.dict("sys.modules", modules):
self.check_optim_and_kwargs(
TrainingArguments(optim=OptimizerNames.ADAMW_8BIT, output_dir="None"),
mock.optim.AdamW,
default_adam_kwargs,
)
def test_bnb_paged_adam(self):
mock = Mock()
modules = {
"bitsandbytes": mock,
"bitsandbytes.optim": mock.optim,
"bitsandbytes.optim.AdamW": mock.optim.AdamW,
}
with patch.dict("sys.modules", modules):
self.check_optim_and_kwargs(
TrainingArguments(optim=OptimizerNames.PAGED_ADAMW, output_dir="None"),
mock.optim.AdamW,
default_adam_kwargs,
)
def test_bnb_paged_adam8bit(self):
mock = Mock()
modules = {
"bitsandbytes": mock,
"bitsandbytes.optim": mock.optim,
"bitsandbytes.optim.AdamW": mock.optim.AdamW,
}
with patch.dict("sys.modules", modules):
self.check_optim_and_kwargs(
TrainingArguments(optim=OptimizerNames.PAGED_ADAMW_8BIT, output_dir="None"),
mock.optim.AdamW,
default_adam_kwargs,
)
def test_bnb_lion(self):
mock = Mock()
modules = {
"bitsandbytes": mock,
"bitsandbytes.optim": mock.optim,
"bitsandbytes.optim.Lion": mock.optim.Lion,
}
with patch.dict("sys.modules", modules):
self.check_optim_and_kwargs(
TrainingArguments(optim=OptimizerNames.LION, output_dir="None"),
mock.optim.Lion,
default_lion_kwargs,
)
def test_bnb_lion8bit(self):
mock = Mock()
modules = {
"bitsandbytes": mock,
"bitsandbytes.optim": mock.optim,
"bitsandbytes.optim.Lion": mock.optim.Lion,
}
with patch.dict("sys.modules", modules):
self.check_optim_and_kwargs(
TrainingArguments(optim=OptimizerNames.LION_8BIT, output_dir="None"),
mock.optim.Lion,
default_lion_kwargs,
)
def test_bnb_paged_lion8bit(self):
mock = Mock()
modules = {
"bitsandbytes": mock,
"bitsandbytes.optim": mock.optim,
"bitsandbytes.optim.Lion": mock.optim.Lion,
}
with patch.dict("sys.modules", modules):
self.check_optim_and_kwargs(
TrainingArguments(optim=OptimizerNames.PAGED_LION_8BIT, output_dir="None"),
mock.optim.Lion,
default_lion_kwargs,
)
def test_bnb_paged_lion(self):
mock = Mock()
modules = {
"bitsandbytes": mock,
"bitsandbytes.optim": mock.optim,
"bitsandbytes.optim.Lion": mock.optim.Lion,
}
with patch.dict("sys.modules", modules):
self.check_optim_and_kwargs(
TrainingArguments(optim=OptimizerNames.PAGED_LION, output_dir="None"),
mock.optim.Lion,
default_lion_kwargs,
)
def test_bnb_adam8bit_no_bnb(self):
args = TrainingArguments(optim=OptimizerNames.ADAMW_BNB, output_dir="None")
# Pretend that bnb does not exist, even if installed. By setting bnb to None, importing
# bnb will fail even if bnb is installed.
with patch.dict("sys.modules", {"bitsandbytes.optim": None}):
with self.assertRaises(ValueError):
Trainer.get_optimizer_cls_and_kwargs(args)
def test_bnb_paged_adam_no_bnb(self):
args = TrainingArguments(optim=OptimizerNames.PAGED_ADAMW, output_dir="None")
# Pretend that bnb does not exist, even if installed. By setting bnb to None, importing
# bnb will fail even if bnb is installed.
with patch.dict("sys.modules", {"bitsandbytes.optim": None}):
with self.assertRaises(ValueError):
Trainer.get_optimizer_cls_and_kwargs(args)
def test_bnb_paged_adam8bit_no_bnb(self):
args = TrainingArguments(optim=OptimizerNames.PAGED_ADAMW_8BIT, output_dir="None")
# Pretend that bnb does not exist, even if installed. By setting bnb to None, importing
# bnb will fail even if bnb is installed.
with patch.dict("sys.modules", {"bitsandbytes.optim": None}):
with self.assertRaises(ValueError):
Trainer.get_optimizer_cls_and_kwargs(args)
def test_bnb_paged_lion_no_bnb(self):
args = TrainingArguments(optim=OptimizerNames.PAGED_LION, output_dir="None")
# Pretend that bnb does not exist, even if installed. By setting bnb to None, importing
# bnb will fail even if bnb is installed.
with patch.dict("sys.modules", {"bitsandbytes.optim": None}):
with self.assertRaises(ValueError):
Trainer.get_optimizer_cls_and_kwargs(args)
def test_bnb_paged_lion8bit_no_bnb(self):
args = TrainingArguments(optim=OptimizerNames.PAGED_LION_8BIT, output_dir="None")
# Pretend that bnb does not exist, even if installed. By setting bnb to None, importing
# bnb will fail even if bnb is installed.
with patch.dict("sys.modules", {"bitsandbytes.optim": None}):
with self.assertRaises(ValueError):
Trainer.get_optimizer_cls_and_kwargs(args)
def test_anyprecision_adamw(self):
# Pretend that torchdistx is installed and mock torchdistx.optimizers.AnyPrecisionAdamW exists.
        # Trainer.get_optimizer_cls_and_kwargs does not use AnyPrecisionAdamW. It only has to return the
        # class given, so mocking torchdistx.optimizers.AnyPrecisionAdamW should be fine for testing and allow
        # the test to run without requiring a torchdistx installation.
mock = Mock()
modules = {
"torchdistx": mock,
"torchdistx.optimizers": mock.optimizers,
"torchdistx.optimizers.AnyPrecisionAdamW.": mock.optimizers.AnyPrecisionAdamW,
}
with patch.dict("sys.modules", modules):
self.check_optim_and_kwargs(
TrainingArguments(optim=OptimizerNames.ADAMW_ANYPRECISION, output_dir="None"),
mock.optimizers.AnyPrecisionAdamW,
dict(default_adam_kwargs, **default_anyprecision_kwargs),
)
def test_no_torchdistx_anyprecision_adamw(self):
args = TrainingArguments(optim=OptimizerNames.ADAMW_ANYPRECISION, output_dir="None")
# Pretend that torchdistx does not exist, even if installed. By setting torchdistx to None, importing
# torchdistx.optimizers will fail even if torchdistx is installed.
with patch.dict("sys.modules", {"torchdistx.optimizers": None}):
with self.assertRaises(ValueError):
Trainer.get_optimizer_cls_and_kwargs(args)
@require_torch
@require_wandb
class TrainerHyperParameterWandbIntegrationTest(unittest.TestCase):
def setUp(self):
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def test_hyperparameter_search(self):
class MyTrialShortNamer(TrialShortNamer):
DEFAULTS = {"a": 0, "b": 0}
def hp_space(trial):
return {
"method": "random",
"metric": {},
"parameters": {
"a": {"distribution": "uniform", "min": 1e-6, "max": 1e-4},
"b": {"distribution": "int_uniform", "min": 1, "max": 6},
},
}
def model_init(config):
if config is None:
a = 0
b = 0
else:
a = config["a"]
b = config["b"]
model_config = RegressionModelConfig(a=a, b=b, double_output=False)
return RegressionPreTrainedModel(model_config)
def hp_name(params):
return MyTrialShortNamer.shortname(params)
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
learning_rate=0.1,
logging_steps=1,
evaluation_strategy=IntervalStrategy.EPOCH,
save_strategy=IntervalStrategy.EPOCH,
num_train_epochs=4,
disable_tqdm=True,
load_best_model_at_end=True,
logging_dir="runs",
run_name="test",
model_init=model_init,
)
trainer.hyperparameter_search(
direction="minimize", hp_space=hp_space, hp_name=hp_name, backend="wandb", n_trials=4, anonymous="must"
)
class HyperParameterSearchBackendsTest(unittest.TestCase):
def test_hyperparameter_search_backends(self):
self.assertEqual(
list(ALL_HYPERPARAMETER_SEARCH_BACKENDS.keys()),
list(HPSearchBackend),
)
hf_public_repos/transformers/tests/trainer/test_trainer_seq2seq.py
# coding=utf-8
# Copyright 2020 the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import (
AutoModelForSeq2SeqLM,
BertTokenizer,
DataCollatorForSeq2Seq,
EncoderDecoderModel,
GenerationConfig,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
T5Tokenizer,
)
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
@slow
@require_torch
def test_finetune_bert2bert(self):
bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
bert2bert.config.eos_token_id = tokenizer.sep_token_id
bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
bert2bert.config.max_length = 128
train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")
train_dataset = train_dataset.select(range(32))
val_dataset = val_dataset.select(range(16))
batch_size = 4
def _map_to_encoder_decoder_inputs(batch):
            # The tokenizer automatically adds the special tokens, i.e. [CLS] <text> [SEP]
inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
batch["input_ids"] = inputs.input_ids
batch["attention_mask"] = inputs.attention_mask
batch["decoder_input_ids"] = outputs.input_ids
batch["labels"] = outputs.input_ids.copy()
batch["labels"] = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
batch["decoder_attention_mask"] = outputs.attention_mask
assert all(len(x) == 512 for x in inputs.input_ids)
assert all(len(x) == 128 for x in outputs.input_ids)
return batch
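        # Padding positions in the labels are replaced with -100 above so that the cross-entropy
        # loss (whose default ignore_index is -100) skips them when computing the loss.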
def _compute_metrics(pred):
labels_ids = pred.label_ids
pred_ids = pred.predictions
# all unnecessary tokens are removed
pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
return {"accuracy": accuracy}
# map train dataset
train_dataset = train_dataset.map(
_map_to_encoder_decoder_inputs,
batched=True,
batch_size=batch_size,
remove_columns=["article", "highlights"],
)
train_dataset.set_format(
type="torch",
columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
)
# same for validation dataset
val_dataset = val_dataset.map(
_map_to_encoder_decoder_inputs,
batched=True,
batch_size=batch_size,
remove_columns=["article", "highlights"],
)
val_dataset.set_format(
type="torch",
columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
)
output_dir = self.get_auto_remove_tmp_dir()
training_args = Seq2SeqTrainingArguments(
output_dir=output_dir,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
predict_with_generate=True,
evaluation_strategy="steps",
do_train=True,
do_eval=True,
warmup_steps=0,
eval_steps=2,
logging_steps=2,
)
# instantiate trainer
trainer = Seq2SeqTrainer(
model=bert2bert,
args=training_args,
compute_metrics=_compute_metrics,
train_dataset=train_dataset,
eval_dataset=val_dataset,
tokenizer=tokenizer,
)
# start training
trainer.train()
@slow
@require_torch
def test_return_sequences(self):
# Tests that the number of generated sequences is correct when num_return_sequences > 1
# and essentially ensuring that `accelerator.gather()` is used instead of `gather_for_metrics`
INPUT_COLUMN = "question"
TARGET_COLUMN = "answer"
MAX_INPUT_LENGTH = 256
MAX_TARGET_LENGTH = 256
dataset = datasets.load_dataset("gsm8k", "main", split="train[:38]")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
tokenizer = T5Tokenizer.from_pretrained("t5-small")
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, return_tensors="pt", padding="longest")
gen_config = GenerationConfig.from_pretrained(
"t5-small", max_length=None, min_length=None, max_new_tokens=256, min_new_tokens=1, num_beams=5
)
training_args = Seq2SeqTrainingArguments(".", predict_with_generate=True)
trainer = Seq2SeqTrainer(
model=model,
args=training_args,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=lambda x: {"samples": x[0].shape[0]},
)
def prepare_data(examples):
            # Tokenize inputs and targets, truncating to the max lengths defined above
inputs = examples[INPUT_COLUMN]
targets = examples[TARGET_COLUMN]
model_inputs = tokenizer(inputs, max_length=MAX_INPUT_LENGTH, truncation=True)
labels = tokenizer(text_target=targets, max_length=MAX_TARGET_LENGTH, truncation=True)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
prepared_dataset = dataset.map(prepare_data, batched=True, remove_columns=[INPUT_COLUMN, TARGET_COLUMN])
dataset_len = len(prepared_dataset) # 38
for num_return_sequences in range(3, 0, -1):
gen_config.num_return_sequences = num_return_sequences
metrics = trainer.evaluate(eval_dataset=prepared_dataset, generation_config=gen_config)
assert (
metrics["eval_samples"] == dataset_len * num_return_sequences
), f"Got {metrics['eval_samples']}, expected: {dataset_len * num_return_sequences}"
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/trainer/test_data_collator.py
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import (
BertTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
default_data_collator,
is_tf_available,
is_torch_available,
set_seed,
)
from transformers.testing_utils import require_tf, require_torch
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_torch
class DataCollatorIntegrationTest(unittest.TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
self.vocab_file = os.path.join(self.tmpdirname, "vocab.txt")
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_default_with_dict(self):
features = [{"label": i, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8)))))
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))
# With label_ids
features = [{"label_ids": [0, 1, 2], "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue(batch["labels"].equal(torch.tensor([[0, 1, 2]] * 8)))
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))
# Features can already be tensors
features = [{"label": i, "inputs": np.random.randint(0, 10, [10])} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8)))))
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 10]))
# Labels can already be tensors
features = [{"label": torch.tensor(i), "inputs": np.random.randint(0, 10, [10])} for i in range(8)]
batch = default_data_collator(features)
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8)))))
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 10]))
def test_default_classification_and_regression(self):
data_collator = default_data_collator
features = [{"input_ids": [0, 1, 2, 3, 4], "label": i} for i in range(4)]
batch = data_collator(features)
self.assertEqual(batch["labels"].dtype, torch.long)
features = [{"input_ids": [0, 1, 2, 3, 4], "label": float(i)} for i in range(4)]
batch = data_collator(features)
self.assertEqual(batch["labels"].dtype, torch.float)
def test_default_with_no_labels(self):
features = [{"label": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue("labels" not in batch)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))
# With label_ids
features = [{"label_ids": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue("labels" not in batch)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))
def test_data_collator_with_padding(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [{"input_ids": [0, 1, 2]}, {"input_ids": [0, 1, 2, 3, 4, 5]}]
data_collator = DataCollatorWithPadding(tokenizer)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6]))
self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
data_collator = DataCollatorWithPadding(tokenizer, padding="max_length", max_length=10)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size([2, 10]))
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size([2, 8]))
def test_data_collator_for_token_classification(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [
{"input_ids": [0, 1, 2], "labels": [0, 1, 2]},
{"input_ids": [0, 1, 2, 3, 4, 5], "labels": [0, 1, 2, 3, 4, 5]},
]
data_collator = DataCollatorForTokenClassification(tokenizer)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6]))
self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
self.assertEqual(batch["labels"].shape, torch.Size([2, 6]))
self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-100] * 3)
data_collator = DataCollatorForTokenClassification(tokenizer, padding="max_length", max_length=10)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size([2, 10]))
self.assertEqual(batch["labels"].shape, torch.Size([2, 10]))
data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size([2, 8]))
self.assertEqual(batch["labels"].shape, torch.Size([2, 8]))
data_collator = DataCollatorForTokenClassification(tokenizer, label_pad_token_id=-1)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6]))
self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
self.assertEqual(batch["labels"].shape, torch.Size([2, 6]))
self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-1] * 3)
for feature in features:
feature.pop("labels")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6]))
self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
def test_data_collator_for_token_classification_works_with_pt_tensors(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [
{"input_ids": torch.tensor([0, 1, 2]), "labels": torch.tensor([0, 1, 2])},
{"input_ids": torch.tensor([0, 1, 2, 3, 4, 5]), "labels": torch.tensor([0, 1, 2, 3, 4, 5])},
]
data_collator = DataCollatorForTokenClassification(tokenizer)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6]))
self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
self.assertEqual(batch["labels"].shape, torch.Size([2, 6]))
self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-100] * 3)
data_collator = DataCollatorForTokenClassification(tokenizer, padding="max_length", max_length=10)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size([2, 10]))
self.assertEqual(batch["labels"].shape, torch.Size([2, 10]))
data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size([2, 8]))
self.assertEqual(batch["labels"].shape, torch.Size([2, 8]))
data_collator = DataCollatorForTokenClassification(tokenizer, label_pad_token_id=-1)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6]))
self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
self.assertEqual(batch["labels"].shape, torch.Size([2, 6]))
self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-1] * 3)
for feature in features:
feature.pop("labels")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size([2, 6]))
self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
def _test_no_pad_and_pad(self, no_pad_features, pad_features):
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 10)))
batch = data_collator(pad_features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 10)))
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, pad_to_multiple_of=8)
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 16)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 16)))
batch = data_collator(pad_features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 16)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 16)))
tokenizer._pad_token = None
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)
with self.assertRaises(ValueError):
# Expect error due to padding token missing
data_collator(pad_features)
set_seed(42) # For reproducibility
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForLanguageModeling(tokenizer)
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 10)))
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(torch.any(masked_tokens))
self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist()))
batch = data_collator(pad_features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 10)))
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(torch.any(masked_tokens))
self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist()))
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8)
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 16)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 16)))
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(torch.any(masked_tokens))
self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist()))
batch = data_collator(pad_features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 16)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 16)))
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(torch.any(masked_tokens))
self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist()))
def test_data_collator_for_language_modeling(self):
no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}]
pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}]
self._test_no_pad_and_pad(no_pad_features, pad_features)
no_pad_features = [list(range(10)), list(range(10))]
pad_features = [list(range(5)), list(range(10))]
self._test_no_pad_and_pad(no_pad_features, pad_features)
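    # Sketch of the masking knob (assumption: the library's default mlm_probability is
    # 0.15; this helper is illustrative and never called by the tests).
    def _example_mlm_probability(self, tokenizer):
        # Mask roughly 30% of tokens instead of the default 15%.
        return DataCollatorForLanguageModeling(tokenizer, mlm=True, mlm_probability=0.3)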
def test_data_collator_for_whole_word_mask(self):
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForWholeWordMask(tokenizer, return_tensors="pt")
features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}]
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 10)))
# Features can already be tensors
features = [{"input_ids": np.arange(10)}, {"input_ids": np.arange(10)}]
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 10)))
def test_plm(self):
tokenizer = BertTokenizer(self.vocab_file)
no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}]
pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}]
data_collator = DataCollatorForPermutationLanguageModeling(tokenizer)
batch = data_collator(pad_features)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10)))
self.assertEqual(batch["perm_mask"].shape, torch.Size((2, 10, 10)))
self.assertEqual(batch["target_mapping"].shape, torch.Size((2, 10, 10)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 10)))
batch = data_collator(no_pad_features)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 10)))
self.assertEqual(batch["perm_mask"].shape, torch.Size((2, 10, 10)))
self.assertEqual(batch["target_mapping"].shape, torch.Size((2, 10, 10)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 10)))
example = [np.random.randint(0, 5, [5])]
with self.assertRaises(ValueError):
# Expect error due to odd sequence length
data_collator(example)
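    # Sketch only (assumption: the permutation-LM collator rejects odd sequence lengths,
    # which is the ValueError exercised above). A caller would pad odd inputs first; the
    # pad id 0 below is an arbitrary placeholder, not a library guarantee.
    def _example_even_length(self, ids):
        return list(ids) + [0] if len(ids) % 2 else list(ids)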
def test_nsp(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [
{"input_ids": [0, 1, 2, 3, 4], "token_type_ids": [0, 1, 2, 3, 4], "next_sentence_label": i}
for i in range(2)
]
data_collator = DataCollatorForLanguageModeling(tokenizer)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 5)))
self.assertEqual(batch["token_type_ids"].shape, torch.Size((2, 5)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 5)))
self.assertEqual(batch["next_sentence_label"].shape, torch.Size((2,)))
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 8)))
self.assertEqual(batch["token_type_ids"].shape, torch.Size((2, 8)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 8)))
self.assertEqual(batch["next_sentence_label"].shape, torch.Size((2,)))
def test_sop(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [
{
"input_ids": torch.tensor([0, 1, 2, 3, 4]),
"token_type_ids": torch.tensor([0, 1, 2, 3, 4]),
"sentence_order_label": i,
}
for i in range(2)
]
data_collator = DataCollatorForLanguageModeling(tokenizer)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 5)))
self.assertEqual(batch["token_type_ids"].shape, torch.Size((2, 5)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 5)))
self.assertEqual(batch["sentence_order_label"].shape, torch.Size((2,)))
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 8)))
self.assertEqual(batch["token_type_ids"].shape, torch.Size((2, 8)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 8)))
self.assertEqual(batch["sentence_order_label"].shape, torch.Size((2,)))
@require_tf
class TFDataCollatorIntegrationTest(unittest.TestCase):
def setUp(self):
super().setUp()
self.tmpdirname = tempfile.mkdtemp()
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
self.vocab_file = os.path.join(self.tmpdirname, "vocab.txt")
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_default_with_dict(self):
features = [{"label": i, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors="tf")
self.assertEqual(batch["labels"].numpy().tolist(), list(range(8)))
self.assertEqual(batch["labels"].dtype, tf.int64)
self.assertEqual(batch["inputs"].shape.as_list(), [8, 6])
# With label_ids
features = [{"label_ids": [0, 1, 2], "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors="tf")
self.assertEqual(batch["labels"].numpy().tolist(), ([[0, 1, 2]] * 8))
self.assertEqual(batch["labels"].dtype, tf.int64)
self.assertEqual(batch["inputs"].shape.as_list(), [8, 6])
# Features can already be tensors
features = [{"label": i, "inputs": np.random.randint(0, 10, [10])} for i in range(8)]
batch = default_data_collator(features, return_tensors="tf")
self.assertEqual(batch["labels"].numpy().tolist(), (list(range(8))))
self.assertEqual(batch["labels"].dtype, tf.int64)
self.assertEqual(batch["inputs"].shape.as_list(), [8, 10])
# Labels can already be tensors
features = [{"label": np.array(i), "inputs": np.random.randint(0, 10, [10])} for i in range(8)]
batch = default_data_collator(features, return_tensors="tf")
self.assertEqual(batch["labels"].dtype, tf.int64)
self.assertEqual(batch["labels"].numpy().tolist(), list(range(8)))
self.assertEqual(batch["labels"].dtype, tf.int64)
self.assertEqual(batch["inputs"].shape.as_list(), [8, 10])
def test_numpy_dtype_preservation(self):
data_collator = default_data_collator
# Confirms that numpy inputs are handled correctly even when scalars
features = [{"input_ids": np.array([0, 1, 2, 3, 4]), "label": np.int64(i)} for i in range(4)]
batch = data_collator(features, return_tensors="tf")
self.assertEqual(batch["labels"].dtype, tf.int64)
def test_default_classification_and_regression(self):
data_collator = default_data_collator
features = [{"input_ids": [0, 1, 2, 3, 4], "label": i} for i in range(4)]
batch = data_collator(features, return_tensors="tf")
self.assertEqual(batch["labels"].dtype, tf.int64)
features = [{"input_ids": [0, 1, 2, 3, 4], "label": float(i)} for i in range(4)]
batch = data_collator(features, return_tensors="tf")
self.assertEqual(batch["labels"].dtype, tf.float32)
def test_default_with_no_labels(self):
features = [{"label": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors="tf")
self.assertTrue("labels" not in batch)
self.assertEqual(batch["inputs"].shape.as_list(), [8, 6])
# With label_ids
features = [{"label_ids": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors="tf")
self.assertTrue("labels" not in batch)
self.assertEqual(batch["inputs"].shape.as_list(), [8, 6])
def test_data_collator_with_padding(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [{"input_ids": [0, 1, 2]}, {"input_ids": [0, 1, 2, 3, 4, 5]}]
data_collator = DataCollatorWithPadding(tokenizer, return_tensors="tf")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 6])
self.assertEqual(batch["input_ids"][0].numpy().tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
data_collator = DataCollatorWithPadding(tokenizer, padding="max_length", max_length=10, return_tensors="tf")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10])
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8, return_tensors="tf")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, [2, 8])
def test_data_collator_for_token_classification(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [
{"input_ids": [0, 1, 2], "labels": [0, 1, 2]},
{"input_ids": [0, 1, 2, 3, 4, 5], "labels": [0, 1, 2, 3, 4, 5]},
]
data_collator = DataCollatorForTokenClassification(tokenizer, return_tensors="tf")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 6])
self.assertEqual(batch["input_ids"][0].numpy().tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
self.assertEqual(batch["labels"].shape.as_list(), [2, 6])
self.assertEqual(batch["labels"][0].numpy().tolist(), [0, 1, 2] + [-100] * 3)
data_collator = DataCollatorForTokenClassification(
tokenizer, padding="max_length", max_length=10, return_tensors="tf"
)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10])
self.assertEqual(batch["labels"].shape.as_list(), [2, 10])
data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8, return_tensors="tf")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 8])
self.assertEqual(batch["labels"].shape.as_list(), [2, 8])
data_collator = DataCollatorForTokenClassification(tokenizer, label_pad_token_id=-1, return_tensors="tf")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 6])
self.assertEqual(batch["input_ids"][0].numpy().tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
self.assertEqual(batch["labels"].shape.as_list(), [2, 6])
self.assertEqual(batch["labels"][0].numpy().tolist(), [0, 1, 2] + [-1] * 3)
def _test_no_pad_and_pad(self, no_pad_features, pad_features):
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors="tf")
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10])
self.assertEqual(batch["labels"].shape.as_list(), [2, 10])
batch = data_collator(pad_features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10])
self.assertEqual(batch["labels"].shape.as_list(), [2, 10])
data_collator = DataCollatorForLanguageModeling(
tokenizer, mlm=False, pad_to_multiple_of=8, return_tensors="tf"
)
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 16])
self.assertEqual(batch["labels"].shape.as_list(), [2, 16])
batch = data_collator(pad_features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 16])
self.assertEqual(batch["labels"].shape.as_list(), [2, 16])
tokenizer._pad_token = None
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors="tf")
with self.assertRaises(ValueError):
# Expect error due to padding token missing
data_collator(pad_features)
set_seed(42) # For reproducibility
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="tf")
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10])
self.assertEqual(batch["labels"].shape.as_list(), [2, 10])
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(tf.reduce_any(masked_tokens))
# self.assertTrue(all(x == -100 for x in batch["labels"].numpy()[~masked_tokens.numpy()].tolist()))
batch = data_collator(pad_features, return_tensors="tf")
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10])
self.assertEqual(batch["labels"].shape.as_list(), [2, 10])
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(tf.reduce_any(masked_tokens))
# self.assertTrue(all(x == -100 for x in batch["labels"].numpy()[~masked_tokens.numpy()].tolist()))
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="tf")
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 16])
self.assertEqual(batch["labels"].shape.as_list(), [2, 16])
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(tf.reduce_any(masked_tokens))
# self.assertTrue(all(x == -100 for x in batch["labels"].numpy()[~masked_tokens.numpy()].tolist()))
batch = data_collator(pad_features, return_tensors="tf")
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 16])
self.assertEqual(batch["labels"].shape.as_list(), [2, 16])
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(tf.reduce_any(masked_tokens))
# self.assertTrue(all(x == -100 for x in batch["labels"].numpy()[~masked_tokens.numpy()].tolist()))
def test_data_collator_for_language_modeling(self):
no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}]
pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}]
self._test_no_pad_and_pad(no_pad_features, pad_features)
no_pad_features = [list(range(10)), list(range(10))]
pad_features = [list(range(5)), list(range(10))]
self._test_no_pad_and_pad(no_pad_features, pad_features)
def test_data_collator_for_whole_word_mask(self):
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForWholeWordMask(tokenizer, return_tensors="tf")
features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}]
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10])
self.assertEqual(batch["labels"].shape.as_list(), [2, 10])
# Features can already be tensors
features = [{"input_ids": np.arange(10)}, {"input_ids": np.arange(10)}]
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10])
self.assertEqual(batch["labels"].shape.as_list(), [2, 10])
def test_plm(self):
tokenizer = BertTokenizer(self.vocab_file)
no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}]
pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}]
data_collator = DataCollatorForPermutationLanguageModeling(tokenizer, return_tensors="tf")
batch = data_collator(pad_features)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10])
self.assertEqual(batch["perm_mask"].shape.as_list(), [2, 10, 10])
self.assertEqual(batch["target_mapping"].shape.as_list(), [2, 10, 10])
self.assertEqual(batch["labels"].shape.as_list(), [2, 10])
batch = data_collator(no_pad_features)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 10])
self.assertEqual(batch["perm_mask"].shape.as_list(), [2, 10, 10])
self.assertEqual(batch["target_mapping"].shape.as_list(), [2, 10, 10])
self.assertEqual(batch["labels"].shape.as_list(), [2, 10])
example = [np.random.randint(0, 5, [5])]
with self.assertRaises(ValueError):
# Expect error due to odd sequence length
data_collator(example)
def test_nsp(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [
{"input_ids": [0, 1, 2, 3, 4], "token_type_ids": [0, 1, 2, 3, 4], "next_sentence_label": i}
for i in range(2)
]
data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="tf")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 5])
self.assertEqual(batch["token_type_ids"].shape.as_list(), [2, 5])
self.assertEqual(batch["labels"].shape.as_list(), [2, 5])
self.assertEqual(batch["next_sentence_label"].shape.as_list(), [2])
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="tf")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 8])
self.assertEqual(batch["token_type_ids"].shape.as_list(), [2, 8])
self.assertEqual(batch["labels"].shape.as_list(), [2, 8])
self.assertEqual(batch["next_sentence_label"].shape.as_list(), [2])
def test_sop(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [
{
"input_ids": tf.convert_to_tensor([0, 1, 2, 3, 4]),
"token_type_ids": tf.convert_to_tensor([0, 1, 2, 3, 4]),
"sentence_order_label": i,
}
for i in range(2)
]
data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="tf")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 5])
self.assertEqual(batch["token_type_ids"].shape.as_list(), [2, 5])
self.assertEqual(batch["labels"].shape.as_list(), [2, 5])
self.assertEqual(batch["sentence_order_label"].shape.as_list(), [2])
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="tf")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape.as_list(), [2, 8])
self.assertEqual(batch["token_type_ids"].shape.as_list(), [2, 8])
self.assertEqual(batch["labels"].shape.as_list(), [2, 8])
self.assertEqual(batch["sentence_order_label"].shape.as_list(), [2])
class NumpyDataCollatorIntegrationTest(unittest.TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
self.vocab_file = os.path.join(self.tmpdirname, "vocab.txt")
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_default_with_dict(self):
features = [{"label": i, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors="np")
self.assertEqual(batch["labels"].tolist(), list(range(8)))
self.assertEqual(batch["labels"].dtype, np.int64)
self.assertEqual(batch["inputs"].shape, (8, 6))
# With label_ids
features = [{"label_ids": [0, 1, 2], "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors="np")
self.assertEqual(batch["labels"].tolist(), [[0, 1, 2]] * 8)
self.assertEqual(batch["labels"].dtype, np.int64)
self.assertEqual(batch["inputs"].shape, (8, 6))
# Features can already be tensors
features = [{"label": i, "inputs": np.random.randint(0, 10, [10])} for i in range(8)]
batch = default_data_collator(features, return_tensors="np")
self.assertEqual(batch["labels"].tolist(), list(range(8)))
self.assertEqual(batch["labels"].dtype, np.int64)
self.assertEqual(batch["inputs"].shape, (8, 10))
# Labels can already be tensors
features = [{"label": np.array(i), "inputs": np.random.randint(0, 10, [10])} for i in range(8)]
batch = default_data_collator(features, return_tensors="np")
self.assertEqual(batch["labels"].dtype, np.int64)
self.assertEqual(batch["labels"].tolist(), (list(range(8))))
self.assertEqual(batch["labels"].dtype, np.int64)
self.assertEqual(batch["inputs"].shape, (8, 10))
def test_default_classification_and_regression(self):
data_collator = default_data_collator
features = [{"input_ids": [0, 1, 2, 3, 4], "label": i} for i in range(4)]
batch = data_collator(features, return_tensors="np")
self.assertEqual(batch["labels"].dtype, np.int64)
features = [{"input_ids": [0, 1, 2, 3, 4], "label": float(i)} for i in range(4)]
batch = data_collator(features, return_tensors="np")
self.assertEqual(batch["labels"].dtype, np.float32)
def test_default_with_no_labels(self):
features = [{"label": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors="np")
self.assertTrue("labels" not in batch)
self.assertEqual(batch["inputs"].shape, (8, 6))
# With label_ids
features = [{"label_ids": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors="np")
self.assertTrue("labels" not in batch)
self.assertEqual(batch["inputs"].shape, (8, 6))
def test_data_collator_with_padding(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [{"input_ids": [0, 1, 2]}, {"input_ids": [0, 1, 2, 3, 4, 5]}]
data_collator = DataCollatorWithPadding(tokenizer, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 6))
self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
data_collator = DataCollatorWithPadding(tokenizer, padding="max_length", max_length=10, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 10))
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 8))
def test_data_collator_for_token_classification(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [
{"input_ids": [0, 1, 2], "labels": [0, 1, 2]},
{"input_ids": [0, 1, 2, 3, 4, 5], "labels": [0, 1, 2, 3, 4, 5]},
]
data_collator = DataCollatorForTokenClassification(tokenizer, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 6))
self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
self.assertEqual(batch["labels"].shape, (2, 6))
self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-100] * 3)
data_collator = DataCollatorForTokenClassification(
tokenizer, padding="max_length", max_length=10, return_tensors="np"
)
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 8))
self.assertEqual(batch["labels"].shape, (2, 8))
data_collator = DataCollatorForTokenClassification(tokenizer, label_pad_token_id=-1, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 6))
self.assertEqual(batch["input_ids"][0].tolist(), [0, 1, 2] + [tokenizer.pad_token_id] * 3)
self.assertEqual(batch["labels"].shape, (2, 6))
self.assertEqual(batch["labels"][0].tolist(), [0, 1, 2] + [-1] * 3)
def _test_no_pad_and_pad(self, no_pad_features, pad_features):
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors="np")
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
batch = data_collator(pad_features, return_tensors="np")
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
data_collator = DataCollatorForLanguageModeling(
tokenizer, mlm=False, pad_to_multiple_of=8, return_tensors="np"
)
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape, (2, 16))
self.assertEqual(batch["labels"].shape, (2, 16))
batch = data_collator(pad_features, return_tensors="np")
self.assertEqual(batch["input_ids"].shape, (2, 16))
self.assertEqual(batch["labels"].shape, (2, 16))
tokenizer._pad_token = None
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors="np")
with self.assertRaises(ValueError):
# Expect error due to padding token missing
data_collator(pad_features)
set_seed(42) # For reproducibility
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="np")
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(np.any(masked_tokens))
# self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist()))
batch = data_collator(pad_features)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(np.any(masked_tokens))
# self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist()))
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="np")
batch = data_collator(no_pad_features)
self.assertEqual(batch["input_ids"].shape, (2, 16))
self.assertEqual(batch["labels"].shape, (2, 16))
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(np.any(masked_tokens))
# self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist()))
batch = data_collator(pad_features)
self.assertEqual(batch["input_ids"].shape, (2, 16))
self.assertEqual(batch["labels"].shape, (2, 16))
masked_tokens = batch["input_ids"] == tokenizer.mask_token_id
self.assertTrue(np.any(masked_tokens))
# self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist()))
def test_data_collator_for_language_modeling(self):
no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}]
pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}]
self._test_no_pad_and_pad(no_pad_features, pad_features)
no_pad_features = [list(range(10)), list(range(10))]
pad_features = [list(range(5)), list(range(10))]
self._test_no_pad_and_pad(no_pad_features, pad_features)
def test_data_collator_for_whole_word_mask(self):
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForWholeWordMask(tokenizer, return_tensors="np")
features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}]
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
# Features can already be tensors
features = [{"input_ids": np.arange(10)}, {"input_ids": np.arange(10)}]
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
def test_plm(self):
tokenizer = BertTokenizer(self.vocab_file)
no_pad_features = [{"input_ids": list(range(10))}, {"input_ids": list(range(10))}]
pad_features = [{"input_ids": list(range(5))}, {"input_ids": list(range(10))}]
data_collator = DataCollatorForPermutationLanguageModeling(tokenizer, return_tensors="np")
batch = data_collator(pad_features)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["perm_mask"].shape, (2, 10, 10))
self.assertEqual(batch["target_mapping"].shape, (2, 10, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
batch = data_collator(no_pad_features)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, (2, 10))
self.assertEqual(batch["perm_mask"].shape, (2, 10, 10))
self.assertEqual(batch["target_mapping"].shape, (2, 10, 10))
self.assertEqual(batch["labels"].shape, (2, 10))
example = [np.random.randint(0, 5, [5])]
with self.assertRaises(ValueError):
# Expect error due to odd sequence length
data_collator(example)
def test_nsp(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [
{"input_ids": [0, 1, 2, 3, 4], "token_type_ids": [0, 1, 2, 3, 4], "next_sentence_label": i}
for i in range(2)
]
data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 5))
self.assertEqual(batch["token_type_ids"].shape, (2, 5))
self.assertEqual(batch["labels"].shape, (2, 5))
self.assertEqual(batch["next_sentence_label"].shape, (2,))
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 8))
self.assertEqual(batch["token_type_ids"].shape, (2, 8))
self.assertEqual(batch["labels"].shape, (2, 8))
self.assertEqual(batch["next_sentence_label"].shape, (2,))
def test_sop(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [
{
"input_ids": np.array([0, 1, 2, 3, 4]),
"token_type_ids": np.array([0, 1, 2, 3, 4]),
"sentence_order_label": i,
}
for i in range(2)
]
data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 5))
self.assertEqual(batch["token_type_ids"].shape, (2, 5))
self.assertEqual(batch["labels"].shape, (2, 5))
self.assertEqual(batch["sentence_order_label"].shape, (2,))
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors="np")
batch = data_collator(features)
self.assertEqual(batch["input_ids"].shape, (2, 8))
self.assertEqual(batch["token_type_ids"].shape, (2, 8))
self.assertEqual(batch["labels"].shape, (2, 8))
self.assertEqual(batch["sentence_order_label"].shape, (2,))
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/tools/test_tools_common.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
inputs = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input")
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
)
elif input_type == "audio":
inputs.append(torch.ones(3000))
elif isinstance(input_type, list):
inputs.append(create_inputs(input_type))
else:
raise ValueError(f"Invalid type requested: {input_type}")
return inputs
def output_types(outputs: List):
output_types = []
for output in outputs:
if isinstance(output, (str, AgentText)):
output_types.append("text")
elif isinstance(output, (Image.Image, AgentImage)):
output_types.append("image")
elif isinstance(output, (torch.Tensor, AgentAudio)):
output_types.append("audio")
else:
raise ValueError(f"Invalid output: {output}")
return output_types
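# Round-trip sketch for the two helpers above (illustrative, never called by the mixin):
# create_inputs builds dummy values per type and output_types maps values back to types.
def _example_round_trip():
    inputs = create_inputs(["text"])  # -> ["Text input"]
    return output_types(inputs)  # -> ["text"]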
@is_tool_test
class ToolTesterMixin:
def test_inputs_outputs(self):
self.assertTrue(hasattr(self.tool, "inputs"))
self.assertTrue(hasattr(self.tool, "outputs"))
inputs = self.tool.inputs
for _input in inputs:
if isinstance(_input, list):
for __input in _input:
self.assertTrue(__input in authorized_types)
else:
self.assertTrue(_input in authorized_types)
outputs = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types)
def test_call(self):
inputs = create_inputs(self.tool.inputs)
outputs = self.tool(*inputs)
# There is a single output
if len(self.tool.outputs) == 1:
outputs = [outputs]
self.assertListEqual(output_types(outputs), self.tool.outputs)
def test_common_attributes(self):
self.assertTrue(hasattr(self.tool, "description"))
self.assertTrue(hasattr(self.tool, "default_checkpoint"))
self.assertTrue(self.tool.description.startswith("This is a tool that"))
def test_agent_types_outputs(self):
inputs = create_inputs(self.tool.inputs)
outputs = self.tool(*inputs)
if not isinstance(outputs, list):
outputs = [outputs]
self.assertEqual(len(outputs), len(self.tool.outputs))
for output, output_type in zip(outputs, self.tool.outputs):
agent_type = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(output, agent_type))
def test_agent_types_inputs(self):
inputs = create_inputs(self.tool.inputs)
_inputs = []
for _input, input_type in zip(inputs, self.tool.inputs):
if isinstance(input_type, list):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
# Should not raise an error
        outputs = self.tool(*_inputs)
if not isinstance(outputs, list):
outputs = [outputs]
self.assertEqual(len(outputs), len(self.tool.outputs))
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/tools/test_translation.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import load_tool
from transformers.tools.agent_types import AGENT_TYPE_MAPPING
from .test_tools_common import ToolTesterMixin, output_types
class TranslationToolTester(unittest.TestCase, ToolTesterMixin):
def setUp(self):
self.tool = load_tool("translation")
self.tool.setup()
self.remote_tool = load_tool("translation", remote=True)
def test_exact_match_arg(self):
result = self.tool("Hey, what's up?", src_lang="English", tgt_lang="French")
self.assertEqual(result, "- Hé, comment ça va?")
def test_exact_match_arg_remote(self):
result = self.remote_tool("Hey, what's up?", src_lang="English", tgt_lang="French")
self.assertEqual(result, "- Hé, comment ça va?")
def test_exact_match_kwarg(self):
result = self.tool(text="Hey, what's up?", src_lang="English", tgt_lang="French")
self.assertEqual(result, "- Hé, comment ça va?")
def test_exact_match_kwarg_remote(self):
result = self.remote_tool(text="Hey, what's up?", src_lang="English", tgt_lang="French")
self.assertEqual(result, "- Hé, comment ça va?")
def test_call(self):
inputs = ["Hey, what's up?", "English", "Spanish"]
outputs = self.tool(*inputs)
# There is a single output
if len(self.tool.outputs) == 1:
outputs = [outputs]
self.assertListEqual(output_types(outputs), self.tool.outputs)
def test_agent_types_outputs(self):
inputs = ["Hey, what's up?", "English", "Spanish"]
outputs = self.tool(*inputs)
if not isinstance(outputs, list):
outputs = [outputs]
self.assertEqual(len(outputs), len(self.tool.outputs))
for output, output_type in zip(outputs, self.tool.outputs):
agent_type = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(output, agent_type))
def test_agent_types_inputs(self):
inputs = ["Hey, what's up?", "English", "Spanish"]
_inputs = []
for _input, input_type in zip(inputs, self.tool.inputs):
if isinstance(input_type, list):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
# Should not raise an error
        outputs = self.tool(*_inputs)
if not isinstance(outputs, list):
outputs = [outputs]
self.assertEqual(len(outputs), len(self.tool.outputs))
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/tools/test_document_question_answering.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from datasets import load_dataset
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class DocumentQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
def setUp(self):
self.tool = load_tool("document-question-answering")
self.tool.setup()
self.remote_tool = load_tool("document-question-answering", remote=True)
def test_exact_match_arg(self):
dataset = load_dataset("hf-internal-testing/example-documents", split="test")
document = dataset[0]["image"]
result = self.tool(document, "When is the coffee break?")
self.assertEqual(result, "11-14 to 11:39 a.m.")
def test_exact_match_arg_remote(self):
dataset = load_dataset("hf-internal-testing/example-documents", split="test")
document = dataset[0]["image"]
result = self.remote_tool(document, "When is the coffee break?")
self.assertEqual(result, "11-14 to 11:39 a.m.")
def test_exact_match_kwarg(self):
dataset = load_dataset("hf-internal-testing/example-documents", split="test")
document = dataset[0]["image"]
        result = self.tool(document=document, question="When is the coffee break?")
        self.assertEqual(result, "11-14 to 11:39 a.m.")
def test_exact_match_kwarg_remote(self):
dataset = load_dataset("hf-internal-testing/example-documents", split="test")
document = dataset[0]["image"]
result = self.remote_tool(document=document, question="When is the coffee break?")
self.assertEqual(result, "11-14 to 11:39 a.m.")
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/tools/test_text_classification.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
def setUp(self):
self.tool = load_tool("text-classification")
self.tool.setup()
self.remote_tool = load_tool("text-classification", remote=True)
def test_exact_match_arg(self):
result = self.tool("That's quite cool", ["positive", "negative"])
self.assertEqual(result, "positive")
def test_exact_match_arg_remote(self):
result = self.remote_tool("That's quite cool", ["positive", "negative"])
self.assertEqual(result, "positive")
def test_exact_match_kwarg(self):
result = self.tool(text="That's quite cool", labels=["positive", "negative"])
self.assertEqual(result, "positive")
def test_exact_match_kwarg_remote(self):
result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
self.assertEqual(result, "positive")
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/tools/test_image_segmentation.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pathlib import Path
from transformers import is_vision_available, load_tool
from transformers.testing_utils import get_tests_dir
from .test_tools_common import ToolTesterMixin
if is_vision_available():
from PIL import Image
class ImageSegmentationToolTester(unittest.TestCase, ToolTesterMixin):
def setUp(self):
self.tool = load_tool("image-segmentation")
self.tool.setup()
self.remote_tool = load_tool("image-segmentation", remote=True)
def test_exact_match_arg(self):
image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
result = self.tool(image, "cat")
self.assertTrue(isinstance(result, Image.Image))
def test_exact_match_arg_remote(self):
image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
result = self.remote_tool(image, "cat")
self.assertTrue(isinstance(result, Image.Image))
def test_exact_match_kwarg(self):
image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
result = self.tool(image=image, label="cat")
self.assertTrue(isinstance(result, Image.Image))
def test_exact_match_kwarg_remote(self):
image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
result = self.remote_tool(image=image, label="cat")
self.assertTrue(isinstance(result, Image.Image))
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/tools/test_python_interpreter.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
# Fake function we will use as tool
def add_two(x):
return x + 2
class PythonInterpreterTester(unittest.TestCase):
def test_evaluate_assign(self):
code = "x = 3"
state = {}
result = evaluate(code, {}, state=state)
assert result == 3
self.assertDictEqual(state, {"x": 3})
code = "x = y"
state = {"y": 5}
result = evaluate(code, {}, state=state)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(state, {"x": 5, "y": 5})
def test_evaluate_call(self):
code = "y = add_two(x)"
state = {"x": 3}
result = evaluate(code, {"add_two": add_two}, state=state)
assert result == 5
self.assertDictEqual(state, {"x": 3, "y": 5})
# Won't work without the tool
with CaptureStdout() as out:
result = evaluate(code, {}, state=state)
assert result is None
assert "tried to execute add_two" in out.out
def test_evaluate_constant(self):
code = "x = 3"
state = {}
result = evaluate(code, {}, state=state)
assert result == 3
self.assertDictEqual(state, {"x": 3})
def test_evaluate_dict(self):
code = "test_dict = {'x': x, 'y': add_two(x)}"
state = {"x": 3}
result = evaluate(code, {"add_two": add_two}, state=state)
self.assertDictEqual(result, {"x": 3, "y": 5})
self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})
def test_evaluate_expression(self):
code = "x = 3\ny = 5"
state = {}
result = evaluate(code, {}, state=state)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(state, {"x": 3, "y": 5})
def test_evaluate_f_string(self):
code = "text = f'This is x: {x}.'"
state = {"x": 3}
result = evaluate(code, {}, state=state)
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})
def test_evaluate_if(self):
code = "if x <= 3:\n y = 2\nelse:\n y = 5"
state = {"x": 3}
result = evaluate(code, {}, state=state)
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(state, {"x": 3, "y": 2})
state = {"x": 8}
result = evaluate(code, {}, state=state)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(state, {"x": 8, "y": 5})
def test_evaluate_list(self):
code = "test_list = [x, add_two(x)]"
state = {"x": 3}
result = evaluate(code, {"add_two": add_two}, state=state)
self.assertListEqual(result, [3, 5])
self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})
def test_evaluate_name(self):
code = "y = x"
state = {"x": 3}
result = evaluate(code, {}, state=state)
assert result == 3
self.assertDictEqual(state, {"x": 3, "y": 3})
def test_evaluate_subscript(self):
code = "test_list = [x, add_two(x)]\ntest_list[1]"
state = {"x": 3}
result = evaluate(code, {"add_two": add_two}, state=state)
assert result == 5
self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})
code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
state = {"x": 3}
result = evaluate(code, {"add_two": add_two}, state=state)
assert result == 5
self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})
def test_evaluate_for(self):
code = "x = 0\nfor i in range(3):\n x = i"
state = {}
result = evaluate(code, {"range": range}, state=state)
assert result == 2
self.assertDictEqual(state, {"x": 2, "i": 2})
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/tools/test_image_captioning.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
from pathlib import Path

from transformers import is_vision_available, load_tool
from transformers.testing_utils import get_tests_dir

from .test_tools_common import ToolTesterMixin


if is_vision_available():
    from PIL import Image


class ImageCaptioningToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("image-captioning")
        self.tool.setup()
        self.remote_tool = load_tool("image-captioning", remote=True)

    def test_exact_match_arg(self):
        image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
        result = self.tool(image)
        self.assertEqual(result, "two cats sleeping on a couch")

    def test_exact_match_arg_remote(self):
        image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
        result = self.remote_tool(image)
        self.assertEqual(result, "two cats sleeping on a couch")

    def test_exact_match_kwarg(self):
        image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
        result = self.tool(image=image)
        self.assertEqual(result, "two cats sleeping on a couch")

    def test_exact_match_kwarg_remote(self):
        image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
        result = self.remote_tool(image=image)
        self.assertEqual(result, "two cats sleeping on a couch")
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/tools/test_text_question_answering.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/tools/test_speech_to_text.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers import is_torch_available, load_tool

from .test_tools_common import ToolTesterMixin


if is_torch_available():
    import torch


class SpeechToTextToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("speech-to-text")
        self.tool.setup()

    def test_exact_match_arg(self):
        result = self.tool(torch.ones(3000))
        self.assertEqual(result, " you")

    def test_exact_match_kwarg(self):
        result = self.tool(audio=torch.ones(3000))
        self.assertEqual(result, " you")
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/tools/test_text_to_speech.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers import load_tool
from transformers.utils import is_torch_available


if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        # Pass the input by keyword, mirroring the other tools' kwarg tests (this assumes
        # the tool's text input is named `text`).
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
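
# The two tests differ only in how the input is passed. Each reseeds the RNG first because,
# as the inline comments note, SpeechT5 is not deterministic; a fixed seed is what makes the
# first three output samples reproducible enough for an exact allclose comparison.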
| 0
|
hf_public_repos/transformers/tests
|
hf_public_repos/transformers/tests/tools/test_image_question_answering.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
from pathlib import Path

from transformers import is_vision_available, load_tool
from transformers.testing_utils import get_tests_dir

from .test_tools_common import ToolTesterMixin


if is_vision_available():
    from PIL import Image


class ImageQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("image-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("image-question-answering", remote=True)

    def test_exact_match_arg(self):
        image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
        result = self.tool(image, "How many cats are sleeping on the couch?")
        self.assertEqual(result, "2")

    def test_exact_match_arg_remote(self):
        image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
        result = self.remote_tool(image, "How many cats are sleeping on the couch?")
        self.assertEqual(result, "2")

    def test_exact_match_kwarg(self):
        image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
        result = self.tool(image=image, question="How many cats are sleeping on the couch?")
        self.assertEqual(result, "2")

    def test_exact_match_kwarg_remote(self):
        image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
        result = self.remote_tool(image=image, question="How many cats are sleeping on the couch?")
        self.assertEqual(result, "2")
| 0