| repo (string, 1–99 chars) | file (string, 13–215 chars) | code (string, 12–59.2M chars) | file_length (int64, 12–59.2M) | avg_line_length (float64, 3.82–1.48M) | max_line_length (int64, 12–2.51M) | extension_type (1 class) |
|---|---|---|---|---|---|---|
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_optimization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers import (
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
)
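# Helper: step the scheduler `num_steps` times and record the learning rate(s) reported after each step.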
def unwrap_schedule(scheduler, num_steps=10):
lrs = []
for _ in range(num_steps):
scheduler.step()
lrs.append(scheduler.get_lr())
return lrs
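# Same as unwrap_schedule, but halfway through the run the scheduler state_dict is saved to disk and
# reloaded, so callers can check that serialization does not change the remainder of the schedule.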
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
lrs = []
for step in range(num_steps):
scheduler.step()
lrs.append(scheduler.get_lr())
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
file_name = os.path.join(tmpdirname, "schedule.bin")
torch.save(scheduler.state_dict(), file_name)
state_dict = torch.load(file_name)
scheduler.load_state_dict(state_dict)
return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
def assertListAlmostEqual(self, list1, list2, tol):
self.assertEqual(len(list1), len(list2))
for a, b in zip(list1, list2):
self.assertAlmostEqual(a, b, delta=tol)
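# 100 AdamW steps on a single 3-element parameter should drive it to the MSE target within 1e-2.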
def test_adam_w(self):
w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
target = torch.tensor([0.4, 0.2, -0.5])
criterion = torch.nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
for _ in range(100):
loss = criterion(w, target)
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
m = torch.nn.Linear(50, 50) if is_torch_available() else None
optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
num_steps = 10
def assertListAlmostEqual(self, list1, list2, tol):
self.assertEqual(len(list1), len(list2))
for a, b in zip(list1, list2):
self.assertAlmostEqual(a, b, delta=tol)
def test_constant_scheduler(self):
scheduler = get_constant_schedule(self.optimizer)
lrs = unwrap_schedule(scheduler, self.num_steps)
expected_learning_rates = [10.0] * self.num_steps
self.assertEqual(len(lrs[0]), 1)
self.assertListEqual([l[0] for l in lrs], expected_learning_rates)
scheduler = get_constant_schedule(self.optimizer)
lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
self.assertListEqual([l[0] for l in lrs], [l[0] for l in lrs_2])
def test_warmup_constant_scheduler(self):
scheduler = get_constant_schedule_with_warmup(self.optimizer, num_warmup_steps=4)
lrs = unwrap_schedule(scheduler, self.num_steps)
expected_learning_rates = [2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0]
self.assertEqual(len(lrs[0]), 1)
self.assertListEqual([l[0] for l in lrs], expected_learning_rates)
scheduler = get_constant_schedule_with_warmup(self.optimizer, num_warmup_steps=4)
lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
self.assertListEqual([l[0] for l in lrs], [l[0] for l in lrs_2])
def test_warmup_linear_scheduler(self):
scheduler = get_linear_schedule_with_warmup(self.optimizer, num_warmup_steps=2, num_training_steps=10)
lrs = unwrap_schedule(scheduler, self.num_steps)
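# With a base LR of 10.0: linear ramp from 0 over the 2 warmup steps, then linear decay to 0 over the remaining 8 steps.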
expected_learning_rates = [5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25, 0.0]
self.assertEqual(len(lrs[0]), 1)
self.assertListEqual([l[0] for l in lrs], expected_learning_rates)
scheduler = get_linear_schedule_with_warmup(self.optimizer, num_warmup_steps=2, num_training_steps=10)
lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
self.assertListEqual([l[0] for l in lrs], [l[0] for l in lrs_2])
def test_warmup_cosine_scheduler(self):
scheduler = get_cosine_schedule_with_warmup(self.optimizer, num_warmup_steps=2, num_training_steps=10)
lrs = unwrap_schedule(scheduler, self.num_steps)
expected_learning_rates = [5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38, 0.0]
self.assertEqual(len(lrs[0]), 1)
self.assertListAlmostEqual([l[0] for l in lrs], expected_learning_rates, tol=1e-2)
scheduler = get_cosine_schedule_with_warmup(self.optimizer, num_warmup_steps=2, num_training_steps=10)
lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
self.assertListEqual([l[0] for l in lrs], [l[0] for l in lrs_2])
def test_warmup_cosine_hard_restart_scheduler(self):
scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(
self.optimizer, num_warmup_steps=2, num_cycles=2, num_training_steps=10
)
lrs = unwrap_schedule(scheduler, self.num_steps)
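# After the 2 warmup steps, the LR traces num_cycles=2 cosine decays from the base LR towards 0, restarting at 10.0 mid-training.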
expected_learning_rates = [5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46, 0.0]
self.assertEqual(len(lrs[0]), 1)
self.assertListAlmostEqual([l[0] for l in lrs], expected_learning_rates, tol=1e-2)
scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(
self.optimizer, num_warmup_steps=2, num_cycles=2, num_training_steps=10
)
lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
self.assertListEqual([l[0] for l in lrs], [l[0] for l in lrs_2])
| 6,444 | 41.401316 | 110 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_modeling_marian.py | # coding=utf-8
# Copyright 2020 HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.file_utils import cached_property
from transformers.hf_api import HfApi
from transformers.testing_utils import require_torch, slow, torch_device
if is_torch_available():
import torch
from transformers import (
AutoTokenizer,
MarianConfig,
AutoConfig,
AutoModelWithLMHead,
MarianTokenizer,
MarianMTModel,
)
from transformers.convert_marian_to_pytorch import (
convert_hf_name_to_opus_name,
convert_opus_name_to_hf_name,
ORG_NAME,
)
from transformers.pipelines import TranslationPipeline
class ModelManagementTests(unittest.TestCase):
@slow
def test_model_names(self):
model_list = HfApi().model_list()
model_ids = [x.modelId for x in model_list if x.modelId.startswith(ORG_NAME)]
bad_model_ids = [mid for mid in model_ids if "+" in mid]
self.assertListEqual([], bad_model_ids)
self.assertGreater(len(model_ids), 500)
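# Base class for the translation integration tests below: each subclass pins a src/tgt language pair plus
# reference translations; the matching Helsinki-NLP/opus-mt tokenizer is loaded once in setUpClass and the
# model lazily via the cached `model` property.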
@require_torch
class MarianIntegrationTest(unittest.TestCase):
src = "en"
tgt = "de"
src_text = [
"I am a small frog.",
"Now I can forget the 100 words of german that I know.",
"Tom asked his teacher for advice.",
"That's how I would do it.",
"Tom really admired Mary's courage.",
"Turn around and close your eyes.",
]
expected_text = [
"Ich bin ein kleiner Frosch.",
"Jetzt kann ich die 100 Wörter des Deutschen vergessen, die ich kenne.",
"Tom bat seinen Lehrer um Rat.",
"So würde ich das machen.",
"Tom bewunderte Marias Mut wirklich.",
"Drehen Sie sich um und schließen Sie die Augen.",
]
# ^^ actual C++ output differs slightly: (1) des Deutschen removed, (2) ""-> "O", (3) tun -> machen
@classmethod
def setUpClass(cls) -> None:
cls.model_name = f"Helsinki-NLP/opus-mt-{cls.src}-{cls.tgt}"
cls.tokenizer: MarianTokenizer = AutoTokenizer.from_pretrained(cls.model_name)
cls.eos_token_id = cls.tokenizer.eos_token_id
return cls
@cached_property
def model(self):
model: MarianMTModel = AutoModelWithLMHead.from_pretrained(self.model_name).to(torch_device)
c = model.config
self.assertListEqual(c.bad_words_ids, [[c.pad_token_id]])
self.assertEqual(c.max_length, 512)
self.assertEqual(c.decoder_start_token_id, c.pad_token_id)
if torch_device == "cuda":
return model.half()
else:
return model
def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
generated_words = self.translate_src_text(**tokenizer_kwargs)
self.assertListEqual(self.expected_text, generated_words)
def translate_src_text(self, **tokenizer_kwargs):
model_inputs = self.tokenizer.prepare_translation_batch(src_texts=self.src_text, **tokenizer_kwargs).to(
torch_device
)
self.assertEqual(self.model.device, model_inputs.input_ids.device)
generated_ids = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
)
generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
return generated_words
class TestMarian_EN_DE_More(MarianIntegrationTest):
@slow
def test_forward(self):
src, tgt = ["I am a small frog"], ["Ich bin ein kleiner Frosch."]
expected_ids = [38, 121, 14, 697, 38848, 0]
model_inputs: dict = self.tokenizer.prepare_translation_batch(src, tgt_texts=tgt).to(torch_device)
self.assertListEqual(expected_ids, model_inputs.input_ids[0].tolist())
desired_keys = {
"input_ids",
"attention_mask",
"decoder_input_ids",
"decoder_attention_mask",
}
self.assertSetEqual(desired_keys, set(model_inputs.keys()))
with torch.no_grad():
logits, *enc_features = self.model(**model_inputs)
max_indices = logits.argmax(-1)
self.tokenizer.batch_decode(max_indices)
def test_unk_support(self):
t = self.tokenizer
ids = t.prepare_translation_batch(["||"]).to(torch_device).input_ids[0].tolist()
expected = [t.unk_token_id, t.unk_token_id, t.eos_token_id]
self.assertEqual(expected, ids)
def test_pad_not_split(self):
input_ids_w_pad = self.tokenizer.prepare_translation_batch(["I am a small frog <pad>"]).input_ids[0].tolist()
expected_w_pad = [38, 121, 14, 697, 38848, self.tokenizer.pad_token_id, 0] # pad
self.assertListEqual(expected_w_pad, input_ids_w_pad)
@slow
def test_batch_generation_en_de(self):
self._assert_generated_batch_equal_expected()
def test_auto_config(self):
config = AutoConfig.from_pretrained(self.model_name)
self.assertIsInstance(config, MarianConfig)
class TestMarian_EN_FR(MarianIntegrationTest):
src = "en"
tgt = "fr"
src_text = [
"I am a small frog.",
"Now I can forget the 100 words of german that I know.",
]
expected_text = [
"Je suis une petite grenouille.",
"Maintenant, je peux oublier les 100 mots d'allemand que je connais.",
]
@slow
def test_batch_generation_en_fr(self):
self._assert_generated_batch_equal_expected()
class TestMarian_FR_EN(MarianIntegrationTest):
src = "fr"
tgt = "en"
src_text = [
"Donnez moi le micro.",
"Tom et Mary étaient assis à une table.", # Accents
]
expected_text = [
"Give me the microphone.",
"Tom and Mary were sitting at a table.",
]
@slow
def test_batch_generation_fr_en(self):
self._assert_generated_batch_equal_expected()
class TestMarian_RU_FR(MarianIntegrationTest):
src = "ru"
tgt = "fr"
src_text = ["Он показал мне рукопись своей новой пьесы."]
expected_text = ["Il m'a montré le manuscrit de sa nouvelle pièce."]
@slow
def test_batch_generation_ru_fr(self):
self._assert_generated_batch_equal_expected()
class TestMarian_MT_EN(MarianIntegrationTest):
src = "mt"
tgt = "en"
src_text = ["Billi messu b'mod ġentili, Ġesù fejjaq raġel li kien milqut bil - marda kerha tal - ġdiem."]
expected_text = ["Touching gently, Jesus healed a man who was affected by the sad disease of leprosy."]
@slow
def test_batch_generation_mt_en(self):
self._assert_generated_batch_equal_expected()
class TestMarian_en_ROMANCE(MarianIntegrationTest):
"""Multilingual on target side."""
src = "en"
tgt = "ROMANCE"
src_text = [
">>fr<< Don't spend so much time watching TV.",
">>pt<< Your message has been sent.",
">>es<< He's two years older than me.",
]
expected_text = [
"Ne passez pas autant de temps à regarder la télé.",
"A sua mensagem foi enviada.",
"Es dos años más viejo que yo.",
]
@slow
def test_batch_generation_en_ROMANCE_multi(self):
self._assert_generated_batch_equal_expected()
def test_tokenizer_handles_empty(self):
normalized = self.tokenizer.normalize("")
self.assertIsInstance(normalized, str)
with self.assertRaises(ValueError):
self.tokenizer.prepare_translation_batch([""])
def test_pipeline(self):
device = 0 if torch_device == "cuda" else -1
pipeline = TranslationPipeline(self.model, self.tokenizer, framework="pt", device=device)
output = pipeline(self.src_text)
self.assertEqual(self.expected_text, [x["translation_text"] for x in output])
@require_torch
class TestConversionUtils(unittest.TestCase):
def test_renaming_multilingual(self):
old_names = [
"opus-mt-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-fi",
"opus-mt-cmn+cn-fi", # no group
"opus-mt-en-de", # standard name
"opus-mt-en-de", # standard name
]
expected = ["opus-mt-ZH-fi", "opus-mt-cmn_cn-fi", "opus-mt-en-de", "opus-mt-en-de"]
self.assertListEqual(expected, [convert_opus_name_to_hf_name(x) for x in old_names])
def test_undoing_renaming(self):
hf_names = ["opus-mt-ZH-fi", "opus-mt-cmn_cn-fi", "opus-mt-en-de", "opus-mt-en-de"]
converted_opus_names = [convert_hf_name_to_opus_name(x) for x in hf_names]
expected_opus_names = [
"cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-fi",
"cmn+cn-fi",
"en-de", # standard name
"en-de",
]
self.assertListEqual(expected_opus_names, converted_opus_names)
| 9,383 | 34.680608 | 117 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_modeling_ctrl.py | # coding=utf-8
# Copyright 2018 Salesforce and HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
import torch
from transformers import CTRLConfig, CTRLModel, CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLLMHeadModel
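# Builds a tiny, randomly initialized CTRL config plus dummy input tensors so the shape and loss checks
# below run quickly without pretrained weights.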
class CTRLModelTester:
def __init__(
self, parent,
):
self.parent = parent
self.batch_size = 14
self.seq_length = 7
self.is_training = True
self.use_token_type_ids = True
self.use_input_mask = True
self.use_labels = True
self.use_mc_token_ids = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
mc_token_ids = None
if self.use_mc_token_ids:
mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = CTRLConfig(
vocab_size=self.vocab_size,
n_embd=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
# intermediate_size=self.intermediate_size,
# hidden_act=self.hidden_act,
# hidden_dropout_prob=self.hidden_dropout_prob,
# attention_probs_dropout_prob=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
n_ctx=self.max_position_embeddings
# type_vocab_size=self.type_vocab_size,
# initializer_range=self.initializer_range
)
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def check_loss_output(self, result):
self.parent.assertListEqual(list(result["loss"].size()), [])
def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = CTRLModel(config=config)
model.to(torch_device)
model.eval()
model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
model(input_ids, token_type_ids=token_type_ids)
sequence_output, presents = model(input_ids)
result = {
"sequence_output": sequence_output,
"presents": presents,
}
self.parent.assertListEqual(
list(result["sequence_output"].size()), [self.batch_size, self.seq_length, self.hidden_size]
)
self.parent.assertEqual(len(result["presents"]), config.n_layer)
def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = CTRLLMHeadModel(config)
model.to(torch_device)
model.eval()
loss, lm_logits, _ = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
result = {"loss": loss, "lm_logits": lm_logits}
self.parent.assertListEqual(list(result["loss"].size()), [])
self.parent.assertListEqual(
list(result["lm_logits"].size()), [self.batch_size, self.seq_length, self.vocab_size]
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
return config, inputs_dict
@require_torch
class CTRLModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (CTRLModel, CTRLLMHeadModel) if is_torch_available() else ()
all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
test_pruning = True
test_torchscript = False
test_resize_embeddings = False
test_head_masking = False
def setUp(self):
self.model_tester = CTRLModelTester(self)
self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_ctrl_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*config_and_inputs)
def test_ctrl_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = CTRLModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
@slow
def test_lm_generate_ctrl(self):
model = CTRLLMHeadModel.from_pretrained("ctrl")
model.to(torch_device)
input_ids = torch.tensor(
[[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
) # Legal the president is
expected_output_ids = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
output_ids = model.generate(input_ids, do_sample=False)
self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 7,900 | 33.352174 | 110 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_modeling_flaubert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
from transformers import (
FlaubertConfig,
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
)
from transformers.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
def __init__(
self, parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_lengths = True
self.use_token_type_ids = True
self.use_labels = True
self.gelu_activation = True
self.sinusoidal_embeddings = False
self.causal = False
self.asm = False
self.n_langs = 2
self.vocab_size = 99
self.n_special = 0
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 12
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.summary_type = "last"
self.use_proj = None
self.scope = None
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = ids_tensor([self.batch_size, self.seq_length], 2).float()
input_lengths = None
if self.use_input_lengths:
input_lengths = (
ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
sequence_labels = None
token_labels = None
is_impossible_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
is_impossible_labels = ids_tensor([self.batch_size], 2).float()
config = FlaubertConfig(
vocab_size=self.vocab_size,
n_special=self.n_special,
emb_dim=self.hidden_size,
n_layers=self.num_hidden_layers,
n_heads=self.num_attention_heads,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
gelu_activation=self.gelu_activation,
sinusoidal_embeddings=self.sinusoidal_embeddings,
asm=self.asm,
causal=self.causal,
n_langs=self.n_langs,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
summary_type=self.summary_type,
use_proj=self.use_proj,
)
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
input_mask,
)
def check_loss_output(self, result):
self.parent.assertListEqual(list(result["loss"].size()), [])
def create_and_check_flaubert_model(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
input_mask,
):
model = FlaubertModel(config=config)
model.to(torch_device)
model.eval()
outputs = model(input_ids, lengths=input_lengths, langs=token_type_ids)
outputs = model(input_ids, langs=token_type_ids)
outputs = model(input_ids)
sequence_output = outputs[0]
result = {
"sequence_output": sequence_output,
}
self.parent.assertListEqual(
list(result["sequence_output"].size()), [self.batch_size, self.seq_length, self.hidden_size]
)
def create_and_check_flaubert_lm_head(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
input_mask,
):
model = FlaubertWithLMHeadModel(config)
model.to(torch_device)
model.eval()
loss, logits = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
result = {
"loss": loss,
"logits": logits,
}
self.parent.assertListEqual(list(result["loss"].size()), [])
self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.seq_length, self.vocab_size])
def create_and_check_flaubert_simple_qa(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
input_mask,
):
model = FlaubertForQuestionAnsweringSimple(config)
model.to(torch_device)
model.eval()
outputs = model(input_ids)
outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
loss, start_logits, end_logits = outputs
result = {
"loss": loss,
"start_logits": start_logits,
"end_logits": end_logits,
}
self.parent.assertListEqual(list(result["start_logits"].size()), [self.batch_size, self.seq_length])
self.parent.assertListEqual(list(result["end_logits"].size()), [self.batch_size, self.seq_length])
self.check_loss_output(result)
def create_and_check_flaubert_qa(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
input_mask,
):
model = FlaubertForQuestionAnswering(config)
model.to(torch_device)
model.eval()
outputs = model(input_ids)
start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = outputs
outputs = model(
input_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
cls_index=sequence_labels,
is_impossible=is_impossible_labels,
p_mask=input_mask,
)
outputs = model(
input_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
cls_index=sequence_labels,
is_impossible=is_impossible_labels,
)
(total_loss,) = outputs
outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
(total_loss,) = outputs
result = {
"loss": total_loss,
"start_top_log_probs": start_top_log_probs,
"start_top_index": start_top_index,
"end_top_log_probs": end_top_log_probs,
"end_top_index": end_top_index,
"cls_logits": cls_logits,
}
self.parent.assertListEqual(list(result["loss"].size()), [])
self.parent.assertListEqual(
list(result["start_top_log_probs"].size()), [self.batch_size, model.config.start_n_top]
)
self.parent.assertListEqual(
list(result["start_top_index"].size()), [self.batch_size, model.config.start_n_top]
)
self.parent.assertListEqual(
list(result["end_top_log_probs"].size()),
[self.batch_size, model.config.start_n_top * model.config.end_n_top],
)
self.parent.assertListEqual(
list(result["end_top_index"].size()), [self.batch_size, model.config.start_n_top * model.config.end_n_top],
)
self.parent.assertListEqual(list(result["cls_logits"].size()), [self.batch_size])
def create_and_check_flaubert_sequence_classif(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
input_mask,
):
model = FlaubertForSequenceClassification(config)
model.to(torch_device)
model.eval()
(logits,) = model(input_ids)
loss, logits = model(input_ids, labels=sequence_labels)
result = {
"loss": loss,
"logits": logits,
}
self.parent.assertListEqual(list(result["loss"].size()), [])
self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.type_sequence_label_size])
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
input_mask,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
)
if is_torch_available()
else ()
)
def setUp(self):
self.model_tester = FlaubertModelTester(self)
self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_flaubert_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*config_and_inputs)
def test_flaubert_lm_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)
def test_flaubert_simple_qa(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)
def test_flaubert_qa(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)
def test_flaubert_sequence_classif(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = FlaubertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
| 12,049 | 32.472222 | 119 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_modeling_mobilebert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
if is_torch_available():
import torch
from transformers import (
MobileBertConfig,
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertForMultipleChoice,
)
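# Same pattern as the other model testers: a small MobileBERT config plus random inputs, with one
# create_and_check_* helper per task head.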
class MobileBertModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=64,
embedding_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.embedding_size = embedding_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = MobileBertConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
embedding_size=self.embedding_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def check_loss_output(self, result):
self.parent.assertListEqual(list(result["loss"].size()), [])
def create_and_check_mobilebert_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = MobileBertModel(config=config)
model.to(torch_device)
model.eval()
sequence_output, pooled_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
sequence_output, pooled_output = model(input_ids, token_type_ids=token_type_ids)
sequence_output, pooled_output = model(input_ids)
result = {
"sequence_output": sequence_output,
"pooled_output": pooled_output,
}
self.parent.assertListEqual(
list(result["sequence_output"].size()), [self.batch_size, self.seq_length, self.hidden_size]
)
self.parent.assertListEqual(list(result["pooled_output"].size()), [self.batch_size, self.hidden_size])
def create_and_check_mobilebert_model_as_decoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
model = MobileBertModel(config)
model.to(torch_device)
model.eval()
sequence_output, pooled_output = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
sequence_output, pooled_output = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
)
sequence_output, pooled_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = {
"sequence_output": sequence_output,
"pooled_output": pooled_output,
}
self.parent.assertListEqual(
list(result["sequence_output"].size()), [self.batch_size, self.seq_length, self.hidden_size]
)
self.parent.assertListEqual(list(result["pooled_output"].size()), [self.batch_size, self.hidden_size])
def create_and_check_mobilebert_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = MobileBertForMaskedLM(config=config)
model.to(torch_device)
model.eval()
loss, prediction_scores = model(
input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
)
result = {
"loss": loss,
"prediction_scores": prediction_scores,
}
self.parent.assertListEqual(
list(result["prediction_scores"].size()), [self.batch_size, self.seq_length, self.vocab_size]
)
self.check_loss_output(result)
def create_and_check_mobilebert_for_next_sequence_prediction(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = MobileBertForNextSentencePrediction(config=config)
model.to(torch_device)
model.eval()
loss, seq_relationship_score = model(
input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, next_sentence_label=sequence_labels,
)
result = {
"loss": loss,
"seq_relationship_score": seq_relationship_score,
}
self.parent.assertListEqual(list(result["seq_relationship_score"].size()), [self.batch_size, 2])
self.check_loss_output(result)
def create_and_check_mobilebert_for_pretraining(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = MobileBertForPreTraining(config=config)
model.to(torch_device)
model.eval()
loss, prediction_scores, seq_relationship_score = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=token_labels,
next_sentence_label=sequence_labels,
)
result = {
"loss": loss,
"prediction_scores": prediction_scores,
"seq_relationship_score": seq_relationship_score,
}
self.parent.assertListEqual(
list(result["prediction_scores"].size()), [self.batch_size, self.seq_length, self.vocab_size]
)
self.parent.assertListEqual(list(result["seq_relationship_score"].size()), [self.batch_size, 2])
self.check_loss_output(result)
def create_and_check_mobilebert_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = MobileBertForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
loss, start_logits, end_logits = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
result = {
"loss": loss,
"start_logits": start_logits,
"end_logits": end_logits,
}
self.parent.assertListEqual(list(result["start_logits"].size()), [self.batch_size, self.seq_length])
self.parent.assertListEqual(list(result["end_logits"].size()), [self.batch_size, self.seq_length])
self.check_loss_output(result)
def create_and_check_mobilebert_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = MobileBertForSequenceClassification(config)
model.to(torch_device)
model.eval()
loss, logits = model(
input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels
)
result = {
"loss": loss,
"logits": logits,
}
self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.num_labels])
self.check_loss_output(result)
def create_and_check_mobilebert_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = MobileBertForTokenClassification(config=config)
model.to(torch_device)
model.eval()
loss, logits = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
result = {
"loss": loss,
"logits": logits,
}
self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.seq_length, self.num_labels])
self.check_loss_output(result)
def create_and_check_mobilebert_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = MobileBertForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
loss, logits = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
result = {
"loss": loss,
"logits": logits,
}
self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.num_choices])
self.check_loss_output(result)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
def setUp(self):
self.model_tester = MobileBertModelTester(self)
self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_mobilebert_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)
def test_mobilebert_model_as_decoder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_mobilebert_model_as_decoder(*config_and_inputs)
def test_mobilebert_model_as_decoder_with_default_input_mask(self):
# This regression test was failing with PyTorch < 1.3
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_mobilebert_model_as_decoder(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)
def test_for_next_sequence_prediction(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)
def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
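# Small helper: wraps a Python list of token ids into a torch.long tensor on the active test device.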
def _long_tensor(tok_lst):
return torch.tensor(tok_lst, dtype=torch.long, device=torch_device,)
TOLERANCE = 1e-3
@require_torch
class MobileBertModelIntegrationTests(unittest.TestCase):
@slow
def test_inference_no_head(self):
model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
with torch.no_grad():
output = model(input_ids)[0]
expected_shape = torch.Size((1, 9, 512))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[
[
[-2.4736526e07, 8.2691656e04, 1.6521838e05],
[-5.7541704e-01, 3.9056022e00, 4.4011507e00],
[2.6047359e00, 1.5677652e00, -1.7324188e-01],
]
],
device=torch_device,
)
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference on a value of 10e8 yields an
# absolute difference of ~1, so it's not a good idea to compare with an additive tolerance.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
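# For example, an expected value of 1.0e8 against an output of 1.00005e8 gives a ratio of ~0.99995, well
# within the 1e-3 band around 1, even though the absolute difference is ~5000.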
lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
self.assertTrue(lower_bound and upper_bound)
| 19,333 | 38.217039 | 119 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_modeling_tf_distilbert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf
from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers.modeling_tf_distilbert import (
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
class TFDistilBertModelTester:
def __init__(
self, parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_mask = True
self.use_token_type_ids = False
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = DistilBertConfig(
vocab_size=self.vocab_size,
dim=self.hidden_size,
n_layers=self.num_hidden_layers,
n_heads=self.num_attention_heads,
hidden_dim=self.intermediate_size,
hidden_act=self.hidden_act,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
)
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_distilbert_model(
self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFDistilBertModel(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask}
outputs = model(inputs)
sequence_output = outputs[0]
inputs = [input_ids, input_mask]
(sequence_output,) = model(inputs)
result = {
"sequence_output": sequence_output.numpy(),
}
self.parent.assertListEqual(
list(result["sequence_output"].shape), [self.batch_size, self.seq_length, self.hidden_size]
)
def create_and_check_distilbert_for_masked_lm(
self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFDistilBertForMaskedLM(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask}
(prediction_scores,) = model(inputs)
result = {
"prediction_scores": prediction_scores.numpy(),
}
self.parent.assertListEqual(
list(result["prediction_scores"].shape), [self.batch_size, self.seq_length, self.vocab_size]
)
def create_and_check_distilbert_for_question_answering(
self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFDistilBertForQuestionAnswering(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask}
start_logits, end_logits = model(inputs)
result = {
"start_logits": start_logits.numpy(),
"end_logits": end_logits.numpy(),
}
self.parent.assertListEqual(list(result["start_logits"].shape), [self.batch_size, self.seq_length])
self.parent.assertListEqual(list(result["end_logits"].shape), [self.batch_size, self.seq_length])
def create_and_check_distilbert_for_sequence_classification(
self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = TFDistilBertForSequenceClassification(config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask}
(logits,) = model(inputs)
result = {
"logits": logits.numpy(),
}
self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.num_labels])
def create_and_check_distilbert_for_multiple_choice(
self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = TFDistilBertForMultipleChoice(config)
multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
inputs = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
}
(logits,) = model(inputs)
result = {
"logits": logits.numpy(),
}
self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.num_choices])
def create_and_check_distilbert_for_token_classification(
self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = TFDistilBertForTokenClassification(config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask}
(logits,) = model(inputs)
result = {
"logits": logits.numpy(),
}
self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.seq_length, self.num_labels])
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
test_pruning = True
test_torchscript = True
test_head_masking = True
def setUp(self):
self.model_tester = TFDistilBertModelTester(self)
self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_distilbert_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)
# @slow
# def test_model_from_pretrained(self):
# for model_name in list(DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
# model = TFDistilBertModel.from_pretrained(model_name)
# self.assertIsNotNone(model)
| 9,701 | 38.762295 | 118 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_doc_samples.py | # coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
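# Collects files from a directory and runs their embedded doctests, either by importing them as
# transformers modules or via doctest.testfile.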
def analyze_directory(
self,
directory: Path,
identifier: Union[str, None] = None,
ignore_files: Union[List[str], None] = None,
n_identifier: Union[str, None] = None,
only_modules: bool = True,
):
"""
Runs through the specific directory, looking for the files identified with `identifier`. Executes
the doctests in those files
Args:
directory (:obj:`str`): Directory containing the files
identifier (:obj:`str`): Will parse files containing this
ignore_files (:obj:`List[str]`): List of files to skip
n_identifier (:obj:`str` or :obj:`List[str]`): Will not parse files containing this/these identifiers.
only_modules (:obj:`bool`): Whether to only analyze modules
"""
ignore_files = [] if ignore_files is None else list(ignore_files) # avoid mutating a shared default list
files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
if identifier is not None:
files = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(n_identifier, List):
for n_ in n_identifier:
files = [file for file in files if n_ not in file]
else:
files = [file for file in files if n_identifier not in file]
ignore_files.append("__init__.py")
files = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing", file)
if only_modules:
try:
module_identifier = file.split(".")[0]
module_identifier = getattr(transformers, module_identifier)
suite = doctest.DocTestSuite(module_identifier)
result = unittest.TextTestRunner().run(suite)
self.assertIs(len(result.failures), 0)
except AttributeError:
logger.info(f"{module_identifier} is not a module.")
else:
result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed, 0)
def test_modeling_examples(self):
transformers_directory = "src/transformers"
files = "modeling"
ignore_files = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)
def test_tokenization_examples(self):
transformers_directory = Path("src/transformers")
files = "tokenization"
self.analyze_directory(transformers_directory, identifier=files)
def test_configuration_examples(self):
transformers_directory = Path("src/transformers")
files = "configuration"
self.analyze_directory(transformers_directory, identifier=files)
def test_remaining_examples(self):
transformers_directory = Path("src/transformers")
n_identifiers = ["configuration", "modeling", "tokenization"]
self.analyze_directory(transformers_directory, n_identifier=n_identifiers)
def test_doc_sources(self):
doc_source_directory = Path("docs/source")
ignore_files = ["favicon.ico"]
self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 4,326 | 37.292035 | 114 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_tokenization_transfo_xl.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
from .test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
from transformers.tokenization_transfo_xl import TransfoXLTokenizer, VOCAB_FILES_NAMES
@require_torch
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = TransfoXLTokenizer if is_torch_available() else None
def setUp(self):
super().setUp()
vocab_tokens = [
"<unk>",
"[CLS]",
"[SEP]",
"want",
"unwanted",
"wa",
"un",
"running",
",",
"low",
"l",
]
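        # Write the toy vocabulary to disk so the tokenizer can be reloaded via from_pretrained below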
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def get_tokenizer(self, **kwargs):
kwargs["lower_case"] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
input_text = "<unk> UNwanted , running"
output_text = "<unk> unwanted, running"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
tokens = tokenizer.tokenize("<unk> UNwanted , running")
self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
def test_full_tokenizer_lower(self):
tokenizer = TransfoXLTokenizer(lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
)
def test_full_tokenizer_no_lower(self):
tokenizer = TransfoXLTokenizer(lower_case=False)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
)
def test_move_added_token(self):
tokenizer = self.get_tokenizer()
original_len = len(tokenizer)
tokenizer.add_tokens(["new1", "new2"])
tokenizer.move_added_token("new1", 1)
# Check that moved token is not copied (duplicate)
self.assertEqual(len(tokenizer), original_len + 2)
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("new1"), [1])
self.assertEqual(tokenizer.decode([1]), "new1")
| 3,299 | 32.673469 | 106 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_modeling_xlnet.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
import torch
from transformers import (
XLNetConfig,
XLNetModel,
XLNetLMHeadModel,
XLNetForMultipleChoice,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetForQuestionAnswering,
)
from transformers.modeling_xlnet import XLNET_PRETRAINED_MODEL_ARCHIVE_LIST
class XLNetModelTester:
def __init__(
self,
parent,
batch_size=14,
seq_length=7,
mem_len=10,
clamp_len=-1,
reuse_len=15,
is_training=True,
use_labels=True,
vocab_size=99,
cutoffs=[10, 50, 80],
hidden_size=32,
num_attention_heads=4,
d_inner=128,
num_hidden_layers=5,
type_sequence_label_size=2,
untie_r=True,
bi_data=False,
same_length=False,
initializer_range=0.05,
seed=1,
type_vocab_size=2,
bos_token_id=1,
eos_token_id=2,
pad_token_id=5,
num_choices=4,
):
self.parent = parent
self.batch_size = 14
self.seq_length = 7
self.mem_len = 10
# self.key_len = seq_length + mem_len
self.clamp_len = -1
self.reuse_len = 15
self.is_training = True
self.use_labels = True
self.vocab_size = 99
self.cutoffs = [10, 50, 80]
self.hidden_size = 32
self.num_attention_heads = 4
self.d_inner = 128
self.num_hidden_layers = 5
self.type_sequence_label_size = 2
self.untie_r = True
self.bi_data = False
self.same_length = False
self.initializer_range = 0.05
self.seed = 1
self.type_vocab_size = 2
self.bos_token_id = 1
self.eos_token_id = 2
self.pad_token_id = 5
self.num_choices = 4
def prepare_config_and_inputs(self):
input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
segment_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
input_mask = ids_tensor([self.batch_size, self.seq_length], 2).float()
input_ids_q = ids_tensor([self.batch_size, self.seq_length + 1], self.vocab_size)
perm_mask = torch.zeros(
self.batch_size, self.seq_length + 1, self.seq_length + 1, dtype=torch.float, device=torch_device,
)
perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token
target_mapping = torch.zeros(self.batch_size, 1, self.seq_length + 1, dtype=torch.float, device=torch_device,)
target_mapping[:, 0, -1] = 1.0 # predict last token
sequence_labels = None
lm_labels = None
is_impossible_labels = None
token_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
is_impossible_labels = ids_tensor([self.batch_size], 2).float()
token_labels = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
config = XLNetConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
n_head=self.num_attention_heads,
d_inner=self.d_inner,
n_layer=self.num_hidden_layers,
untie_r=self.untie_r,
mem_len=self.mem_len,
clamp_len=self.clamp_len,
same_length=self.same_length,
reuse_len=self.reuse_len,
bi_data=self.bi_data,
initializer_range=self.initializer_range,
num_labels=self.type_sequence_label_size,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
eos_token_id=self.eos_token_id,
)
return (
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
)
def set_seed(self):
random.seed(self.seed)
torch.manual_seed(self.seed)
def create_and_check_xlnet_base_model(
self,
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
):
model = XLNetModel(config)
model.to(torch_device)
model.eval()
_, _ = model(input_ids_1, input_mask=input_mask)
_, _ = model(input_ids_1, attention_mask=input_mask)
_, _ = model(input_ids_1, token_type_ids=segment_ids)
outputs, mems_1 = model(input_ids_1)
result = {
"mems_1": mems_1,
"outputs": outputs,
}
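        # With mem_len set to 0 the model returns no memory states, so the output tuple has a single element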
config.mem_len = 0
model = XLNetModel(config)
model.to(torch_device)
model.eval()
no_mems_outputs = model(input_ids_1)
self.parent.assertEqual(len(no_mems_outputs), 1)
self.parent.assertListEqual(
list(result["outputs"].size()), [self.batch_size, self.seq_length, self.hidden_size],
)
self.parent.assertListEqual(
list(list(mem.size()) for mem in result["mems_1"]),
[[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers,
)
def create_and_check_xlnet_base_model_with_att_output(
self,
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
):
model = XLNetModel(config)
model.to(torch_device)
model.eval()
_, _, attentions = model(input_ids_1, target_mapping=target_mapping, output_attentions=True)
self.parent.assertEqual(len(attentions), config.n_layer)
self.parent.assertIsInstance(attentions[0], tuple)
self.parent.assertEqual(len(attentions[0]), 2)
self.parent.assertTrue(attentions[0][0].shape, attentions[0][0].shape)
def create_and_check_xlnet_lm_head(
self,
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
):
model = XLNetLMHeadModel(config)
model.to(torch_device)
model.eval()
loss_1, all_logits_1, mems_1 = model(input_ids_1, token_type_ids=segment_ids, labels=lm_labels)
loss_2, all_logits_2, mems_2 = model(input_ids_2, token_type_ids=segment_ids, labels=lm_labels, mems=mems_1)
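        # Two-stream prediction pass: perm_mask hides the last token and target_mapping asks the model to predict only that position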
logits, _ = model(input_ids_q, perm_mask=perm_mask, target_mapping=target_mapping)
result = {
"loss_1": loss_1,
"mems_1": mems_1,
"all_logits_1": all_logits_1,
"loss_2": loss_2,
"mems_2": mems_2,
"all_logits_2": all_logits_2,
}
self.parent.assertListEqual(list(result["loss_1"].size()), [])
self.parent.assertListEqual(
list(result["all_logits_1"].size()), [self.batch_size, self.seq_length, self.vocab_size],
)
self.parent.assertListEqual(
list(list(mem.size()) for mem in result["mems_1"]),
[[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers,
)
self.parent.assertListEqual(list(result["loss_2"].size()), [])
self.parent.assertListEqual(
list(result["all_logits_2"].size()), [self.batch_size, self.seq_length, self.vocab_size],
)
self.parent.assertListEqual(
list(list(mem.size()) for mem in result["mems_2"]),
[[self.mem_len, self.batch_size, self.hidden_size]] * self.num_hidden_layers,
)
def create_and_check_xlnet_qa(
self,
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
):
model = XLNetForQuestionAnswering(config)
model.to(torch_device)
model.eval()
outputs = model(input_ids_1)
(start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits, mems,) = outputs
outputs = model(
input_ids_1,
start_positions=sequence_labels,
end_positions=sequence_labels,
cls_index=sequence_labels,
is_impossible=is_impossible_labels,
p_mask=input_mask,
)
outputs = model(
input_ids_1,
start_positions=sequence_labels,
end_positions=sequence_labels,
cls_index=sequence_labels,
is_impossible=is_impossible_labels,
)
total_loss, mems = outputs
outputs = model(input_ids_1, start_positions=sequence_labels, end_positions=sequence_labels,)
total_loss, mems = outputs
result = {
"loss": total_loss,
"start_top_log_probs": start_top_log_probs,
"start_top_index": start_top_index,
"end_top_log_probs": end_top_log_probs,
"end_top_index": end_top_index,
"cls_logits": cls_logits,
"mems": mems,
}
self.parent.assertListEqual(list(result["loss"].size()), [])
self.parent.assertListEqual(
list(result["start_top_log_probs"].size()), [self.batch_size, model.config.start_n_top],
)
self.parent.assertListEqual(
list(result["start_top_index"].size()), [self.batch_size, model.config.start_n_top],
)
self.parent.assertListEqual(
list(result["end_top_log_probs"].size()),
[self.batch_size, model.config.start_n_top * model.config.end_n_top],
)
self.parent.assertListEqual(
list(result["end_top_index"].size()), [self.batch_size, model.config.start_n_top * model.config.end_n_top],
)
self.parent.assertListEqual(list(result["cls_logits"].size()), [self.batch_size])
self.parent.assertListEqual(
list(list(mem.size()) for mem in result["mems"]),
[[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers,
)
def create_and_check_xlnet_token_classif(
self,
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
):
model = XLNetForTokenClassification(config)
model.to(torch_device)
model.eval()
logits, mems_1 = model(input_ids_1)
loss, logits, mems_1 = model(input_ids_1, labels=token_labels)
result = {
"loss": loss,
"mems_1": mems_1,
"logits": logits,
}
self.parent.assertListEqual(list(result["loss"].size()), [])
self.parent.assertListEqual(
list(result["logits"].size()), [self.batch_size, self.seq_length, self.type_sequence_label_size],
)
self.parent.assertListEqual(
list(list(mem.size()) for mem in result["mems_1"]),
[[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers,
)
def create_and_check_xlnet_sequence_classif(
self,
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
):
model = XLNetForSequenceClassification(config)
model.to(torch_device)
model.eval()
logits, mems_1 = model(input_ids_1)
loss, logits, mems_1 = model(input_ids_1, labels=sequence_labels)
result = {
"loss": loss,
"mems_1": mems_1,
"logits": logits,
}
self.parent.assertListEqual(list(result["loss"].size()), [])
self.parent.assertListEqual(
list(result["logits"].size()), [self.batch_size, self.type_sequence_label_size],
)
self.parent.assertListEqual(
list(list(mem.size()) for mem in result["mems_1"]),
[[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers,
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids_1}
return config, inputs_dict
@require_torch
class XLNetModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
XLNetModel,
XLNetLMHeadModel,
XLNetForTokenClassification,
XLNetForSequenceClassification,
XLNetForQuestionAnswering,
XLNetForMultipleChoice,
)
if is_torch_available()
else ()
)
all_generative_model_classes = (
(XLNetLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
test_pruning = False
def setUp(self):
self.model_tester = XLNetModelTester(self)
self.config_tester = ConfigTester(self, config_class=XLNetConfig, d_inner=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_xlnet_base_model(self):
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlnet_base_model(*config_and_inputs)
def test_xlnet_base_model_with_att_output(self):
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlnet_base_model_with_att_output(*config_and_inputs)
def test_xlnet_lm_head(self):
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlnet_lm_head(*config_and_inputs)
def test_xlnet_sequence_classif(self):
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlnet_sequence_classif(*config_and_inputs)
def test_xlnet_token_classif(self):
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlnet_token_classif(*config_and_inputs)
def test_xlnet_qa(self):
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlnet_qa(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in XLNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = XLNetModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
class XLNetModelLanguageGenerationTest(unittest.TestCase):
@slow
def test_lm_generate_xlnet_base_cased(self):
model = XLNetLMHeadModel.from_pretrained("xlnet-base-cased")
model.to(torch_device)
input_ids = torch.tensor(
[
[
67,
2840,
19,
18,
1484,
20,
965,
29077,
8719,
1273,
21,
45,
273,
17,
10,
15048,
28,
27511,
21,
4185,
11,
41,
2444,
9,
32,
1025,
20,
8719,
26,
23,
673,
966,
19,
29077,
20643,
27511,
20822,
20643,
19,
17,
6616,
17511,
18,
8978,
20,
18,
777,
9,
19233,
1527,
17669,
19,
24,
673,
17,
28756,
150,
12943,
4354,
153,
27,
442,
37,
45,
668,
21,
24,
256,
20,
416,
22,
2771,
4901,
9,
12943,
4354,
153,
51,
24,
3004,
21,
28142,
23,
65,
20,
18,
416,
34,
24,
2958,
22947,
9,
1177,
45,
668,
3097,
13768,
23,
103,
28,
441,
148,
48,
20522,
19,
12943,
4354,
153,
12860,
34,
18,
326,
27,
17492,
684,
21,
6709,
9,
8585,
123,
266,
19,
12943,
4354,
153,
6872,
24,
3004,
20,
18,
9225,
2198,
19,
12717,
103,
22,
401,
24,
6348,
9,
12943,
4354,
153,
1068,
2768,
2286,
19,
33,
104,
19,
176,
24,
9313,
19,
20086,
28,
45,
10292,
9,
4,
3,
]
],
dtype=torch.long,
device=torch_device,
)
# In 1991, the remains of Russian Tsar Nicholas II and his family
# (except for Alexei and Maria) are discovered.
# The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
# remainder of the story. 1883 Western Siberia,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic.
# Rasputin has a vision and denounces one of the men as a horse thief. Although his
# father initially slaps him for making such an accusation, Rasputin watches as the
# man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
# the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
        # with people, even a bishop, begging for his blessing.
expected_output_ids = [
67,
2840,
19,
18,
1484,
20,
965,
29077,
8719,
1273,
21,
45,
273,
17,
10,
15048,
28,
27511,
21,
4185,
11,
41,
2444,
9,
32,
1025,
20,
8719,
26,
23,
673,
966,
19,
29077,
20643,
27511,
20822,
20643,
19,
17,
6616,
17511,
18,
8978,
20,
18,
777,
9,
19233,
1527,
17669,
19,
24,
673,
17,
28756,
150,
12943,
4354,
153,
27,
442,
37,
45,
668,
21,
24,
256,
20,
416,
22,
2771,
4901,
9,
12943,
4354,
153,
51,
24,
3004,
21,
28142,
23,
65,
20,
18,
416,
34,
24,
2958,
22947,
9,
1177,
45,
668,
3097,
13768,
23,
103,
28,
441,
148,
48,
20522,
19,
12943,
4354,
153,
12860,
34,
18,
326,
27,
17492,
684,
21,
6709,
9,
8585,
123,
266,
19,
12943,
4354,
153,
6872,
24,
3004,
20,
18,
9225,
2198,
19,
12717,
103,
22,
401,
24,
6348,
9,
12943,
4354,
153,
1068,
2768,
2286,
19,
33,
104,
19,
176,
24,
9313,
19,
20086,
28,
45,
10292,
9,
4,
3,
19,
12943,
4354,
153,
27,
442,
22,
2771,
4901,
9,
69,
27,
50,
551,
22,
2771,
4901,
19,
21,
45,
668,
21,
18,
416,
41,
1499,
22,
755,
18,
14285,
9,
12943,
4354,
153,
27,
1499,
22,
642,
22,
]
# In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria)
# are discovered. The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich,
# narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin
# is asked by his father and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially slaps
# him for making such an accusation, Rasputin watches as the man is chased outside and beaten.
# Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing.
# <sep><cls>, Rasputin is asked to perform magic.
# He is not able to perform magic, and his father and
# the men are forced to leave the monastery. Rasputin is forced to return to
output_ids = model.generate(input_ids, max_length=200, do_sample=False)
self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 26,745 | 28.166848 | 119 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_modeling_camembert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
if is_torch_available():
import torch
from transformers import CamembertModel
@require_torch
class CamembertModelIntegrationTest(unittest.TestCase):
@slow
def test_output_embeds_base_model(self):
model = CamembertModel.from_pretrained("camembert-base")
model.to(torch_device)
input_ids = torch.tensor(
[[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]], device=torch_device, dtype=torch.long,
) # J'aime le camembert !
output = model(input_ids)[0]
expected_shape = torch.Size((1, 10, 768))
self.assertEqual(output.shape, expected_shape)
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
device=torch_device,
dtype=torch.float,
)
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
        # expected_slice = camembert.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 1,904 | 36.352941 | 99 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_modeling_tf_transfo_xl.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers import (
TFTransfoXLModel,
TFTransfoXLLMHeadModel,
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class TFTransfoXLModelTester:
def __init__(
self, parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.mem_len = 30
self.key_length = self.seq_length + self.mem_len
self.clamp_len = 15
self.is_training = True
self.use_labels = True
self.vocab_size = 99
self.cutoffs = [10, 50, 80]
self.hidden_size = 32
self.d_embed = 32
self.num_attention_heads = 4
self.d_head = 8
self.d_inner = 128
self.div_val = 2
self.num_hidden_layers = 5
self.scope = None
self.seed = 1
self.eos_token_id = 0
def prepare_config_and_inputs(self):
input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = TransfoXLConfig(
vocab_size=self.vocab_size,
mem_len=self.mem_len,
clamp_len=self.clamp_len,
cutoffs=self.cutoffs,
d_model=self.hidden_size,
d_embed=self.d_embed,
n_head=self.num_attention_heads,
d_head=self.d_head,
d_inner=self.d_inner,
div_val=self.div_val,
n_layer=self.num_hidden_layers,
eos_token_id=self.eos_token_id,
)
return (config, input_ids_1, input_ids_2, lm_labels)
def set_seed(self):
random.seed(self.seed)
tf.random.set_seed(self.seed)
def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
model = TFTransfoXLModel(config)
hidden_states_1, mems_1 = model(input_ids_1)
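        # Feed the memory states returned by the first segment back in as `mems` for the second segment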
inputs = {"input_ids": input_ids_2, "mems": mems_1}
hidden_states_2, mems_2 = model(inputs)
result = {
"hidden_states_1": hidden_states_1.numpy(),
"mems_1": [mem.numpy() for mem in mems_1],
"hidden_states_2": hidden_states_2.numpy(),
"mems_2": [mem.numpy() for mem in mems_2],
}
self.parent.assertListEqual(
list(result["hidden_states_1"].shape), [self.batch_size, self.seq_length, self.hidden_size]
)
self.parent.assertListEqual(
list(result["hidden_states_2"].shape), [self.batch_size, self.seq_length, self.hidden_size]
)
self.parent.assertListEqual(
list(list(mem.shape) for mem in result["mems_1"]),
[[self.mem_len, self.batch_size, self.hidden_size]] * self.num_hidden_layers,
)
self.parent.assertListEqual(
list(list(mem.shape) for mem in result["mems_2"]),
[[self.mem_len, self.batch_size, self.hidden_size]] * self.num_hidden_layers,
)
def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
model = TFTransfoXLLMHeadModel(config)
lm_logits_1, mems_1 = model(input_ids_1)
inputs = {"input_ids": input_ids_1, "labels": lm_labels}
_, mems_1 = model(inputs)
lm_logits_2, mems_2 = model([input_ids_2, mems_1])
inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
_, mems_2 = model(inputs)
result = {
"mems_1": [mem.numpy() for mem in mems_1],
"lm_logits_1": lm_logits_1.numpy(),
"mems_2": [mem.numpy() for mem in mems_2],
"lm_logits_2": lm_logits_2.numpy(),
}
self.parent.assertListEqual(
list(result["lm_logits_1"].shape), [self.batch_size, self.seq_length, self.vocab_size]
)
self.parent.assertListEqual(
list(list(mem.shape) for mem in result["mems_1"]),
[[self.mem_len, self.batch_size, self.hidden_size]] * self.num_hidden_layers,
)
self.parent.assertListEqual(
list(result["lm_logits_2"].shape), [self.batch_size, self.seq_length, self.vocab_size]
)
self.parent.assertListEqual(
list(list(mem.shape) for mem in result["mems_2"]),
[[self.mem_len, self.batch_size, self.hidden_size]] * self.num_hidden_layers,
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
inputs_dict = {"input_ids": input_ids_1}
return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (TFTransfoXLModel, TFTransfoXLLMHeadModel) if is_tf_available() else ()
all_generative_model_classes = () if is_tf_available() else ()
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
test_pruning = False
test_torchscript = False
test_resize_embeddings = False
def setUp(self):
self.model_tester = TFTransfoXLModelTester(self)
self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_transfo_xl_model(self):
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)
def test_transfo_xl_lm_head(self):
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFTransfoXLModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
@slow
def test_lm_generate_transfo_xl_wt103(self):
model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
input_ids = tf.convert_to_tensor(
[
[
33,
1297,
2,
1,
1009,
4,
1109,
11739,
4762,
358,
5,
25,
245,
22,
1706,
17,
20098,
5,
3215,
21,
37,
1110,
3,
13,
1041,
4,
24,
603,
490,
2,
71477,
20098,
104447,
2,
20961,
1,
2604,
4,
1,
329,
3,
6224,
831,
16002,
2,
8,
603,
78967,
29546,
23,
803,
20,
25,
416,
5,
8,
232,
4,
277,
6,
1855,
4601,
3,
29546,
54,
8,
3609,
5,
57211,
49,
4,
1,
277,
18,
8,
1755,
15691,
3,
341,
25,
416,
693,
42573,
71,
17,
401,
94,
31,
17919,
2,
29546,
7873,
18,
1,
435,
23,
11011,
755,
5,
5167,
3,
7983,
98,
84,
2,
29546,
3267,
8,
3609,
4,
1,
4865,
1075,
2,
6087,
71,
6,
346,
8,
5854,
3,
29546,
824,
1400,
1868,
2,
19,
160,
2,
311,
8,
5496,
2,
20920,
17,
25,
15097,
3,
24,
24,
0,
]
],
dtype=tf.int32,
)
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
expected_output_ids = [
33,
1297,
2,
1,
1009,
4,
1109,
11739,
4762,
358,
5,
25,
245,
22,
1706,
17,
20098,
5,
3215,
21,
37,
1110,
3,
13,
1041,
4,
24,
603,
490,
2,
71477,
20098,
104447,
2,
20961,
1,
2604,
4,
1,
329,
3,
6224,
831,
16002,
2,
8,
603,
78967,
29546,
23,
803,
20,
25,
416,
5,
8,
232,
4,
277,
6,
1855,
4601,
3,
29546,
54,
8,
3609,
5,
57211,
49,
4,
1,
277,
18,
8,
1755,
15691,
3,
341,
25,
416,
693,
42573,
71,
17,
401,
94,
31,
17919,
2,
29546,
7873,
18,
1,
435,
23,
11011,
755,
5,
5167,
3,
7983,
98,
84,
2,
29546,
3267,
8,
3609,
4,
1,
4865,
1075,
2,
6087,
71,
6,
346,
8,
5854,
3,
29546,
824,
1400,
1868,
2,
19,
160,
2,
311,
8,
5496,
2,
20920,
17,
25,
15097,
3,
24,
24,
0,
33,
1,
1857,
2,
1,
1009,
4,
1109,
11739,
4762,
358,
5,
25,
245,
28,
1110,
3,
13,
1041,
4,
24,
603,
490,
2,
71477,
20098,
104447,
2,
20961,
1,
2604,
4,
1,
329,
3,
0,
]
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
output_ids = model.generate(input_ids, max_length=200, do_sample=False)
self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
| 15,933 | 27.555556 | 103 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_tokenization_marian.py | # coding=utf-8
# Copyright 2020 Huggingface
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers.testing_utils import _torch_available
from transformers.tokenization_marian import MarianTokenizer, save_json, vocab_files_names
from transformers.tokenization_utils import BatchEncoding
from .test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures/test_sentencepiece.model")
mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"
FRAMEWORK = "pt" if _torch_available else "tf"
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = MarianTokenizer
def setUp(self):
super().setUp()
vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
save_dir = Path(self.tmpdirname)
save_json(vocab_tokens, save_dir / vocab_files_names["vocab"])
save_json(mock_tokenizer_config, save_dir / vocab_files_names["tokenizer_config_file"])
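        # Reuse the sample SentencePiece model for both the source and target vocabularies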
if not (save_dir / vocab_files_names["source_spm"]).exists():
copyfile(SAMPLE_SP, save_dir / vocab_files_names["source_spm"])
copyfile(SAMPLE_SP, save_dir / vocab_files_names["target_spm"])
tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def get_tokenizer(self, **kwargs) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
return (
"This is a test",
"This is a test",
)
def test_tokenizer_equivalence_en_de(self):
en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
batch = en_de_tokenizer.prepare_translation_batch(["I am a small frog"], return_tensors=None)
self.assertIsInstance(batch, BatchEncoding)
expected = [38, 121, 14, 697, 38848, 0]
self.assertListEqual(expected, batch.input_ids[0])
save_dir = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(save_dir)
contents = [x.name for x in Path(save_dir).glob("*")]
self.assertIn("source.spm", contents)
MarianTokenizer.from_pretrained(save_dir)
def test_outputs_not_longer_than_maxlen(self):
tok = self.get_tokenizer()
batch = tok.prepare_translation_batch(
["I am a small frog" * 1000, "I am a small frog"], return_tensors=FRAMEWORK
)
self.assertIsInstance(batch, BatchEncoding)
self.assertEqual(batch.input_ids.shape, (2, 512))
def test_outputs_can_be_shorter(self):
tok = self.get_tokenizer()
batch_smaller = tok.prepare_translation_batch(
["I am a tiny frog", "I am a small frog"], return_tensors=FRAMEWORK
)
self.assertIsInstance(batch_smaller, BatchEncoding)
self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
| 3,683 | 38.191489 | 105 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_modeling_electra.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
from transformers import (
ElectraConfig,
ElectraModel,
ElectraForMaskedLM,
ElectraForTokenClassification,
ElectraForPreTraining,
ElectraForMultipleChoice,
ElectraForSequenceClassification,
ElectraForQuestionAnswering,
)
from transformers.modeling_electra import ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST
class ElectraModelTester:
def __init__(
self, parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_mask = True
self.use_token_type_ids = True
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
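        # Per-token 0/1 labels for ELECTRA's replaced-token-detection (pretraining) head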
fake_token_labels = ids_tensor([self.batch_size, self.seq_length], 1)
config = ElectraConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
fake_token_labels,
)
def check_loss_output(self, result):
self.parent.assertListEqual(list(result["loss"].size()), [])
def create_and_check_electra_model(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
fake_token_labels,
):
model = ElectraModel(config=config)
model.to(torch_device)
model.eval()
(sequence_output,) = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
(sequence_output,) = model(input_ids, token_type_ids=token_type_ids)
(sequence_output,) = model(input_ids)
result = {
"sequence_output": sequence_output,
}
self.parent.assertListEqual(
list(result["sequence_output"].size()), [self.batch_size, self.seq_length, self.hidden_size]
)
def create_and_check_electra_for_masked_lm(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
fake_token_labels,
):
model = ElectraForMaskedLM(config=config)
model.to(torch_device)
model.eval()
loss, prediction_scores = model(
input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
)
result = {
"loss": loss,
"prediction_scores": prediction_scores,
}
self.parent.assertListEqual(
list(result["prediction_scores"].size()), [self.batch_size, self.seq_length, self.vocab_size]
)
self.check_loss_output(result)
def create_and_check_electra_for_token_classification(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
fake_token_labels,
):
config.num_labels = self.num_labels
model = ElectraForTokenClassification(config=config)
model.to(torch_device)
model.eval()
loss, logits = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
result = {
"loss": loss,
"logits": logits,
}
self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.seq_length, self.num_labels])
self.check_loss_output(result)
def create_and_check_electra_for_pretraining(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
fake_token_labels,
):
config.num_labels = self.num_labels
model = ElectraForPreTraining(config=config)
model.to(torch_device)
model.eval()
loss, logits = model(
input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=fake_token_labels
)
result = {
"loss": loss,
"logits": logits,
}
self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.seq_length])
self.check_loss_output(result)
def create_and_check_electra_for_sequence_classification(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
fake_token_labels,
):
config.num_labels = self.num_labels
model = ElectraForSequenceClassification(config)
model.to(torch_device)
model.eval()
loss, logits = model(
input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels
)
result = {
"loss": loss,
"logits": logits,
}
self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.num_labels])
self.check_loss_output(result)
def create_and_check_electra_for_question_answering(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
fake_token_labels,
):
model = ElectraForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
loss, start_logits, end_logits = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
result = {
"loss": loss,
"start_logits": start_logits,
"end_logits": end_logits,
}
self.parent.assertListEqual(list(result["start_logits"].size()), [self.batch_size, self.seq_length])
self.parent.assertListEqual(list(result["end_logits"].size()), [self.batch_size, self.seq_length])
self.check_loss_output(result)
def create_and_check_electra_for_multiple_choice(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
fake_token_labels,
):
config.num_choices = self.num_choices
model = ElectraForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
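        # Expand each input from (batch_size, seq_length) to (batch_size, num_choices, seq_length) so every choice shares the same tokens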
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
loss, logits = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
result = {
"loss": loss,
"logits": logits,
}
self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.num_choices])
self.check_loss_output(result)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
fake_token_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class ElectraModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
ElectraModel,
ElectraForPreTraining,
ElectraForMaskedLM,
ElectraForTokenClassification,
ElectraForSequenceClassification,
ElectraForQuestionAnswering,
)
if is_torch_available()
else ()
)
def setUp(self):
self.model_tester = ElectraModelTester(self)
self.config_tester = ConfigTester(self, config_class=ElectraConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_electra_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_masked_lm(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_token_classification(*config_and_inputs)
def test_for_pre_training(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_pretraining(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_sequence_classification(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_question_answering(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_multiple_choice(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ElectraModel.from_pretrained(model_name)
self.assertIsNotNone(model)
| 12,928 | 33.662198 | 119 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_tokenization_utils.py | # coding=utf-8
# Copyright 2018 HuggingFace Inc..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import unittest
from typing import Callable, Optional
from transformers import BatchEncoding, BertTokenizer, BertTokenizerFast, PreTrainedTokenizer, TensorType
from transformers.testing_utils import require_tf, require_torch, slow
from transformers.tokenization_gpt2 import GPT2Tokenizer
class TokenizerUtilsTest(unittest.TestCase):
def check_tokenizer_from_pretrained(self, tokenizer_class):
s3_models = list(tokenizer_class.max_model_input_sizes.keys())
for model_name in s3_models[:1]:
tokenizer = tokenizer_class.from_pretrained(model_name)
self.assertIsNotNone(tokenizer)
self.assertIsInstance(tokenizer, tokenizer_class)
self.assertIsInstance(tokenizer, PreTrainedTokenizer)
for special_tok in tokenizer.all_special_tokens:
self.assertIsInstance(special_tok, str)
special_tok_id = tokenizer.convert_tokens_to_ids(special_tok)
self.assertIsInstance(special_tok_id, int)
def assert_dump_and_restore(self, be_original: BatchEncoding, equal_op: Optional[Callable] = None):
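        # Round-trip the BatchEncoding through pickle and verify the restored object matches the original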
batch_encoding_str = pickle.dumps(be_original)
self.assertIsNotNone(batch_encoding_str)
be_restored = pickle.loads(batch_encoding_str)
# Ensure is_fast is correctly restored
self.assertEqual(be_restored.is_fast, be_original.is_fast)
        # Ensure the underlying encodings are restored for fast tokenizers (and absent for slow ones)
if be_original.is_fast:
self.assertIsNotNone(be_restored.encodings)
else:
self.assertIsNone(be_restored.encodings)
        # Ensure the stored values match between the original and the restored encoding
for original_v, restored_v in zip(be_original.values(), be_restored.values()):
if equal_op:
self.assertTrue(equal_op(restored_v, original_v))
else:
self.assertEqual(restored_v, original_v)
@slow
def test_pretrained_tokenizers(self):
self.check_tokenizer_from_pretrained(GPT2Tokenizer)
def test_tensor_type_from_str(self):
self.assertEqual(TensorType("tf"), TensorType.TENSORFLOW)
self.assertEqual(TensorType("pt"), TensorType.PYTORCH)
self.assertEqual(TensorType("np"), TensorType.NUMPY)
def test_batch_encoding_pickle(self):
import numpy as np
tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased")
tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased")
# Python no tensor
with self.subTest("BatchEncoding (Python, return_tensors=None)"):
self.assert_dump_and_restore(tokenizer_p("Small example to encode"))
with self.subTest("BatchEncoding (Python, return_tensors=NUMPY)"):
self.assert_dump_and_restore(
tokenizer_p("Small example to encode", return_tensors=TensorType.NUMPY), np.array_equal
)
with self.subTest("BatchEncoding (Rust, return_tensors=None)"):
self.assert_dump_and_restore(tokenizer_r("Small example to encode"))
with self.subTest("BatchEncoding (Rust, return_tensors=NUMPY)"):
self.assert_dump_and_restore(
tokenizer_r("Small example to encode", return_tensors=TensorType.NUMPY), np.array_equal
)
@require_tf
def test_batch_encoding_pickle_tf(self):
import tensorflow as tf
def tf_array_equals(t1, t2):
return tf.reduce_all(tf.equal(t1, t2))
tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased")
tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased")
with self.subTest("BatchEncoding (Python, return_tensors=TENSORFLOW)"):
self.assert_dump_and_restore(
tokenizer_p("Small example to encode", return_tensors=TensorType.TENSORFLOW), tf_array_equals
)
with self.subTest("BatchEncoding (Rust, return_tensors=TENSORFLOW)"):
self.assert_dump_and_restore(
tokenizer_r("Small example to encode", return_tensors=TensorType.TENSORFLOW), tf_array_equals
)
@require_torch
def test_batch_encoding_pickle_pt(self):
import torch
tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased")
tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased")
with self.subTest("BatchEncoding (Python, return_tensors=PYTORCH)"):
self.assert_dump_and_restore(
tokenizer_p("Small example to encode", return_tensors=TensorType.PYTORCH), torch.equal
)
with self.subTest("BatchEncoding (Rust, return_tensors=PYTORCH)"):
self.assert_dump_and_restore(
tokenizer_r("Small example to encode", return_tensors=TensorType.PYTORCH), torch.equal
)
def test_batch_encoding_is_fast(self):
tokenizer_p = BertTokenizer.from_pretrained("bert-base-cased")
tokenizer_r = BertTokenizerFast.from_pretrained("bert-base-cased")
with self.subTest("Python Tokenizer"):
self.assertFalse(tokenizer_p("Small example to_encode").is_fast)
with self.subTest("Rust Tokenizer"):
self.assertTrue(tokenizer_r("Small example to_encode").is_fast)
| 5,841 | 41.333333 | 109 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_onnx.py | import unittest
from os.path import dirname, exists
from shutil import rmtree
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import convert, ensure_valid_input, infer_shapes
from transformers.testing_utils import require_tf, require_torch, slow
class FuncContiguousArgs:
def forward(self, input_ids, token_type_ids, attention_mask):
return None
class FuncNonContiguousArgs:
def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
return None
class OnnxExportTestCase(unittest.TestCase):
MODEL_TO_TEST = ["bert-base-cased", "gpt2", "roberta-base"]
@require_tf
@slow
def test_export_tensorflow(self):
for model in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(model, "tf", 11)
@require_torch
@slow
def test_export_pytorch(self):
for model in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(model, "pt", 11)
@require_torch
@slow
def test_export_custom_bert_model(self):
from transformers import BertModel
vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t") as vocab_file:
vocab_file.write("\n".join(vocab))
vocab_file.flush()
tokenizer = BertTokenizerFast(vocab_file.name)
with TemporaryDirectory() as bert_save_dir:
model = BertModel(BertConfig(vocab_size=len(vocab)))
model.save_pretrained(bert_save_dir)
self._test_export(bert_save_dir, "pt", 11, tokenizer)
def _test_export(self, model, framework, opset, tokenizer=None):
try:
# Compute path
with TemporaryDirectory() as tempdir:
path = tempdir + "/model.onnx"
# Remove folder if exists
if exists(dirname(path)):
rmtree(dirname(path))
# Export
convert(framework, model, path, opset, tokenizer)
except Exception as e:
self.fail(e)
@require_torch
def test_infer_dynamic_axis_pytorch(self):
"""
        Validate that the dynamic axes generated for each parameter are correct
"""
from transformers import BertModel
model = BertModel(BertConfig.from_pretrained("bert-base-cased"))
tokenizer = BertTokenizerFast.from_pretrained("bert-base-cased")
self._test_infer_dynamic_axis(model, tokenizer, "pt")
@require_tf
def test_infer_dynamic_axis_tf(self):
"""
        Validate that the dynamic axes generated for each parameter are correct
"""
from transformers import TFBertModel
model = TFBertModel(BertConfig.from_pretrained("bert-base-cased"))
tokenizer = BertTokenizerFast.from_pretrained("bert-base-cased")
self._test_infer_dynamic_axis(model, tokenizer, "tf")
def _test_infer_dynamic_axis(self, model, tokenizer, framework):
nlp = FeatureExtractionPipeline(model, tokenizer)
variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)
# Assert all variables are present
self.assertEqual(len(shapes), len(variable_names))
self.assertTrue(all([var_name in shapes for var_name in variable_names]))
self.assertSequenceEqual(variable_names[:3], input_vars)
self.assertSequenceEqual(variable_names[3:], output_vars)
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
self.assertDictEqual(shapes["output_1"], {0: "batch"})
def test_ensure_valid_input(self):
"""
Validate parameters are correctly exported
GPT2 has "past" parameter in the middle of input_ids, token_type_ids and attention_mask.
ONNX doesn't support export with a dictionary, only a tuple. Thus we need to ensure we remove
        token_type_ids and attention_mask for now so that we do not end up with a None tensor in the middle
"""
# All generated args are valid
input_names = ["input_ids", "attention_mask", "token_type_ids"]
tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(inputs_args), 3)
# Should have exactly the same input names
self.assertEqual(set(ordered_input_names), set(input_names))
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)
        # Should have exactly one arg (everything before the parameter that was not provided, "some_other_args")
self.assertEqual(len(inputs_args), 1)
self.assertEqual(len(ordered_input_names), 1)
# Should have only "input_ids"
self.assertEqual(inputs_args[0], tokens["input_ids"])
self.assertEqual(ordered_input_names[0], "input_ids")
| 5,853 | 40.51773 | 112 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_activations.py | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.activations import _gelu_python, get_activation, gelu_new
import torch
@require_torch
class TestActivations(unittest.TestCase):
def test_gelu_versions(self):
x = torch.Tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
torch_builtin = get_activation("gelu")
self.assertTrue(torch.eq(_gelu_python(x), torch_builtin(x)).all().item())
self.assertFalse(torch.eq(_gelu_python(x), gelu_new(x)).all().item())
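        # For reference: the exact GELU is x * 0.5 * (1 + erf(x / sqrt(2))), while gelu_new uses the tanh
        # approximation 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3))), so the Python
        # implementation should match torch's builtin exactly but only approximate gelu_new.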
def test_get_activation(self):
get_activation("swish")
get_activation("relu")
get_activation("tanh")
get_activation("gelu_new")
get_activation("gelu_fast")
with self.assertRaises(KeyError):
get_activation("bogus")
with self.assertRaises(KeyError):
get_activation(None)
| 953 | 30.8 | 81 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_pipelines.py | import unittest
from typing import Iterable, List, Optional
from transformers import pipeline
from transformers.pipelines import SUPPORTED_TASKS, DefaultArgumentHandler, Pipeline
from transformers.testing_utils import require_tf, require_torch, slow, torch_device
DEFAULT_DEVICE_NUM = -1 if torch_device == "cpu" else 0
VALID_INPUTS = ["A simple string", ["list of strings"]]
NER_FINETUNED_MODELS = ["sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english"]
# xlnet-base-cased disabled for now, since it crashes TF2
FEATURE_EXTRACT_FINETUNED_MODELS = ["sshleifer/tiny-distilbert-base-cased"]
TEXT_CLASSIF_FINETUNED_MODELS = ["sshleifer/tiny-distilbert-base-uncased-finetuned-sst-2-english"]
TEXT_GENERATION_FINETUNED_MODELS = ["sshleifer/tiny-ctrl"]
FILL_MASK_FINETUNED_MODELS = ["sshleifer/tiny-distilroberta-base"]
LARGE_FILL_MASK_FINETUNED_MODELS = ["distilroberta-base"] # @slow
SUMMARIZATION_FINETUNED_MODELS = ["sshleifer/bart-tiny-random", "patrickvonplaten/t5-tiny-random"]
TF_SUMMARIZATION_FINETUNED_MODELS = ["patrickvonplaten/t5-tiny-random"]
TRANSLATION_FINETUNED_MODELS = [
("patrickvonplaten/t5-tiny-random", "translation_en_to_de"),
("patrickvonplaten/t5-tiny-random", "translation_en_to_ro"),
]
TF_TRANSLATION_FINETUNED_MODELS = [("patrickvonplaten/t5-tiny-random", "translation_en_to_fr")]
expected_fill_mask_result = [
[
{"sequence": "<s>My name is John</s>", "score": 0.00782308354973793, "token": 610, "token_str": "ĠJohn"},
{"sequence": "<s>My name is Chris</s>", "score": 0.007475061342120171, "token": 1573, "token_str": "ĠChris"},
],
[
{"sequence": "<s>The largest city in France is Paris</s>", "score": 0.3185044229030609, "token": 2201},
{"sequence": "<s>The largest city in France is Lyon</s>", "score": 0.21112334728240967, "token": 12790},
],
]
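# The "Ġ" prefix in token_str is the byte-level BPE marker that RoBERTa-style tokenizers use for a
# leading space; it is part of the vocabulary entry, not a stray encoding artifact.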
SUMMARIZATION_KWARGS = dict(num_beams=2, min_length=2, max_length=5)
class DefaultArgumentHandlerTestCase(unittest.TestCase):
def setUp(self) -> None:
self.handler = DefaultArgumentHandler()
def test_kwargs_x(self):
mono_data = {"X": "This is a sample input"}
mono_args = self.handler(**mono_data)
self.assertTrue(isinstance(mono_args, list))
self.assertEqual(len(mono_args), 1)
multi_data = {"x": ["This is a sample input", "This is a second sample input"]}
multi_args = self.handler(**multi_data)
self.assertTrue(isinstance(multi_args, list))
self.assertEqual(len(multi_args), 2)
def test_kwargs_data(self):
mono_data = {"data": "This is a sample input"}
mono_args = self.handler(**mono_data)
self.assertTrue(isinstance(mono_args, list))
self.assertEqual(len(mono_args), 1)
multi_data = {"data": ["This is a sample input", "This is a second sample input"]}
multi_args = self.handler(**multi_data)
self.assertTrue(isinstance(multi_args, list))
self.assertEqual(len(multi_args), 2)
def test_multi_kwargs(self):
mono_data = {"data": "This is a sample input", "X": "This is a sample input 2"}
mono_args = self.handler(**mono_data)
self.assertTrue(isinstance(mono_args, list))
self.assertEqual(len(mono_args), 2)
multi_data = {
"data": ["This is a sample input", "This is a second sample input"],
"test": ["This is a sample input 2", "This is a second sample input 2"],
}
multi_args = self.handler(**multi_data)
self.assertTrue(isinstance(multi_args, list))
self.assertEqual(len(multi_args), 4)
def test_args(self):
mono_data = "This is a sample input"
mono_args = self.handler(mono_data)
self.assertTrue(isinstance(mono_args, list))
self.assertEqual(len(mono_args), 1)
mono_data = ["This is a sample input"]
mono_args = self.handler(mono_data)
self.assertTrue(isinstance(mono_args, list))
self.assertEqual(len(mono_args), 1)
multi_data = ["This is a sample input", "This is a second sample input"]
multi_args = self.handler(multi_data)
self.assertTrue(isinstance(multi_args, list))
self.assertEqual(len(multi_args), 2)
multi_data = ["This is a sample input", "This is a second sample input"]
multi_args = self.handler(*multi_data)
self.assertTrue(isinstance(multi_args, list))
self.assertEqual(len(multi_args), 2)
class MonoColumnInputTestCase(unittest.TestCase):
def _test_mono_column_pipeline(
self,
nlp: Pipeline,
valid_inputs: List,
output_keys: Iterable[str],
invalid_inputs: List = [None],
expected_multi_result: Optional[List] = None,
expected_check_keys: Optional[List[str]] = None,
**kwargs,
):
self.assertIsNotNone(nlp)
mono_result = nlp(valid_inputs[0], **kwargs)
self.assertIsInstance(mono_result, list)
self.assertIsInstance(mono_result[0], (dict, list))
if isinstance(mono_result[0], list):
mono_result = mono_result[0]
for key in output_keys:
self.assertIn(key, mono_result[0])
multi_result = [nlp(input) for input in valid_inputs]
self.assertIsInstance(multi_result, list)
self.assertIsInstance(multi_result[0], (dict, list))
if expected_multi_result is not None:
for result, expect in zip(multi_result, expected_multi_result):
for key in expected_check_keys or []:
self.assertEqual(
set([o[key] for o in result]), set([o[key] for o in expect]),
)
if isinstance(multi_result[0], list):
multi_result = multi_result[0]
for result in multi_result:
for key in output_keys:
self.assertIn(key, result)
self.assertRaises(Exception, nlp, invalid_inputs)
@require_torch
def test_torch_ner(self):
mandatory_keys = {"entity", "word", "score"}
for model_name in NER_FINETUNED_MODELS:
nlp = pipeline(task="ner", model=model_name, tokenizer=model_name)
self._test_mono_column_pipeline(nlp, VALID_INPUTS, mandatory_keys)
@require_torch
def test_ner_grouped(self):
mandatory_keys = {"entity_group", "word", "score"}
for model_name in NER_FINETUNED_MODELS:
nlp = pipeline(task="ner", model=model_name, tokenizer=model_name, grouped_entities=True)
self._test_mono_column_pipeline(nlp, VALID_INPUTS, mandatory_keys)
@require_tf
def test_tf_ner(self):
mandatory_keys = {"entity", "word", "score"}
for model_name in NER_FINETUNED_MODELS:
nlp = pipeline(task="ner", model=model_name, tokenizer=model_name, framework="tf")
self._test_mono_column_pipeline(nlp, VALID_INPUTS, mandatory_keys)
@require_tf
def test_tf_ner_grouped(self):
mandatory_keys = {"entity_group", "word", "score"}
for model_name in NER_FINETUNED_MODELS:
nlp = pipeline(task="ner", model=model_name, tokenizer=model_name, framework="tf", grouped_entities=True)
self._test_mono_column_pipeline(nlp, VALID_INPUTS, mandatory_keys)
@require_torch
def test_torch_sentiment_analysis(self):
mandatory_keys = {"label", "score"}
for model_name in TEXT_CLASSIF_FINETUNED_MODELS:
nlp = pipeline(task="sentiment-analysis", model=model_name, tokenizer=model_name)
self._test_mono_column_pipeline(nlp, VALID_INPUTS, mandatory_keys)
@require_tf
def test_tf_sentiment_analysis(self):
mandatory_keys = {"label", "score"}
for model_name in TEXT_CLASSIF_FINETUNED_MODELS:
nlp = pipeline(task="sentiment-analysis", model=model_name, tokenizer=model_name, framework="tf")
self._test_mono_column_pipeline(nlp, VALID_INPUTS, mandatory_keys)
@require_torch
def test_torch_feature_extraction(self):
for model_name in FEATURE_EXTRACT_FINETUNED_MODELS:
nlp = pipeline(task="feature-extraction", model=model_name, tokenizer=model_name)
self._test_mono_column_pipeline(nlp, VALID_INPUTS, {})
@require_tf
def test_tf_feature_extraction(self):
for model_name in FEATURE_EXTRACT_FINETUNED_MODELS:
nlp = pipeline(task="feature-extraction", model=model_name, tokenizer=model_name, framework="tf")
self._test_mono_column_pipeline(nlp, VALID_INPUTS, {})
@require_torch
def test_torch_fill_mask(self):
mandatory_keys = {"sequence", "score", "token"}
valid_inputs = [
"My name is <mask>",
"The largest city in France is <mask>",
]
invalid_inputs = [
"This is <mask> <mask>" # More than 1 mask_token in the input is not supported
"This is" # No mask_token is not supported
]
for model_name in FILL_MASK_FINETUNED_MODELS:
nlp = pipeline(task="fill-mask", model=model_name, tokenizer=model_name, framework="pt", topk=2,)
self._test_mono_column_pipeline(
nlp, valid_inputs, mandatory_keys, invalid_inputs, expected_check_keys=["sequence"]
)
@require_tf
def test_tf_fill_mask(self):
mandatory_keys = {"sequence", "score", "token"}
valid_inputs = [
"My name is <mask>",
"The largest city in France is <mask>",
]
invalid_inputs = [
"This is <mask> <mask>" # More than 1 mask_token in the input is not supported
"This is" # No mask_token is not supported
]
for model_name in FILL_MASK_FINETUNED_MODELS:
nlp = pipeline(task="fill-mask", model=model_name, tokenizer=model_name, framework="tf", topk=2,)
self._test_mono_column_pipeline(
nlp, valid_inputs, mandatory_keys, invalid_inputs, expected_check_keys=["sequence"]
)
@require_torch
@slow
def test_torch_fill_mask_results(self):
mandatory_keys = {"sequence", "score", "token"}
valid_inputs = [
"My name is <mask>",
"The largest city in France is <mask>",
]
for model_name in LARGE_FILL_MASK_FINETUNED_MODELS:
nlp = pipeline(task="fill-mask", model=model_name, tokenizer=model_name, framework="pt", topk=2,)
self._test_mono_column_pipeline(
nlp,
valid_inputs,
mandatory_keys,
expected_multi_result=expected_fill_mask_result,
expected_check_keys=["sequence"],
)
@require_tf
@slow
def test_tf_fill_mask_results(self):
mandatory_keys = {"sequence", "score", "token"}
valid_inputs = [
"My name is <mask>",
"The largest city in France is <mask>",
]
for model_name in LARGE_FILL_MASK_FINETUNED_MODELS:
nlp = pipeline(task="fill-mask", model=model_name, tokenizer=model_name, framework="tf", topk=2)
self._test_mono_column_pipeline(
nlp,
valid_inputs,
mandatory_keys,
expected_multi_result=expected_fill_mask_result,
expected_check_keys=["sequence"],
)
@require_torch
def test_torch_summarization(self):
invalid_inputs = [4, "<mask>"]
mandatory_keys = ["summary_text"]
for model in SUMMARIZATION_FINETUNED_MODELS:
nlp = pipeline(task="summarization", model=model, tokenizer=model)
self._test_mono_column_pipeline(
nlp, VALID_INPUTS, mandatory_keys, invalid_inputs=invalid_inputs, **SUMMARIZATION_KWARGS
)
@slow
@require_torch
def test_integration_torch_summarization(self):
nlp = pipeline(task="summarization", device=DEFAULT_DEVICE_NUM)
        cnn_article = (
            ' (CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based. The Palestinians signed the ICC\'s founding Rome Statute in January, when they also accepted its jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the situation in Palestinian territories, paving the way for possible war crimes investigations against Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and the United States, neither of which is an ICC member, opposed the Palestinians\' efforts to join the body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday\'s ceremony, said it was a move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the world is also a step closer to ending a long era of impunity and injustice," he said, according to an ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine acquires all the rights as well as responsibilities that come with being a State Party to the Statute. These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should immediately end their pressure, and countries that support universal acceptance of the court\'s treaty should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the group. "What\'s objectionable is the attempts to undermine international justice, not Palestine\'s decision to join a treaty to which over 100 countries around the world are members." In January, when the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an outrage, saying the court was overstepping its boundaries. The United States also said it "strongly" disagreed with the court\'s decision. "As we have said repeatedly, we do not believe that Palestine is a state and therefore we do not believe that it is eligible to join the ICC," the State Department said in a statement. It urged the warring sides to resolve their differences through direct negotiations. "We will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace," it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou Bensouda said her office would "conduct its analysis in full independence and impartiality." The war between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry will include alleged war crimes committed since June. The International Criminal Court was set up in 2002 to prosecute genocide, crimes against humanity and war crimes. '
            'CNN\'s Vasco Cotovio, Kareem Khadder and Faith Karimi contributed to this report.'
        )
expected_cnn_summary = " The Palestinian Authority becomes the 123rd member of the International Criminal Court . The move gives the court jurisdiction over alleged crimes in Palestinian territories . Israel and the United States opposed the Palestinians' efforts to join the court . Rights group Human Rights Watch welcomes the move, says governments seeking to penalize Palestine should end pressure ."
result = nlp(cnn_article)
self.assertEqual(result[0]["summary_text"], expected_cnn_summary)
@slow
@require_tf
def test_tf_summarization(self):
invalid_inputs = [4, "<mask>"]
mandatory_keys = ["summary_text"]
for model_name in TF_SUMMARIZATION_FINETUNED_MODELS:
nlp = pipeline(task="summarization", model=model_name, tokenizer=model_name, framework="tf",)
self._test_mono_column_pipeline(
nlp, VALID_INPUTS, mandatory_keys, invalid_inputs=invalid_inputs, **SUMMARIZATION_KWARGS
)
@require_torch
def test_torch_translation(self):
invalid_inputs = [4, "<mask>"]
mandatory_keys = ["translation_text"]
for model_name, task in TRANSLATION_FINETUNED_MODELS:
nlp = pipeline(task=task, model=model_name, tokenizer=model_name)
self._test_mono_column_pipeline(
nlp, VALID_INPUTS, mandatory_keys, invalid_inputs,
)
@require_tf
@slow
def test_tf_translation(self):
invalid_inputs = [4, "<mask>"]
mandatory_keys = ["translation_text"]
for model, task in TF_TRANSLATION_FINETUNED_MODELS:
nlp = pipeline(task=task, model=model, tokenizer=model, framework="tf")
self._test_mono_column_pipeline(nlp, VALID_INPUTS, mandatory_keys, invalid_inputs=invalid_inputs)
@require_torch
def test_torch_text_generation(self):
for model_name in TEXT_GENERATION_FINETUNED_MODELS:
nlp = pipeline(task="text-generation", model=model_name, tokenizer=model_name, framework="pt")
self._test_mono_column_pipeline(nlp, VALID_INPUTS, {})
@require_tf
def test_tf_text_generation(self):
for model_name in TEXT_GENERATION_FINETUNED_MODELS:
nlp = pipeline(task="text-generation", model=model_name, tokenizer=model_name, framework="tf")
self._test_mono_column_pipeline(nlp, VALID_INPUTS, {})
QA_FINETUNED_MODELS = ["sshleifer/tiny-distilbert-base-cased-distilled-squad"]
class QAPipelineTests(unittest.TestCase):
def _test_qa_pipeline(self, nlp):
output_keys = {"score", "answer", "start", "end"}
valid_inputs = [
{"question": "Where was HuggingFace founded ?", "context": "HuggingFace was founded in Paris."},
{
"question": "In what field is HuggingFace working ?",
"context": "HuggingFace is a startup based in New-York founded in Paris which is trying to solve NLP.",
},
]
invalid_inputs = [
{"question": "", "context": "This is a test to try empty question edge case"},
{"question": None, "context": "This is a test to try empty question edge case"},
{"question": "What is does with empty context ?", "context": ""},
{"question": "What is does with empty context ?", "context": None},
]
self.assertIsNotNone(nlp)
mono_result = nlp(valid_inputs[0])
self.assertIsInstance(mono_result, dict)
for key in output_keys:
self.assertIn(key, mono_result)
multi_result = nlp(valid_inputs)
self.assertIsInstance(multi_result, list)
self.assertIsInstance(multi_result[0], dict)
for result in multi_result:
for key in output_keys:
self.assertIn(key, result)
for bad_input in invalid_inputs:
self.assertRaises(Exception, nlp, bad_input)
self.assertRaises(Exception, nlp, invalid_inputs)
@require_torch
def test_torch_question_answering(self):
for model_name in QA_FINETUNED_MODELS:
nlp = pipeline(task="question-answering", model=model_name, tokenizer=model_name)
self._test_qa_pipeline(nlp)
@require_tf
def test_tf_question_answering(self):
for model_name in QA_FINETUNED_MODELS:
nlp = pipeline(task="question-answering", model=model_name, tokenizer=model_name, framework="tf")
self._test_qa_pipeline(nlp)
class PipelineCommonTests(unittest.TestCase):
pipelines = SUPPORTED_TASKS.keys()
@slow
@require_tf
def test_tf_defaults(self):
# Test that pipelines can be correctly loaded without any argument
for task in self.pipelines:
with self.subTest(msg="Testing TF defaults with TF and {}".format(task)):
pipeline(task, framework="tf")
@slow
@require_torch
def test_pt_defaults(self):
# Test that pipelines can be correctly loaded without any argument
for task in self.pipelines:
with self.subTest(msg="Testing Torch defaults with PyTorch and {}".format(task)):
pipeline(task, framework="pt")
| 20,789 | 49.096386 | 3,645 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_modeling_reformer.py | # coding=utf-8
# Copyright 2020 Huggingface
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_multigpu, require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
if is_torch_available():
from transformers import (
ReformerConfig,
ReformerForMaskedLM,
ReformerModel,
ReformerModelWithLMHead,
ReformerTokenizer,
ReformerLayer,
ReformerForQuestionAnswering,
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
import torch
class ReformerModelTester:
def __init__(
self,
parent,
batch_size=None,
seq_length=None,
is_training=None,
is_decoder=None,
use_input_mask=None,
use_labels=None,
vocab_size=None,
attention_head_size=None,
hidden_size=None,
num_attention_heads=None,
local_attn_chunk_length=None,
local_num_chunks_before=None,
local_num_chunks_after=None,
num_buckets=None,
num_hashes=1,
lsh_attn_chunk_length=None,
lsh_num_chunks_before=None,
lsh_num_chunks_after=None,
chunk_size_lm_head=None,
chunk_size_feed_forward=None,
feed_forward_size=None,
hidden_act=None,
hidden_dropout_prob=None,
local_attention_probs_dropout_prob=None,
lsh_attention_probs_dropout_prob=None,
max_position_embeddings=None,
initializer_range=None,
axial_norm_std=None,
layer_norm_eps=None,
axial_pos_embds=None,
axial_pos_shape=None,
axial_pos_embds_dim=None,
attn_layers=None,
pad_token_id=None,
eos_token_id=None,
scope=None,
hash_seed=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.is_decoder = is_decoder
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.attention_head_size = attention_head_size
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.num_hidden_layers = len(attn_layers)
self.local_attn_chunk_length = local_attn_chunk_length
self.local_num_chunks_after = local_num_chunks_after
self.local_num_chunks_before = local_num_chunks_before
self.num_hashes = num_hashes
self.num_buckets = tuple(num_buckets) if isinstance(num_buckets, list) else num_buckets
self.lsh_attn_chunk_length = lsh_attn_chunk_length
self.lsh_num_chunks_after = lsh_num_chunks_after
self.lsh_num_chunks_before = lsh_num_chunks_before
self.hidden_act = hidden_act
self.feed_forward_size = feed_forward_size
self.hidden_dropout_prob = hidden_dropout_prob
self.local_attention_probs_dropout_prob = local_attention_probs_dropout_prob
self.lsh_attention_probs_dropout_prob = lsh_attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.axial_pos_embds = axial_pos_embds
self.axial_pos_shape = tuple(axial_pos_shape)
self.axial_pos_embds_dim = tuple(axial_pos_embds_dim)
self.axial_norm_std = axial_norm_std
self.chunk_size_lm_head = chunk_size_lm_head
self.chunk_size_feed_forward = chunk_size_feed_forward
self.scope = scope
self.attn_layers = attn_layers
self.pad_token_id = pad_token_id
self.hash_seed = hash_seed
attn_chunk_length = local_attn_chunk_length if local_attn_chunk_length is not None else lsh_attn_chunk_length
num_chunks_after = local_num_chunks_after if local_num_chunks_after is not None else lsh_num_chunks_after
num_chunks_before = local_num_chunks_before if local_num_chunks_before is not None else lsh_num_chunks_before
self.encoder_seq_length = seq_length // attn_chunk_length + (self.seq_length % attn_chunk_length != 0)
self.key_length = (num_chunks_before + num_chunks_after + 1) * attn_chunk_length
self.chunk_length = attn_chunk_length
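        # Each query chunk attends to itself plus `num_chunks_before` preceding and `num_chunks_after`
        # following chunks, hence the expected key length of (before + after + 1) * chunk_length used by
        # the shape checks below.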
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
choice_labels = None
if self.use_labels:
choice_labels = ids_tensor([self.batch_size], 2)
config = ReformerConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
feed_forward_size=self.feed_forward_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
local_attention_probs_dropout_prob=self.local_attention_probs_dropout_prob,
lsh_attention_probs_dropout_prob=self.lsh_attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
is_decoder=self.is_decoder,
axial_pos_embds=self.axial_pos_embds,
axial_pos_shape=self.axial_pos_shape,
axial_pos_embds_dim=self.axial_pos_embds_dim,
local_attn_chunk_length=self.local_attn_chunk_length,
local_num_chunks_after=self.local_num_chunks_after,
local_num_chunks_before=self.local_num_chunks_before,
num_hashes=self.num_hashes,
num_buckets=self.num_buckets,
lsh_attn_chunk_length=self.lsh_attn_chunk_length,
lsh_num_chunks_after=self.lsh_num_chunks_after,
lsh_num_chunks_before=self.lsh_num_chunks_before,
attn_layers=self.attn_layers,
pad_token_id=self.pad_token_id,
hash_seed=self.hash_seed,
)
return (
config,
input_ids,
input_mask,
choice_labels,
)
def check_loss_output(self, result):
self.parent.assertListEqual(list(result["loss"].size()), [])
def create_and_check_reformer_model(self, config, input_ids, input_mask, choice_labels):
model = ReformerModel(config=config)
model.to(torch_device)
model.eval()
(sequence_output,) = model(input_ids, attention_mask=input_mask)
(sequence_output,) = model(input_ids)
result = {
"sequence_output": sequence_output,
}
# 2 * hidden_size because we use reversible resnet layers
self.parent.assertListEqual(
list(result["sequence_output"].size()), [self.batch_size, self.seq_length, 2 * self.hidden_size],
)
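        # The trailing dimension is 2 * hidden_size because the reversible (RevNet-style) layers keep two
        # residual streams that are concatenated at the output instead of being summed.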
def create_and_check_reformer_model_with_lm_backward(self, config, input_ids, input_mask, choice_labels):
model = ReformerModelWithLMHead(config=config)
model.to(torch_device)
model.eval()
loss = model(input_ids, attention_mask=input_mask, labels=input_ids)[0]
loss.backward()
def create_and_check_reformer_with_lm(self, config, input_ids, input_mask, choice_labels):
model = ReformerModelWithLMHead(config=config)
model.to(torch_device)
model.eval()
loss, prediction_scores = model(input_ids, attention_mask=input_mask, labels=input_ids)
result = {
"loss": loss,
"prediction_scores": prediction_scores,
}
self.parent.assertListEqual(
list(result["prediction_scores"].size()), [self.batch_size, self.seq_length, self.vocab_size],
)
self.check_loss_output(result)
def create_and_check_reformer_with_mlm(self, config, input_ids, input_mask, choice_labels):
config.is_decoder = False
model = ReformerForMaskedLM(config=config)
model.to(torch_device)
model.eval()
loss, prediction_scores = model(input_ids, attention_mask=input_mask, labels=input_ids)
result = {
"loss": loss,
"prediction_scores": prediction_scores,
}
self.parent.assertListEqual(
list(result["prediction_scores"].size()), [self.batch_size, self.seq_length, self.vocab_size],
)
self.check_loss_output(result)
def create_and_check_reformer_model_with_attn_mask(
self, config, input_ids, input_mask, choice_labels, is_decoder=False
):
# no special position embeddings
config.axial_pos_embds = False
config.is_decoder = is_decoder
if self.lsh_attn_chunk_length is not None:
            # need to set the chunk length equal to the sequence length to be certain that chunking works
config.lsh_attn_chunk_length = self.seq_length
model = ReformerModel(config=config)
model.to(torch_device)
model.eval()
        # set all position encodings to zero so that positions don't matter
with torch.no_grad():
embedding = model.embeddings.position_embeddings.embedding
embedding.weight = torch.nn.Parameter(torch.zeros(embedding.weight.shape).to(torch_device))
embedding.weight.requires_grad = False
half_seq_len = self.seq_length // 2
roll = self.chunk_length
half_input_ids = input_ids[:, :half_seq_len]
# normal padded
attn_mask = torch.cat([torch.ones_like(half_input_ids), torch.zeros_like(half_input_ids)], dim=-1,)
input_ids_padded = torch.cat(
[half_input_ids, ids_tensor((self.batch_size, half_seq_len), self.vocab_size)], dim=-1,
)
# shifted padded
input_ids_roll = torch.cat(
[half_input_ids, ids_tensor((self.batch_size, half_seq_len), self.vocab_size)], dim=-1,
)
input_ids_roll = torch.roll(input_ids_roll, roll, dims=-1)
attn_mask_roll = torch.roll(attn_mask, roll, dims=-1)
output_padded = model(input_ids_padded, attention_mask=attn_mask)[0][:, :half_seq_len]
output_padded_rolled = model(input_ids_roll, attention_mask=attn_mask_roll)[0][:, roll : half_seq_len + roll]
self.parent.assertTrue(torch.allclose(output_padded, output_padded_rolled, atol=1e-3))
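        # With the position embeddings zeroed out, shifting both the inputs and the attention mask by
        # `roll` positions should leave the outputs for the real (unpadded) tokens unchanged, which is
        # what the allclose above verifies: masked padding must not leak into those positions.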
def create_and_check_reformer_layer_dropout_seed(
self, config, input_ids, input_mask, choice_labels, is_decoder=False
):
config.is_decoder = is_decoder
layer = ReformerLayer(config).to(torch_device)
layer.train()
shape = (
self.batch_size,
self.seq_length,
config.hidden_size,
) # Batch x SeqLen x hiddenSize
# get random tensors
hidden_states = floats_tensor(shape)
prev_attn_output = floats_tensor(shape)
        # now the random seeds for attention and feed forward are initialized
# forward tensors with dropout
layer_outputs = layer(prev_attn_output, hidden_states, attention_mask=input_mask)
next_attn_output = layer_outputs.attn_output
next_hidden_states = layer_outputs.hidden_states
torch.manual_seed(layer.attention_seed)
attn_outputs = layer.attention(hidden_states, attention_mask=input_mask)
self.parent.assertTrue(
torch.allclose(prev_attn_output + attn_outputs.hidden_states, next_attn_output, atol=1e-3,)
)
torch.manual_seed(layer.feed_forward_seed)
feed_forward_hidden_states = layer.feed_forward(next_attn_output)
self.parent.assertTrue(
torch.allclose(next_hidden_states, hidden_states + feed_forward_hidden_states, atol=1e-3,)
)
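        # The layer stores the seeds it used for its dropout (attention_seed / feed_forward_seed) so that
        # the reversible backward pass can re-run the forward computation with identical dropout masks;
        # re-seeding manually here reproduces the exact same sub-layer outputs.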
def create_and_check_reformer_feed_forward_chunking(self, config, input_ids, input_mask, choice_labels):
torch.manual_seed(0)
model = ReformerModel(config=config)
model.to(torch_device)
model.eval()
hidden_states_no_chunk = model(input_ids, attention_mask=input_mask)[0]
config.chunk_size_lm_head = 1
config.chunk_size_feed_forward = 1
torch.manual_seed(0)
model = ReformerModel(config=config)
model.to(torch_device)
model.eval()
hidden_states_with_chunk = model(input_ids, attention_mask=input_mask)[0]
self.parent.assertTrue(torch.allclose(hidden_states_no_chunk, hidden_states_with_chunk, atol=1e-3))
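        # chunk_size_feed_forward / chunk_size_lm_head only change *how* the feed-forward and LM-head
        # projections are applied (sequentially over slices of the sequence dimension to save memory),
        # not *what* they compute, so chunked and unchunked forward passes must match.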
def create_and_check_reformer_feed_backward_chunking(self, config, input_ids, input_mask, choice_labels):
if not self.is_training:
return
# disable dropout
config.hidden_dropout_prob = 0
config.local_attention_probs_dropout_prob = 0
config.lsh_attention_probs_dropout_prob = 0
torch.manual_seed(0)
model = ReformerModelWithLMHead(config=config)
model.to(torch_device)
model.train()
model.zero_grad()
loss_no_chunk, output_no_chunk = model(input_ids, labels=input_ids, attention_mask=input_mask)[:2]
loss_no_chunk.backward()
grad_slice_word_no_chunk = model.reformer.embeddings.word_embeddings.weight.grad[0, :5]
grad_slice_position_factor_1_no_chunk = model.reformer.embeddings.position_embeddings.weights[0][1, 0, -5:]
grad_slice_position_factor_2_no_chunk = model.reformer.embeddings.position_embeddings.weights[1][0, 1, :5]
config.chunk_size_lm_head = 1
config.chunk_size_feed_forward = 1
torch.manual_seed(0)
model = ReformerModelWithLMHead(config=config)
model.to(torch_device)
model.train()
model.zero_grad()
loss_chunk, output_chunk = model(input_ids, labels=input_ids, attention_mask=input_mask)[:2]
loss_chunk.backward()
grad_slice_word_chunk = model.reformer.embeddings.word_embeddings.weight.grad[0, :5]
grad_slice_position_factor_1_chunk = model.reformer.embeddings.position_embeddings.weights[0][1, 0, -5:]
grad_slice_position_factor_2_chunk = model.reformer.embeddings.position_embeddings.weights[1][0, 1, :5]
self.parent.assertTrue(torch.allclose(loss_chunk, loss_no_chunk, atol=1e-3))
self.parent.assertTrue(torch.allclose(grad_slice_word_no_chunk, grad_slice_word_chunk, atol=1e-3))
self.parent.assertTrue(
torch.allclose(grad_slice_position_factor_1_chunk, grad_slice_position_factor_1_no_chunk, atol=1e-3)
)
self.parent.assertTrue(
torch.allclose(grad_slice_position_factor_2_chunk, grad_slice_position_factor_2_no_chunk, atol=1e-3)
)
def create_and_check_reformer_random_seed(self, config, input_ids, input_mask, choice_labels):
layer = ReformerLayer(config).to(torch_device)
layer.train()
shape = (
self.batch_size,
self.seq_length,
config.hidden_size,
) # Batch x SeqLen x hiddenSize
hidden_states = floats_tensor(shape)
attn_output = floats_tensor(shape)
seeds = []
for _ in range(100):
layer_outputs = layer(attn_output, hidden_states, attention_mask=input_mask)
attn_output = layer_outputs.attn_output
hidden_states = layer_outputs.hidden_states
torch.manual_seed(layer.attention_seed)
seeds.append(layer.attention_seed)
self.parent.assertGreater(len(set(seeds)), 70)
seeds = []
for _ in range(100):
layer_outputs = layer(attn_output, hidden_states, attention_mask=input_mask)
attn_output = layer_outputs.attn_output
hidden_states = layer_outputs.hidden_states
torch.manual_seed(layer.feed_forward_seed)
seeds.append(layer.feed_forward_seed)
self.parent.assertGreater(len(set(seeds)), 70)
def create_and_check_reformer_model_fp16_forward(self, config, input_ids, input_mask, choice_labels):
model = ReformerModel(config=config)
model.to(torch_device)
model.half()
model.eval()
output = model(input_ids, attention_mask=input_mask)[0]
self.parent.assertFalse(torch.isnan(output).any().item())
def create_and_check_reformer_model_fp16_generate(self, config, input_ids, input_mask, choice_labels):
model = ReformerModelWithLMHead(config=config)
model.to(torch_device)
model.half()
model.eval()
output = model.generate(input_ids, attention_mask=input_mask, do_sample=False)
self.parent.assertFalse(torch.isnan(output).any().item())
def create_and_check_reformer_no_chunking(self, config, input_ids, input_mask, choice_labels):
# force chunk length to be bigger than input_ids
config.lsh_attn_chunk_length = 2 * input_ids.shape[-1]
config.local_attn_chunk_length = 2 * input_ids.shape[-1]
model = ReformerModelWithLMHead(config=config)
model.to(torch_device)
model.eval()
output_logits = model(input_ids, attention_mask=input_mask)[0]
self.parent.assertTrue(output_logits.shape[1] == input_ids.shape[-1])
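        # When the configured chunk length is larger than the sequence itself, attention presumably falls
        # back to a single full-length chunk; the model may pad internally, but the returned logits must
        # still cover exactly the original input length, which is what is asserted above.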
def create_and_check_longformer_for_question_answering(self, config, input_ids, input_mask, choice_labels):
model = ReformerForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
loss, start_logits, end_logits = model(
input_ids, attention_mask=input_mask, start_positions=choice_labels, end_positions=choice_labels,
)
result = {
"loss": loss,
"start_logits": start_logits,
"end_logits": end_logits,
}
self.parent.assertListEqual(list(result["start_logits"].size()), [self.batch_size, self.seq_length])
self.parent.assertListEqual(list(result["end_logits"].size()), [self.batch_size, self.seq_length])
self.check_loss_output(result)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, input_mask, choice_labels) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
class ReformerTesterMixin:
"""
Reformer Local and Reformer LSH run essentially the same tests
"""
def test_config(self):
self.config_tester.run_common_tests()
def test_reformer_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_reformer_model(*config_and_inputs)
def test_reformer_lm_model_backward(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_reformer_model_with_lm_backward(*config_and_inputs)
def test_reformer_model_attn_masking(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_reformer_model_with_attn_mask(*config_and_inputs, is_decoder=True)
self.model_tester.create_and_check_reformer_model_with_attn_mask(*config_and_inputs, is_decoder=False)
def test_reformer_with_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_reformer_with_lm(*config_and_inputs)
def test_reformer_with_mlm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_reformer_with_mlm(*config_and_inputs)
def test_reformer_layer_training_dropout(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_reformer_layer_dropout_seed(*config_and_inputs, is_decoder=True)
self.model_tester.create_and_check_reformer_layer_dropout_seed(*config_and_inputs, is_decoder=False)
def test_reformer_chunking_forward_equality(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_reformer_feed_forward_chunking(*config_and_inputs)
def test_reformer_chunking_backward_equality(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_reformer_feed_backward_chunking(*config_and_inputs)
def test_reformer_no_chunking(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_reformer_no_chunking(*config_and_inputs)
@slow
def test_dropout_random_seed_is_changing(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_reformer_random_seed(*config_and_inputs)
    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
def test_reformer_model_fp16_forward(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_reformer_model_fp16_forward(*config_and_inputs)
    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
def test_reformer_model_fp16_generate(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_reformer_model_fp16_generate(*config_and_inputs)
@require_multigpu
def test_multigpu_data_parallel_forward(self):
# Opt-out of this test.
pass
@require_torch
class ReformerLocalAttnModelTest(ReformerTesterMixin, ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(ReformerModel, ReformerModelWithLMHead, ReformerForQuestionAnswering) if is_torch_available() else ()
)
all_generative_model_classes = (ReformerModelWithLMHead,) if is_torch_available() else ()
test_pruning = False
test_headmasking = False
test_torchscript = False
def prepare_kwargs(self):
return {
"batch_size": 13,
"seq_length": 32,
"is_training": True,
"is_decoder": True,
"use_input_mask": True,
"use_labels": True,
"vocab_size": 32,
"attention_head_size": 16,
"hidden_size": 32,
"num_attention_heads": 2,
"local_attn_chunk_length": 4,
"local_num_chunks_before": 1,
"local_num_chunks_after": 0,
"chunk_size_lm_head": 0,
"chunk_size_feed_forward": 0,
"feed_forward_size": 32,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"local_attention_probs_dropout_prob": 0.1,
"max_position_embeddings": 512,
"initializer_range": 0.02,
"axial_norm_std": 1.0,
"layer_norm_eps": 1e-12,
"axial_pos_embds": True,
"axial_pos_shape": [4, 8],
"axial_pos_embds_dim": [16, 16],
"attn_layers": ["local", "local", "local", "local"],
"pad_token_id": 0,
"eos_token_id": 2,
"scope": None,
"hash_seed": 0,
}
def setUp(self):
tester_kwargs = self.prepare_kwargs()
self.model_tester = ReformerModelTester(self, **tester_kwargs)
self.config_tester = ConfigTester(self, config_class=ReformerConfig, hidden_size=37)
@slow
def test_model_from_pretrained(self):
for model_name in REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ReformerModelWithLMHead.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
class ReformerLSHAttnModelTest(ReformerTesterMixin, ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(ReformerModel, ReformerModelWithLMHead, ReformerForQuestionAnswering) if is_torch_available() else ()
)
all_generative_model_classes = (ReformerModelWithLMHead,) if is_torch_available() else ()
test_pruning = False
test_headmasking = False
test_torchscript = False
def prepare_kwargs(self):
return {
"batch_size": 13,
"seq_length": 13,
"use_input_mask": True,
"use_labels": True,
"is_training": False,
"is_decoder": True,
"vocab_size": 32,
"attention_head_size": 16,
"hidden_size": 64,
"num_attention_heads": 2,
"num_buckets": 2,
"num_hashes": 4,
"lsh_attn_chunk_length": 4,
"lsh_num_chunks_before": 2,
"lsh_num_chunks_after": 3,
"chunk_size_lm_head": 5,
"chunk_size_feed_forward": 6,
"feed_forward_size": 32,
"hidden_act": "relu",
"hidden_dropout_prob": 0.1,
"lsh_attention_probs_dropout_prob": 0.1,
"max_position_embeddings": 512,
"initializer_range": 0.02,
"axial_norm_std": 1.0,
"layer_norm_eps": 1e-12,
"axial_pos_embds": True,
"axial_pos_shape": [4, 8],
"axial_pos_embds_dim": [16, 48],
"attn_layers": ["lsh", "lsh", "lsh", "lsh"],
"pad_token_id": 0,
"eos_token_id": 2,
"scope": None,
"hash_seed": 0,
}
def setUp(self):
tester_kwargs = self.prepare_kwargs()
self.model_tester = ReformerModelTester(self, **tester_kwargs)
self.config_tester = ConfigTester(self, config_class=ReformerConfig, hidden_size=37)
@require_torch
class ReformerIntegrationTests(unittest.TestCase):
"""
    These integration tests test the current layer activations and gradients against the output of the
    Hugging Face Reformer model at the time of integration: 29/04/2020. During integration, the model was
    tested against the output of the official Trax ReformerLM model for various cases ("lsh" only, "local"
    only, masked / non-masked, different chunk lengths, ...). In order to recover the original Trax
    integration tests, one should use patrickvonplaten's fork of trax and the code that lives on the
    branch `branch_to_save_trax_integration_tests`.
"""
def _get_basic_config_and_input(self):
config = {
"vocab_size": 320,
"attention_head_size": 8,
"hidden_size": 16,
"num_attention_heads": 2,
"num_buckets": 2,
"num_hashes": 4,
"lsh_attn_chunk_length": 4,
"local_attn_chunk_length": 4,
"lsh_num_chunks_before": 1,
"lsh_num_chunks_after": 0,
"local_num_chunks_before": 1,
"local_num_chunks_after": 0,
"chunk_size_lm_head": 0,
"chunk_size_feed_forward": 0,
"feed_forward_size": 32,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.0,
"lsh_attention_probs_dropout_prob": 0.0,
"local_attention_probs_dropout_prob": 0.0,
"max_position_embeddings": 32,
"initializer_range": 0.02,
"axial_norm_std": 1.0,
"layer_norm_eps": 1e-12,
"sinusoidal_pos_embds": False,
"axial_pos_embds": True,
"axial_pos_shape": [4, 8],
"axial_pos_embds_dim": [8, 8],
"hash_seed": 0,
"is_decoder": True,
}
return config
def _get_hidden_states(self):
return torch.tensor(
[
[
[
1.90826353e00,
-1.45999730e00,
-6.20405462e-01,
1.52503433e00,
-3.64464232e-01,
-8.27359235e-01,
8.39670803e-01,
2.44492178e-01,
4.98332758e-01,
2.69175139e00,
-7.08081422e-03,
1.04915401e00,
-1.83476661e00,
7.67220476e-01,
2.98580543e-01,
2.84803992e-02,
],
[
-2.66374286e-02,
4.33497576e-01,
3.10386309e-01,
5.46039944e-01,
-2.47292666e-04,
-7.52305019e-01,
2.39162103e-01,
7.25216186e-01,
-7.58357372e-01,
4.20635998e-01,
-4.04739919e-02,
1.59924145e-01,
2.05135748e00,
-1.15997978e00,
5.37166397e-01,
2.62873606e-01,
],
[
1.85247482e-01,
7.07046037e-01,
-6.77089715e-01,
-2.24209655e00,
-3.75307980e-02,
-8.59380874e-01,
-2.81027884e00,
1.01276376e00,
-1.69438001e00,
4.17574660e-01,
-1.49196962e00,
-1.76483717e00,
-1.94566312e-01,
-1.71183858e00,
7.72903565e-01,
-1.11557056e00,
],
[
9.46069193e-01,
1.53417623e-01,
-9.58686996e-01,
1.18126669e-01,
1.75967724e00,
1.62194590e00,
-5.74108159e-01,
6.79920443e-01,
5.44028163e-01,
2.05466114e-01,
-3.63045868e-01,
2.41865062e-01,
3.20348382e-01,
-9.05611176e-01,
-1.92690727e-01,
-1.19917547e00,
],
]
],
dtype=torch.float32,
device=torch_device,
)
def _get_attn_mask(self):
return torch.tensor([[0, 1, 0, 0]], dtype=torch.long, device=torch_device)
def _get_input_ids_and_mask(self):
mask = torch.tensor(
[
[1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0],
],
dtype=torch.long,
device=torch_device,
)
input_ids = torch.tensor(
[
[
89,
279,
286,
84,
194,
316,
182,
28,
283,
37,
169,
7,
253,
267,
107,
250,
44,
7,
102,
62,
3,
243,
171,
265,
302,
48,
164,
264,
148,
229,
280,
150,
],
[
9,
192,
66,
112,
163,
83,
135,
70,
224,
96,
31,
80,
196,
80,
63,
22,
85,
100,
47,
283,
0,
163,
126,
143,
195,
82,
53,
82,
18,
27,
182,
52,
],
],
dtype=torch.long,
device=torch_device,
)
return input_ids, mask
def test_lsh_layer_forward(self):
config = self._get_basic_config_and_input()
config["lsh_num_chunks_before"] = 0
config["attn_layers"] = ["lsh"]
config["is_decoder"] = False
hidden_states = self._get_hidden_states()
torch.manual_seed(0)
layer = ReformerLayer(ReformerConfig(**config)).to(torch_device)
layer.eval()
reformer_output = layer(prev_attn_output=hidden_states.clone(), hidden_states=hidden_states)
output_slice = reformer_output.hidden_states[0, 0, :5]
expected_output_slice = torch.tensor(
[1.6879, -1.3083, -0.4708, 1.3555, -0.6292], dtype=torch.float, device=torch_device,
)
self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
def test_lsh_layer_forward_complex(self):
config = self._get_basic_config_and_input()
config["lsh_num_chunks_before"] = 0
config["attn_layers"] = ["lsh"]
config["num_buckets"] = [2, 4]
attn_mask = self._get_attn_mask()
hidden_states = self._get_hidden_states()
torch.manual_seed(0)
layer = ReformerLayer(ReformerConfig(**config)).to(torch_device)
layer.eval()
reformer_output = layer(
prev_attn_output=hidden_states.clone(), hidden_states=hidden_states, attention_mask=attn_mask,
)
output_slice = reformer_output.hidden_states[0, 0, :5]
expected_output_slice = torch.tensor(
[1.6439, -1.2306, -0.5108, 1.3006, -0.6537], dtype=torch.float, device=torch_device,
)
self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
def test_local_layer_forward(self):
config = self._get_basic_config_and_input()
config["local_num_chunks_before"] = 0
config["attn_layers"] = ["local"]
config["is_decoder"] = False
hidden_states = self._get_hidden_states()
torch.manual_seed(0)
layer = ReformerLayer(ReformerConfig(**config)).to(torch_device)
layer.eval()
reformer_output = layer(prev_attn_output=hidden_states, hidden_states=hidden_states)
output_slice = reformer_output.hidden_states[0, 0, :5]
expected_output_slice = torch.tensor(
[1.4212, -2.0576, -0.9688, 1.4599, -0.1344], dtype=torch.float, device=torch_device,
)
self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
def test_local_layer_forward_complex(self):
config = self._get_basic_config_and_input()
config["local_num_chunks_before"] = 0
config["attn_layers"] = ["local"]
attn_mask = self._get_attn_mask()
hidden_states = self._get_hidden_states()
torch.manual_seed(0)
layer = ReformerLayer(ReformerConfig(**config)).to(torch_device)
layer.eval()
reformer_output = layer(prev_attn_output=hidden_states, hidden_states=hidden_states, attention_mask=attn_mask,)
output_slice = reformer_output.hidden_states[0, 0, :5]
expected_output_slice = torch.tensor(
[1.4750, -2.0235, -0.9743, 1.4463, -0.1269], dtype=torch.float, device=torch_device,
)
self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
def test_lsh_model_forward(self):
config = self._get_basic_config_and_input()
config["attn_layers"] = ["lsh", "lsh", "lsh", "lsh"]
config["num_buckets"] = [2, 4]
torch.manual_seed(0)
model = ReformerModel(ReformerConfig(**config)).to(torch_device)
model.eval()
input_ids, attn_mask = self._get_input_ids_and_mask()
hidden_states = model(input_ids=input_ids, attention_mask=attn_mask)[0]
output_slice = hidden_states[0, 0, :5]
expected_output_slice = torch.tensor(
[-0.9896, -0.9396, -1.0831, -0.0597, 0.2456], dtype=torch.float, device=torch_device,
)
self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
def test_local_model_forward(self):
config = self._get_basic_config_and_input()
config["attn_layers"] = ["local", "local", "local", "local"]
torch.manual_seed(0)
model = ReformerModel(ReformerConfig(**config)).to(torch_device)
model.eval()
input_ids, attn_mask = self._get_input_ids_and_mask()
hidden_states = model(input_ids=input_ids, attention_mask=attn_mask)[0]
output_slice = hidden_states[0, 0, :5]
expected_output_slice = torch.tensor(
[-1.6791, 0.7171, 0.1594, 0.4063, 1.2584], dtype=torch.float, device=torch_device,
)
self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
def test_lm_model_forward(self):
config = self._get_basic_config_and_input()
config["attn_layers"] = ["local", "lsh", "local", "lsh", "local", "lsh"]
config["num_buckets"] = [2, 4]
config["is_decoder"] = False
torch.manual_seed(0)
model = ReformerForMaskedLM(ReformerConfig(**config)).to(torch_device)
model.eval()
input_ids, attn_mask = self._get_input_ids_and_mask()
hidden_states = model(input_ids=input_ids, attention_mask=attn_mask)[0]
output_slice = hidden_states[1, -1, :5]
expected_output_slice = torch.tensor(
[0.0324, -0.0121, 0.0615, 0.0031, -0.0297], dtype=torch.float, device=torch_device,
)
self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
def test_local_lm_model_grad(self):
config = self._get_basic_config_and_input()
config["attn_layers"] = ["local", "local", "local", "local"]
config["hidden_dropout_prob"] = 0.0
config["local_attention_probs_dropout_prob"] = 0.0
torch.manual_seed(0)
model = ReformerModelWithLMHead(ReformerConfig(**config)).to(torch_device)
model.train()
model.zero_grad()
input_ids, _ = self._get_input_ids_and_mask()
loss = model(input_ids=input_ids, labels=input_ids)[0]
self.assertTrue(torch.allclose(loss, torch.tensor(5.7786, dtype=torch.float, device=torch_device), atol=1e-3))
loss.backward()
        # check the last grads to cover all probable errors
grad_slice_word = model.reformer.embeddings.word_embeddings.weight.grad[0, :5]
expected_grad_slice_word = torch.tensor(
[-0.0005, 0.0001, 0.0002, 0.0003, 0.0006], dtype=torch.float, device=torch_device,
)
grad_slice_position_factor_1 = model.reformer.embeddings.position_embeddings.weights[0][1, 0, -5:]
expected_grad_slice_pos_fac_1 = torch.tensor(
[0.0037, -1.3793, -1.0231, -1.5230, -2.5306], dtype=torch.float, device=torch_device,
)
grad_slice_position_factor_2 = model.reformer.embeddings.position_embeddings.weights[1][0, 1, :5]
expected_grad_slice_pos_fac_2 = torch.tensor(
[-1.3165, 0.5168, 0.7785, 1.0811, -0.9830], dtype=torch.float, device=torch_device,
)
self.assertTrue(torch.allclose(grad_slice_word, expected_grad_slice_word, atol=1e-3))
self.assertTrue(torch.allclose(grad_slice_position_factor_1, expected_grad_slice_pos_fac_1, atol=1e-3))
self.assertTrue(torch.allclose(grad_slice_position_factor_2, expected_grad_slice_pos_fac_2, atol=1e-3))
def test_lsh_lm_model_grad(self):
config = self._get_basic_config_and_input()
config["attn_layers"] = ["lsh", "lsh", "lsh", "lsh"]
config["hidden_dropout_prob"] = 0.0
config["lsh_attention_probs_dropout_prob"] = 0.0
config["num_buckets"] = [2, 4]
config["num_hashes"] = 6
torch.manual_seed(0)
model = ReformerModelWithLMHead(ReformerConfig(**config)).to(torch_device)
model.train()
model.zero_grad()
input_ids, _ = self._get_input_ids_and_mask()
loss = model(input_ids=input_ids, labels=input_ids)[0]
self.assertTrue(torch.allclose(loss, torch.tensor(5.7819, dtype=torch.float, device=torch_device), atol=1e-3))
loss.backward()
        # check the last grads to cover all probable errors
grad_slice_word = model.reformer.embeddings.word_embeddings.weight.grad[0, :5]
expected_grad_slice_word = torch.tensor(
[2.6357e-05, 4.3358e-04, -8.4985e-04, 1.0094e-04, 3.8954e-04], dtype=torch.float, device=torch_device,
)
grad_slice_position_factor_1 = model.reformer.embeddings.position_embeddings.weights[0][1, 0, -5:]
expected_grad_slice_pos_fac_1 = torch.tensor(
[-0.0984, 0.6283, 0.4282, 1.2960, 0.6897], dtype=torch.float, device=torch_device,
)
grad_slice_position_factor_2 = model.reformer.embeddings.position_embeddings.weights[1][0, 1, :5]
expected_grad_slice_pos_fac_2 = torch.tensor(
[0.4626, -0.0231, -0.0172, 0.1081, 0.3805], dtype=torch.float, device=torch_device,
)
self.assertTrue(torch.allclose(grad_slice_word, expected_grad_slice_word, atol=1e-3))
self.assertTrue(torch.allclose(grad_slice_position_factor_1, expected_grad_slice_pos_fac_1, atol=1e-3))
self.assertTrue(torch.allclose(grad_slice_position_factor_2, expected_grad_slice_pos_fac_2, atol=1e-3))
@slow
def test_pretrained_generate_crime_and_punish(self):
model = ReformerModelWithLMHead.from_pretrained("google/reformer-crime-and-punishment").to(torch_device)
tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
model.eval()
input_ids = tokenizer.encode("A few months later", return_tensors="pt").to(torch_device)
output_ids = model.generate(
input_ids, max_length=50, num_beams=4, early_stopping=True, do_sample=False, num_hashes=8
)
output_text = tokenizer.decode(output_ids[0])
self.assertEqual(
output_text,
"A few months later state expression in his ideas, at the first entrance. He was positively for an inst",
)
| 43,396 | 41.256086 | 544 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_tokenization_xlm_roberta.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from transformers.file_utils import cached_property
from transformers.testing_utils import slow
from transformers.tokenization_xlm_roberta import SPIECE_UNDERLINE, XLMRobertaTokenizer
from .test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures/test_sentencepiece.model")
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = XLMRobertaTokenizer
def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname)
def test_full_tokenizer(self):
tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokens = tokenizer.tokenize("This is a test")
self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens),
[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
)
tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
tokens,
[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
],
)
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
ids,
[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
],
)
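        # The raw SentencePiece ids are shifted by tokenizer.fairseq_offset because the XLM-R / fairseq
        # vocabulary reserves the lowest ids for its own special tokens (<s>, <pad>, </s>, <unk>); the
        # "unk: 2 + 1 = 3" note above marks pieces the SentencePiece model maps to <unk>.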
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
back_tokens,
[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
],
)
@cached_property
def big_tokenizer(self):
return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
@slow
def test_tokenization_base_easy_symbols(self):
symbols = "Hello World!"
original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def test_tokenization_base_hard_symbols(self):
symbols = 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
| 6,189 | 29.048544 | 207 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_modeling_gpt2.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
import torch
from transformers import (
GPT2Config,
GPT2Model,
GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
GPT2LMHeadModel,
GPT2DoubleHeadsModel,
)
class GPT2ModelTester:
def __init__(
self,
parent,
batch_size=14,
seq_length=7,
is_training=True,
use_token_type_ids=True,
use_input_mask=True,
use_labels=True,
use_mc_token_ids=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = 14
self.seq_length = 7
self.is_training = True
self.use_token_type_ids = True
self.use_input_mask = True
self.use_labels = True
self.use_mc_token_ids = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
self.bos_token_id = vocab_size - 1
self.eos_token_id = vocab_size - 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
mc_token_ids = None
if self.use_mc_token_ids:
mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = GPT2Config(
vocab_size=self.vocab_size,
n_embd=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
# intermediate_size=self.intermediate_size,
# hidden_act=self.hidden_act,
# hidden_dropout_prob=self.hidden_dropout_prob,
# attention_probs_dropout_prob=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
n_ctx=self.max_position_embeddings,
# type_vocab_size=self.type_vocab_size,
# initializer_range=self.initializer_range
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
)
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def check_loss_output(self, result):
self.parent.assertListEqual(list(result["loss"].size()), [])
def create_and_check_gpt2_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = GPT2Model(config=config)
model.to(torch_device)
model.eval()
model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
model(input_ids, token_type_ids=token_type_ids)
sequence_output, presents = model(input_ids)
result = {
"sequence_output": sequence_output,
"presents": presents,
}
self.parent.assertListEqual(
list(result["sequence_output"].size()), [self.batch_size, self.seq_length, self.hidden_size],
)
self.parent.assertEqual(len(result["presents"]), config.n_layer)
def create_and_check_gpt2_model_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = GPT2Model(config=config)
model.to(torch_device)
model.eval()
# first forward pass
outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True)
outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids)
outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False)
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
output, past = outputs
        # create a hypothetical next token and extend it to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size)
# append to next input_ids and token_type_ids
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1)
output_from_no_past, _ = model(next_input_ids, token_type_ids=next_token_type_ids)
output_from_past, _ = model(next_tokens, token_type_ids=next_token_types, past=past)
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
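    # Same cache consistency check as above, but with the second half of the sequence masked out via the
    # attention mask; a token mutated inside the masked region after caching must not affect either output.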
def create_and_check_gpt2_model_attention_mask_past(
self, config, input_ids, input_mask, head_mask, token_type_ids, *args
):
model = GPT2Model(config=config)
model.to(torch_device)
model.eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = self.seq_length // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
output, past = model(input_ids, attention_mask=attn_mask)
        # create a hypothetical next token and extend it to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1,
)
# get two different outputs
output_from_no_past, _ = model(next_input_ids, attention_mask=attn_mask)
output_from_past, _ = model(next_tokens, past=past, attention_mask=attn_mask)
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = GPT2LMHeadModel(config)
model.to(torch_device)
model.eval()
loss, lm_logits, _ = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
result = {"loss": loss, "lm_logits": lm_logits}
self.parent.assertListEqual(list(result["loss"].size()), [])
self.parent.assertListEqual(
list(result["lm_logits"].size()), [self.batch_size, self.seq_length, self.vocab_size],
)
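    # GPT2DoubleHeadsModel adds a multiple-choice classification head next to the LM head; inputs are
    # expanded to (batch_size, num_choices, seq_length) and the shapes of lm_logits and mc_logits are checked.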
def create_and_check_double_lm_head_model(
self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *args
):
model = GPT2DoubleHeadsModel(config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
inputs = {
"input_ids": multiple_choice_inputs_ids,
"mc_token_ids": mc_token_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
"labels": multiple_choice_inputs_ids,
}
loss, lm_logits, mc_logits, _ = model(**inputs)
result = {"loss": loss, "lm_logits": lm_logits, "mc_logits": mc_logits}
self.parent.assertListEqual(list(result["loss"].size()), [])
self.parent.assertListEqual(
list(result["lm_logits"].size()), [self.batch_size, self.num_choices, self.seq_length, self.vocab_size],
)
self.parent.assertListEqual(list(result["mc_logits"].size()), [self.batch_size, self.num_choices])
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class GPT2ModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (GPT2Model, GPT2LMHeadModel, GPT2DoubleHeadsModel) if is_torch_available() else ()
all_generative_model_classes = (
(GPT2LMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
def setUp(self):
self.model_tester = GPT2ModelTester(self)
self.config_tester = ConfigTester(self, config_class=GPT2Config, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_gpt2_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt2_model(*config_and_inputs)
def test_gpt2_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt2_model_past(*config_and_inputs)
def test_gpt2_model_att_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt2_model_attention_mask_past(*config_and_inputs)
def test_gpt2_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
def test_gpt2_double_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = GPT2Model.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
class GPT2ModelLanguageGenerationTest(unittest.TestCase):
@slow
def test_lm_generate_gpt2(self):
model = GPT2LMHeadModel.from_pretrained("gpt2")
model.to(torch_device)
input_ids = torch.tensor([[464, 3290]], dtype=torch.long, device=torch_device) # The dog
expected_output_ids = [
464,
3290,
373,
1043,
287,
257,
2214,
1474,
262,
16246,
286,
2688,
290,
2688,
27262,
13,
198,
198,
464,
3290,
] # The dog was found in a field near the intersection of West and West Streets.\n\nThe dog
output_ids = model.generate(input_ids, do_sample=False)
self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
@slow
def test_lm_generate_distilgpt2(self):
model = GPT2LMHeadModel.from_pretrained("distilgpt2")
model.to(torch_device)
input_ids = torch.tensor([[464, 1893]], dtype=torch.long, device=torch_device) # The president
expected_output_ids = [
464,
1893,
286,
262,
1578,
1829,
11,
290,
262,
1893,
286,
262,
1578,
7526,
11,
423,
587,
287,
262,
2635,
] # The president of the United States, and the president of the United Kingdom, have been in the White
output_ids = model.generate(input_ids, do_sample=False)
self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 15,288 | 36.199513 | 116 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_modeling_encoder_decoder.py | # coding=utf-8
# Copyright 2020 HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
# TODO(PVP): this line reruns all the tests in BertModelTest; not sure whether this can be prevented
# for now only run module with pytest tests/test_modeling_encoder_decoder.py::EncoderDecoderModelTest
from .test_modeling_bert import BertModelTester
from .test_modeling_common import ids_tensor
if is_torch_available():
from transformers import BertModel, EncoderDecoderModel, EncoderDecoderConfig
from transformers.modeling_bert import BertLMHeadModel
import numpy as np
import torch
@require_torch
class EncoderDecoderModelTest(unittest.TestCase):
def prepare_config_and_inputs_bert(self):
bert_model_tester = BertModelTester(self)
encoder_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
decoder_config_and_inputs = bert_model_tester.prepare_config_and_inputs_for_decoder()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = encoder_config_and_inputs
(
decoder_config,
decoder_input_ids,
decoder_token_type_ids,
decoder_input_mask,
decoder_sequence_labels,
decoder_token_labels,
decoder_choice_labels,
encoder_hidden_states,
encoder_attention_mask,
) = decoder_config_and_inputs
return {
"config": config,
"input_ids": input_ids,
"attention_mask": input_mask,
"decoder_config": decoder_config,
"decoder_input_ids": decoder_input_ids,
"decoder_token_type_ids": decoder_token_type_ids,
"decoder_attention_mask": decoder_input_mask,
"decoder_sequence_labels": decoder_sequence_labels,
"decoder_token_labels": decoder_token_labels,
"decoder_choice_labels": decoder_choice_labels,
"encoder_hidden_states": encoder_hidden_states,
"labels": decoder_token_labels,
}
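    # Builds an EncoderDecoderModel from an EncoderDecoderConfig composed of the BERT encoder and decoder
    # configs above, then checks the decoder lm-logits shape and the encoder hidden-states shape.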
def create_and_check_bert_encoder_decoder_model_from_pretrained_configs(
self,
config,
input_ids,
attention_mask,
encoder_hidden_states,
decoder_config,
decoder_input_ids,
decoder_attention_mask,
**kwargs
):
encoder_decoder_config = EncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config)
self.assertTrue(encoder_decoder_config.decoder.is_decoder)
enc_dec_model = EncoderDecoderModel(encoder_decoder_config)
enc_dec_model.to(torch_device)
enc_dec_model.eval()
self.assertTrue(enc_dec_model.config.is_encoder_decoder)
outputs_encoder_decoder = enc_dec_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
self.assertEqual(outputs_encoder_decoder[0].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)))
self.assertEqual(outputs_encoder_decoder[1].shape, (input_ids.shape + (config.hidden_size,)))
def create_and_check_bert_encoder_decoder_model(
self,
config,
input_ids,
attention_mask,
encoder_hidden_states,
decoder_config,
decoder_input_ids,
decoder_attention_mask,
**kwargs
):
encoder_model = BertModel(config)
decoder_model = BertLMHeadModel(decoder_config)
enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
self.assertTrue(enc_dec_model.config.decoder.is_decoder)
self.assertTrue(enc_dec_model.config.is_encoder_decoder)
enc_dec_model.to(torch_device)
outputs_encoder_decoder = enc_dec_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
self.assertEqual(outputs_encoder_decoder[0].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)))
self.assertEqual(outputs_encoder_decoder[1].shape, (input_ids.shape + (config.hidden_size,)))
encoder_outputs = (encoder_hidden_states,)
outputs_encoder_decoder = enc_dec_model(
encoder_outputs=encoder_outputs,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
self.assertEqual(outputs_encoder_decoder[0].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)))
self.assertEqual(outputs_encoder_decoder[1].shape, (input_ids.shape + (config.hidden_size,)))
def create_and_check_bert_encoder_decoder_model_from_pretrained(
self,
config,
input_ids,
attention_mask,
encoder_hidden_states,
decoder_config,
decoder_input_ids,
decoder_attention_mask,
**kwargs
):
encoder_model = BertModel(config)
decoder_model = BertLMHeadModel(decoder_config)
kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model}
enc_dec_model = EncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs)
enc_dec_model.to(torch_device)
outputs_encoder_decoder = enc_dec_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
self.assertEqual(outputs_encoder_decoder[0].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)))
self.assertEqual(outputs_encoder_decoder[1].shape, (input_ids.shape + (config.hidden_size,)))
def create_and_check_save_and_load(
self,
config,
input_ids,
attention_mask,
encoder_hidden_states,
decoder_config,
decoder_input_ids,
decoder_attention_mask,
**kwargs
):
encoder_model = BertModel(config)
decoder_model = BertLMHeadModel(decoder_config)
enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
enc_dec_model.to(torch_device)
enc_dec_model.eval()
with torch.no_grad():
outputs = enc_dec_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
out_2 = outputs[0].cpu().numpy()
out_2[np.isnan(out_2)] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
enc_dec_model.save_pretrained(tmpdirname)
EncoderDecoderModel.from_pretrained(tmpdirname)
after_outputs = enc_dec_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
out_1 = after_outputs[0].cpu().numpy()
out_1[np.isnan(out_1)] = 0
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def create_and_check_save_and_load_encoder_decoder_model(
self,
config,
input_ids,
attention_mask,
encoder_hidden_states,
decoder_config,
decoder_input_ids,
decoder_attention_mask,
**kwargs
):
encoder_model = BertModel(config)
decoder_model = BertLMHeadModel(decoder_config)
enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
enc_dec_model.to(torch_device)
enc_dec_model.eval()
with torch.no_grad():
outputs = enc_dec_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
out_2 = outputs[0].cpu().numpy()
out_2[np.isnan(out_2)] = 0
with tempfile.TemporaryDirectory() as encoder_tmp_dirname, tempfile.TemporaryDirectory() as decoder_tmp_dirname:
enc_dec_model.encoder.save_pretrained(encoder_tmp_dirname)
enc_dec_model.decoder.save_pretrained(decoder_tmp_dirname)
EncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=encoder_tmp_dirname,
decoder_pretrained_model_name_or_path=decoder_tmp_dirname,
)
after_outputs = enc_dec_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
out_1 = after_outputs[0].cpu().numpy()
out_1[np.isnan(out_1)] = 0
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def check_loss_output(self, loss):
self.assertEqual(loss.size(), ())
def create_and_check_bert_encoder_decoder_model_labels(
self,
config,
input_ids,
attention_mask,
encoder_hidden_states,
decoder_config,
decoder_input_ids,
decoder_attention_mask,
labels,
**kwargs
):
encoder_model = BertModel(config)
decoder_model = BertLMHeadModel(decoder_config)
enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
enc_dec_model.to(torch_device)
outputs_encoder_decoder = enc_dec_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
labels=labels,
)
mlm_loss = outputs_encoder_decoder[0]
self.check_loss_output(mlm_loss)
# check that backprop works
mlm_loss.backward()
self.assertEqual(outputs_encoder_decoder[1].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,)))
self.assertEqual(outputs_encoder_decoder[2].shape, (input_ids.shape + (config.hidden_size,)))
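    # Generation smoke test: with BERT as both encoder and decoder, generate() should return sequences of
    # shape (batch_size, decoder_config.max_length); BERT has no bos token, so pad_token_id is used as the
    # decoder start token.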
def create_and_check_bert_encoder_decoder_model_generate(self, input_ids, config, decoder_config, **kwargs):
encoder_model = BertModel(config)
decoder_model = BertLMHeadModel(decoder_config)
enc_dec_model = EncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
enc_dec_model.to(torch_device)
# Bert does not have a bos token id, so use pad_token_id instead
generated_output = enc_dec_model.generate(
input_ids, decoder_start_token_id=enc_dec_model.config.decoder.pad_token_id
)
self.assertEqual(generated_output.shape, (input_ids.shape[0],) + (decoder_config.max_length,))
def test_bert_encoder_decoder_model(self):
input_ids_dict = self.prepare_config_and_inputs_bert()
self.create_and_check_bert_encoder_decoder_model(**input_ids_dict)
def test_bert_encoder_decoder_model_from_pretrained_configs(self):
input_ids_dict = self.prepare_config_and_inputs_bert()
self.create_and_check_bert_encoder_decoder_model_from_pretrained_configs(**input_ids_dict)
def test_bert_encoder_decoder_model_from_pretrained(self):
input_ids_dict = self.prepare_config_and_inputs_bert()
self.create_and_check_bert_encoder_decoder_model_from_pretrained(**input_ids_dict)
def test_save_and_load_from_pretrained(self):
input_ids_dict = self.prepare_config_and_inputs_bert()
self.create_and_check_save_and_load(**input_ids_dict)
def test_save_and_load_from_encoder_decoder_pretrained(self):
input_ids_dict = self.prepare_config_and_inputs_bert()
self.create_and_check_save_and_load_encoder_decoder_model(**input_ids_dict)
def test_bert_encoder_decoder_model_labels(self):
input_ids_dict = self.prepare_config_and_inputs_bert()
self.create_and_check_bert_encoder_decoder_model_labels(**input_ids_dict)
def test_bert_encoder_decoder_model_generate(self):
input_ids_dict = self.prepare_config_and_inputs_bert()
self.create_and_check_bert_encoder_decoder_model_generate(**input_ids_dict)
@slow
def test_real_bert_model_from_pretrained(self):
model = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "bert-base-uncased")
self.assertIsNotNone(model)
@slow
def test_real_bert_model_from_pretrained_has_cross_attention(self):
model = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "bert-base-uncased")
self.assertTrue(hasattr(model.decoder.bert.encoder.layer[0], "crossattention"))
@slow
def test_real_bert_model_save_load_from_pretrained(self):
model_2 = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "bert-base-uncased")
model_2.to(torch_device)
input_ids = ids_tensor([13, 5], model_2.config.encoder.vocab_size)
decoder_input_ids = ids_tensor([13, 1], model_2.config.encoder.vocab_size)
attention_mask = ids_tensor([13, 5], vocab_size=2)
with torch.no_grad():
outputs = model_2(input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask,)
out_2 = outputs[0].cpu().numpy()
out_2[np.isnan(out_2)] = 0
with tempfile.TemporaryDirectory() as tmp_dirname:
model_2.save_pretrained(tmp_dirname)
model_1 = EncoderDecoderModel.from_pretrained(tmp_dirname)
model_1.to(torch_device)
after_outputs = model_1(
input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask,
)
out_1 = after_outputs[0].cpu().numpy()
out_1[np.isnan(out_1)] = 0
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
| 15,167 | 40.556164 | 124 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_modeling_common.py | # coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os.path
import random
import tempfile
import unittest
from typing import List
from transformers import is_torch_available
from transformers.testing_utils import require_multigpu, require_torch, slow, torch_device
if is_torch_available():
import torch
import numpy as np
from transformers import (
AdaptiveEmbedding,
PretrainedConfig,
PreTrainedModel,
BertModel,
BertConfig,
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
top_k_top_p_filtering,
)
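# Helper used by the tests below: returns a copy of `config` with every initializer range/std set to a tiny
# value, so freshly initialized parameters are deterministic and easy to sanity-check (see
# test_initialization and the torchscript tests).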
def _config_zero_init(config):
configs_no_init = copy.deepcopy(config)
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key:
setattr(configs_no_init, key, 1e-10)
return configs_no_init
@require_torch
class ModelTesterMixin:
model_tester = None
all_model_classes = ()
all_generative_model_classes = ()
test_torchscript = True
test_pruning = True
test_resize_embeddings = True
test_head_masking = True
test_missing_keys = True
is_encoder_decoder = False
def _prepare_for_class(self, inputs_dict, model_class):
if model_class in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
return {
k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
if isinstance(v, torch.Tensor) and v.ndim != 0
else v
for k, v in inputs_dict.items()
}
return inputs_dict
def test_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
out_2 = outputs[0].cpu().numpy()
out_2[np.isnan(out_2)] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model = model_class.from_pretrained(tmpdirname)
model.to(torch_device)
with torch.no_grad():
after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
# Make sure we don't have nans
out_1 = after_outputs[0].cpu().numpy()
out_1[np.isnan(out_1)] = 0
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg="Parameter {} of model {} seems not properly initialized".format(name, model_class),
)
def test_determinism(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
first = model(**self._prepare_for_class(inputs_dict, model_class))[0]
second = model(**self._prepare_for_class(inputs_dict, model_class))[0]
out_1 = first.cpu().numpy()
out_2 = second.cpu().numpy()
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
seq_len = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
chunk_length = getattr(self.model_tester, "chunk_length", None)
if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs[-1]
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs[-1]
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
if self.is_encoder_decoder:
correct_outlen = 4
decoder_attention_idx = 1
# loss is at first position
if "labels" in inputs_dict:
correct_outlen += 1 # loss is added to beginning
decoder_attention_idx += 1
# Question Answering model returns start_logits and end_logits
if model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values():
correct_outlen += 1 # start_logits and end_logits instead of only 1 output
decoder_attention_idx += 1
self.assertEqual(out_len, correct_outlen)
decoder_attentions = outputs[decoder_attention_idx]
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
self_attentions = outputs[-1]
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
if chunk_length is not None:
self.assertListEqual(
list(self_attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_torchscript(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
self._create_and_check_torchscript(config, inputs_dict)
def test_torchscript_output_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_attentions = True
self._create_and_check_torchscript(config, inputs_dict)
def test_torchscript_output_hidden_state(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
self._create_and_check_torchscript(config, inputs_dict)
def _create_and_check_torchscript(self, config, inputs_dict):
if not self.test_torchscript:
return
configs_no_init = _config_zero_init(config) # To be sure we have no Nan
configs_no_init.torchscript = True
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)["input_ids"] # Let's keep only input_ids
try:
traced_gpt2 = torch.jit.trace(model, inputs)
except RuntimeError:
self.fail("Couldn't trace module.")
with tempfile.TemporaryDirectory() as tmp_dir_name:
pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")
try:
torch.jit.save(traced_gpt2, pt_file_name)
except Exception:
self.fail("Couldn't save module.")
try:
loaded_model = torch.jit.load(pt_file_name)
except Exception:
self.fail("Couldn't load module.")
model.to(torch_device)
model.eval()
loaded_model.to(torch_device)
loaded_model.eval()
model_state_dict = model.state_dict()
loaded_model_state_dict = loaded_model.state_dict()
self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))
models_equal = True
for layer_name, p1 in model_state_dict.items():
p2 = loaded_model_state_dict[layer_name]
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
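    # Head-masking test: masks the first head of the first layer and all but the last head of the last layer,
    # then checks that the corresponding attention rows are (near) zero and that a gradient flows back to
    # head_mask for head-importance computation.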
def test_headmasking(self):
if not self.test_head_masking:
return
global_rng.seed(42)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
global_rng.seed()
inputs_dict["output_attentions"] = True
config.output_hidden_states = True
configs_no_init = _config_zero_init(config) # To be sure we have no Nan
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
model.to(torch_device)
model.eval()
# Prepare head_mask
            # Set requires_grad after having prepared the tensor to avoid the error (leaf variable has been moved into the graph interior)
head_mask = torch.ones(
self.model_tester.num_hidden_layers, self.model_tester.num_attention_heads, device=torch_device,
)
head_mask[0, 0] = 0
head_mask[-1, :-1] = 0
head_mask.requires_grad_(requires_grad=True)
inputs = self._prepare_for_class(inputs_dict, model_class).copy()
inputs["head_mask"] = head_mask
outputs = model(**inputs)
# Test that we can get a gradient back for importance score computation
output = sum(t.sum() for t in outputs[0])
output = output.sum()
output.backward()
multihead_outputs = head_mask.grad
attentions = outputs[-1]
# Remove Nan
for t in attentions:
self.assertLess(
torch.sum(torch.isnan(t)), t.numel() / 4
) # Check we don't have more than 25% nans (arbitrary)
attentions = [
t.masked_fill(torch.isnan(t), 0.0) for t in attentions
] # remove them (the test is less complete)
self.assertIsNotNone(multihead_outputs)
self.assertEqual(len(multihead_outputs), self.model_tester.num_hidden_layers)
self.assertAlmostEqual(attentions[0][..., 0, :, :].flatten().sum().item(), 0.0)
self.assertNotEqual(attentions[0][..., -1, :, :].flatten().sum().item(), 0.0)
self.assertNotEqual(attentions[1][..., 0, :, :].flatten().sum().item(), 0.0)
self.assertAlmostEqual(attentions[-1][..., -2, :, :].flatten().sum().item(), 0.0)
self.assertNotEqual(attentions[-1][..., -1, :, :].flatten().sum().item(), 0.0)
def test_head_pruning(self):
if not self.test_pruning:
return
for model_class in self.all_model_classes:
(config, inputs_dict,) = self.model_tester.prepare_config_and_inputs_for_common()
if "head_mask" in inputs_dict:
del inputs_dict["head_mask"]
inputs_dict["output_attentions"] = True
config.output_hidden_states = False
model = model_class(config=config)
model.to(torch_device)
model.eval()
heads_to_prune = {
0: list(range(1, self.model_tester.num_attention_heads)),
-1: [0],
}
model.prune_heads(heads_to_prune)
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs[-1]
self.assertEqual(attentions[0].shape[-3], 1)
self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)
self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)
def test_head_pruning_save_load_from_pretrained(self):
if not self.test_pruning:
return
for model_class in self.all_model_classes:
(config, inputs_dict,) = self.model_tester.prepare_config_and_inputs_for_common()
if "head_mask" in inputs_dict:
del inputs_dict["head_mask"]
inputs_dict["output_attentions"] = True
config.output_hidden_states = False
model = model_class(config=config)
model.to(torch_device)
model.eval()
heads_to_prune = {
0: list(range(1, self.model_tester.num_attention_heads)),
-1: [0],
}
model.prune_heads(heads_to_prune)
with tempfile.TemporaryDirectory() as temp_dir_name:
model.save_pretrained(temp_dir_name)
model = model_class.from_pretrained(temp_dir_name)
model.to(torch_device)
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs[-1]
self.assertEqual(attentions[0].shape[-3], 1)
self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)
self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)
def test_head_pruning_save_load_from_config_init(self):
if not self.test_pruning:
return
for model_class in self.all_model_classes:
(config, inputs_dict,) = self.model_tester.prepare_config_and_inputs_for_common()
if "head_mask" in inputs_dict:
del inputs_dict["head_mask"]
inputs_dict["output_attentions"] = True
config.output_hidden_states = False
heads_to_prune = {
0: list(range(1, self.model_tester.num_attention_heads)),
-1: [0],
}
config.pruned_heads = heads_to_prune
model = model_class(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs[-1]
self.assertEqual(attentions[0].shape[-3], 1)
self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)
self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)
def test_head_pruning_integration(self):
if not self.test_pruning:
return
for model_class in self.all_model_classes:
(config, inputs_dict,) = self.model_tester.prepare_config_and_inputs_for_common()
if "head_mask" in inputs_dict:
del inputs_dict["head_mask"]
inputs_dict["output_attentions"] = True
config.output_hidden_states = False
heads_to_prune = {0: [0], 1: [1, 2]}
config.pruned_heads = heads_to_prune
model = model_class(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs[-1]
self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)
self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)
self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads)
self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)
with tempfile.TemporaryDirectory() as temp_dir_name:
model.save_pretrained(temp_dir_name)
model = model_class.from_pretrained(temp_dir_name)
model.to(torch_device)
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs[-1]
self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)
self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)
self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads)
self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)
heads_to_prune = {0: [0], 2: [1, 2]}
model.prune_heads(heads_to_prune)
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs[-1]
self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)
self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)
self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads - 2)
self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)
self.assertDictEqual(model.config.pruned_heads, {0: [0], 1: [1, 2], 2: [1, 2]})
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs[-1]
self.assertEqual(len(hidden_states), self.model_tester.num_hidden_layers + 1)
if hasattr(self.model_tester, "encoder_seq_length"):
seq_length = self.model_tester.encoder_seq_length
if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
seq_length = seq_length * self.model_tester.chunk_length
else:
seq_length = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
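    # Embedding-resizing checks: growing or shrinking the token embedding matrix must update
    # config.vocab_size, leave the original rows untouched, and still allow a forward pass (input ids are
    # clamped to the smaller vocabulary when shrinking).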
def test_resize_tokens_embeddings(self):
(original_config, inputs_dict,) = self.model_tester.prepare_config_and_inputs_for_common()
if not self.test_resize_embeddings:
return
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
if self.model_tester.is_training is False:
model.eval()
model_vocab_size = config.vocab_size
            # Retrieve the embeddings and clone them
model_embed = model.resize_token_embeddings(model_vocab_size)
cloned_embeddings = model_embed.weight.clone()
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Input ids should be clamped to the maximum size of the vocabulary
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that adding and removing tokens has not modified the first part of the embedding matrix.
models_equal = True
for p1, p2 in zip(cloned_embeddings, model_embed.weight):
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (torch.nn.Embedding, AdaptiveEmbedding))
model.set_input_embeddings(torch.nn.Embedding(10, 10))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, torch.nn.Linear))
def test_correct_missing_keys(self):
if not self.test_missing_keys:
return
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
base_model_prefix = model.base_model_prefix
if hasattr(model, base_model_prefix):
with tempfile.TemporaryDirectory() as temp_dir_name:
model.base_model.save_pretrained(temp_dir_name)
model, loading_info = model_class.from_pretrained(temp_dir_name, output_loading_info=True)
with self.subTest(msg="Missing keys for {}".format(model.__class__.__name__)):
self.assertGreater(len(loading_info["missing_keys"]), 0)
def test_tie_model_weights(self):
if not self.test_torchscript:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_same_values(layer_1, layer_2):
equal = True
for p1, p2 in zip(layer_1.weight, layer_2.weight):
if p1.data.ne(p2.data).sum() > 0:
equal = False
return equal
for model_class in self.all_model_classes:
config.torchscript = True
model_not_tied = model_class(config)
if model_not_tied.get_output_embeddings() is None:
continue
params_not_tied = list(model_not_tied.parameters())
config_tied = copy.deepcopy(config)
config_tied.torchscript = False
model_tied = model_class(config_tied)
params_tied = list(model_tied.parameters())
# Check that the embedding layer and decoding layer are the same in size and in value
self.assertGreater(len(params_not_tied), len(params_tied))
# self.assertTrue(check_same_values(embeddings, decoding))
# # Check that after modification, they remain the same.
# embeddings.weight.data.div_(2)
# # Check that the embedding layer and decoding layer are the same in size and in value
# self.assertTrue(embeddings.weight.shape, decoding.weight.shape)
# self.assertTrue(check_same_values(embeddings, decoding))
# # Check that after modification, they remain the same.
# decoding.weight.data.div_(4)
# # Check that the embedding layer and decoding layer are the same in size and in value
# self.assertTrue(embeddings.weight.shape, decoding.weight.shape)
# self.assertTrue(check_same_values(embeddings, decoding))
# Check that after resize they remain tied.
model_tied.resize_token_embeddings(config.vocab_size + 10)
params_tied_2 = list(model_tied.parameters())
self.assertGreater(len(params_not_tied), len(params_tied))
self.assertEqual(len(params_tied_2), len(params_tied))
# decoding.weight.data.mul_(20)
# # Check that the embedding layer and decoding layer are the same in size and in value
# self.assertTrue(model.transformer.wte.weight.shape, model.lm_head.weight.shape)
# self.assertTrue(check_same_values(model.transformer.wte, model.lm_head))
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = wte(input_ids)
else:
inputs["inputs_embeds"] = wte(encoder_input_ids)
inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
with torch.no_grad():
model(**inputs)
def test_lm_head_model_random_no_beam_search_generate(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict["input_ids"] if "input_ids" in inputs_dict else inputs_dict["inputs"]
# make sure that input_ids is at most of size 15
input_ids = input_ids[..., :15]
# iterate over all generative models
for model_class in self.all_generative_model_classes:
model = model_class(config).to(torch_device)
model.eval()
if config.bos_token_id is None:
# if bos token id is not defined, model needs input_ids
with self.assertRaises(AssertionError):
model.generate(do_sample=True, max_length=5)
# num_return_sequences = 1
self._check_generated_ids(model.generate(input_ids, do_sample=True))
else:
# num_return_sequences = 1
self._check_generated_ids(model.generate(do_sample=True, max_length=5))
with self.assertRaises(AssertionError):
                    # generating multiple return sequences with greedy (non-sampling, non-beam) decoding
                    # is not allowed, as it would always produce identical sequences
model.generate(input_ids, do_sample=False, num_return_sequences=2)
# num_return_sequences > 1, sample
self._check_generated_ids(model.generate(input_ids, do_sample=True, num_return_sequences=2))
# check bad words tokens language generation
# create list of 1-seq bad token and list of 2-seq of bad tokens
bad_words_ids = [
self._generate_random_bad_tokens(1, model.config),
self._generate_random_bad_tokens(2, model.config),
]
output_tokens = model.generate(
input_ids, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2
)
# only count generated tokens
generated_ids = output_tokens[:, input_ids.shape[-1] :]
self.assertFalse(self._check_match_tokens(generated_ids.tolist(), bad_words_ids))
def test_lm_head_model_random_beam_search_generate(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = (inputs_dict["input_ids"] if "input_ids" in inputs_dict else inputs_dict["inputs"]).to(
torch_device
)
# make sure that input_ids is at most of size 15
input_ids = input_ids[..., :15]
for model_class in self.all_generative_model_classes:
model = model_class(config).to(torch_device)
model.eval()
if config.bos_token_id is None:
                # if bos token id is not defined, the model needs input_ids; num_return_sequences = 1
self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2))
else:
# num_return_sequences = 1
self._check_generated_ids(model.generate(do_sample=True, max_length=5, num_beams=2))
with self.assertRaises(AssertionError):
                # requesting more return sequences than there are beams is not possible
model.generate(input_ids, do_sample=False, num_return_sequences=3, num_beams=2)
# num_return_sequences > 1, sample
self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2, num_return_sequences=2,))
# num_return_sequences > 1, greedy
self._check_generated_ids(model.generate(input_ids, do_sample=False, num_beams=2, num_return_sequences=2))
# check bad words tokens language generation
# create list of 1-seq bad token and list of 2-seq of bad tokens
bad_words_ids = [
self._generate_random_bad_tokens(1, model.config),
self._generate_random_bad_tokens(2, model.config),
]
output_tokens = model.generate(
input_ids, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2
)
# only count generated tokens
generated_ids = output_tokens[:, input_ids.shape[-1] :]
self.assertFalse(self._check_match_tokens(generated_ids.tolist(), bad_words_ids))
def _generate_random_bad_tokens(self, num_bad_tokens: int, config) -> List[int]:
# special tokens cannot be bad tokens
special_tokens = [x for x in [config.bos_token_id, config.eos_token_id, config.pad_token_id] if x is not None]
# create random bad tokens that are not special tokens
bad_tokens = []
while len(bad_tokens) < num_bad_tokens:
token = ids_tensor((1, 1), self.model_tester.vocab_size).squeeze(0).cpu().numpy()[0]
if token not in special_tokens:
bad_tokens.append(token)
return bad_tokens
def _check_generated_ids(self, output_ids):
for token_id in output_ids[0].tolist():
self.assertGreaterEqual(token_id, 0)
self.assertLess(token_id, self.model_tester.vocab_size)
def _check_match_tokens(self, generated_ids, bad_words_ids):
# for all bad word tokens
for bad_word_ids in bad_words_ids:
# for all slices in batch
for generated_ids_slice in generated_ids:
# for all word idx
for i in range(len(bad_word_ids), len(generated_ids_slice)):
# if tokens match
if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids:
return True
return False
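    # Multi-GPU smoke test: wraps each model in torch.nn.DataParallel and runs a forward pass on GPU 0,
    # after dropping inputs such as head_mask that cannot be scattered along the batch dimension.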
@require_multigpu
def test_multigpu_data_parallel_forward(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# some params shouldn't be scattered by nn.DataParallel
# so just remove them if they are present.
blacklist_non_batched_params = ["head_mask"]
for k in blacklist_non_batched_params:
inputs_dict.pop(k, None)
        # move input tensors to cuda:0
for k, v in inputs_dict.items():
if torch.is_tensor(v):
inputs_dict[k] = v.to(0)
for model_class in self.all_model_classes:
model = model_class(config=config)
model.to(0)
model.eval()
# Wrap model in nn.DataParallel
model = torch.nn.DataParallel(model)
with torch.no_grad():
_ = model(**self._prepare_for_class(inputs_dict, model_class))
global_rng = random.Random()
def ids_tensor(shape, vocab_size, rng=None, name=None):
# Creates a random int32 tensor of the shape within the vocab size
if rng is None:
rng = global_rng
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.randint(0, vocab_size - 1))
return torch.tensor(data=values, dtype=torch.long, device=torch_device).view(shape).contiguous()
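# Usage example (hypothetical shapes): ids_tensor([2, 5], vocab_size=100) returns a (2, 5) torch.long tensor
# on `torch_device` with values drawn uniformly from [0, vocab_size - 1].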
def floats_tensor(shape, scale=1.0, rng=None, name=None):
"""Creates a random float32 tensor"""
if rng is None:
rng = global_rng
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.random() * scale)
return torch.tensor(data=values, dtype=torch.float, device=torch_device).view(shape).contiguous()
@require_torch
class ModelUtilsTest(unittest.TestCase):
@slow
def test_model_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = BertConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, PretrainedConfig)
model = BertModel.from_pretrained(model_name)
model, loading_info = BertModel.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, PreTrainedModel)
for value in loading_info.values():
self.assertEqual(len(value), 0)
config = BertConfig.from_pretrained(model_name, output_attentions=True, output_hidden_states=True)
model = BertModel.from_pretrained(model_name, output_attentions=True, output_hidden_states=True)
self.assertEqual(model.config.output_hidden_states, True)
self.assertEqual(model.config, config)
@require_torch
class UtilsFunctionsTest(unittest.TestCase):
# tests whether the top_k_top_p function behaves as expected
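    # top_k_top_p_filtering keeps the top_k highest logits, then the smallest set whose cumulative softmax
    # probability exceeds top_p (never fewer than min_tokens_to_keep per row); all other logits are set to -inf.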
def test_top_k_top_p_filtering(self):
logits = torch.tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ], # cumulative prob of 5 highest values <= 0.6
],
dtype=torch.float,
device=torch_device,
)
non_inf_expected_idx = torch.tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
dtype=torch.long,
device=torch_device,
) # expected non filtered idx as noted above
non_inf_expected_output = torch.tensor(
[
8.2221,
7.3534,
8.4321,
7.4402,
9.3845,
6.2712,
8.8275,
5.4403,
7.3858,
9.6770,
], # expected non filtered values as noted above
dtype=torch.float,
device=torch_device,
)
output = top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
non_inf_output = output[output != -float("inf")].to(device=torch_device)
non_inf_idx = (output != -float("inf")).nonzero().to(device=torch_device)
self.assertTrue(torch.allclose(non_inf_expected_output, non_inf_output, atol=1e-12))
self.assertTrue(torch.all(torch.eq(non_inf_expected_idx, non_inf_idx)))
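# The assertions above depend on how top_k_top_p_filtering combines its two cut-offs:
# top_k=10 first keeps the ten largest logits per row, top_p=0.6 then keeps only the
# highest-probability tokens whose cumulative softmax mass stays within 0.6 (with
# min_tokens_to_keep=4 guaranteeing at least four survivors), and every filtered
# position is overwritten with -inf.
# A minimal sketch of the nucleus (top-p) step alone, shown purely for illustration
# and not used by the tests above (the helper name is hypothetical):
def _nucleus_keep_mask_sketch(logits, top_p):
    # Sort probabilities in descending order and keep each token while the mass
    # accumulated *before* it is still below top_p, so the token that crosses the
    # threshold is kept as well (mirroring the shifted mask in the library code).
    probs = torch.softmax(logits, dim=-1)
    sorted_probs, sorted_idx = probs.sort(dim=-1, descending=True)
    cumulative = sorted_probs.cumsum(dim=-1)
    keep_sorted = (cumulative - sorted_probs) < top_p
    # Scatter the sorted mask back to the original token order.
    return torch.zeros_like(probs).scatter(-1, sorted_idx, keep_sorted.float()).bool()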
| 41,854 | 41.752809 | 133 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_modeling_xlm.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
import torch
from transformers import (
XLMConfig,
XLMModel,
XLMWithLMHeadModel,
XLMForTokenClassification,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
)
from transformers.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
def __init__(
self, parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_lengths = True
self.use_token_type_ids = True
self.use_labels = True
self.gelu_activation = True
self.sinusoidal_embeddings = False
self.causal = False
self.asm = False
self.n_langs = 2
self.vocab_size = 99
self.n_special = 0
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.summary_type = "last"
self.use_proj = True
self.scope = None
self.bos_token_id = 0
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = ids_tensor([self.batch_size, self.seq_length], 2).float()
input_lengths = None
if self.use_input_lengths:
input_lengths = (
ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
sequence_labels = None
token_labels = None
is_impossible_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
is_impossible_labels = ids_tensor([self.batch_size], 2).float()
config = XLMConfig(
vocab_size=self.vocab_size,
n_special=self.n_special,
emb_dim=self.hidden_size,
n_layers=self.num_hidden_layers,
n_heads=self.num_attention_heads,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
gelu_activation=self.gelu_activation,
sinusoidal_embeddings=self.sinusoidal_embeddings,
asm=self.asm,
causal=self.causal,
n_langs=self.n_langs,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
summary_type=self.summary_type,
use_proj=self.use_proj,
bos_token_id=self.bos_token_id,
)
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
input_mask,
)
def check_loss_output(self, result):
self.parent.assertListEqual(list(result["loss"].size()), [])
def create_and_check_xlm_model(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
input_mask,
):
model = XLMModel(config=config)
model.to(torch_device)
model.eval()
outputs = model(input_ids, lengths=input_lengths, langs=token_type_ids)
outputs = model(input_ids, langs=token_type_ids)
outputs = model(input_ids)
sequence_output = outputs[0]
result = {
"sequence_output": sequence_output,
}
self.parent.assertListEqual(
list(result["sequence_output"].size()), [self.batch_size, self.seq_length, self.hidden_size]
)
def create_and_check_xlm_lm_head(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
input_mask,
):
model = XLMWithLMHeadModel(config)
model.to(torch_device)
model.eval()
loss, logits = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
result = {
"loss": loss,
"logits": logits,
}
self.parent.assertListEqual(list(result["loss"].size()), [])
self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.seq_length, self.vocab_size])
def create_and_check_xlm_simple_qa(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
input_mask,
):
model = XLMForQuestionAnsweringSimple(config)
model.to(torch_device)
model.eval()
outputs = model(input_ids)
outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
loss, start_logits, end_logits = outputs
result = {
"loss": loss,
"start_logits": start_logits,
"end_logits": end_logits,
}
self.parent.assertListEqual(list(result["start_logits"].size()), [self.batch_size, self.seq_length])
self.parent.assertListEqual(list(result["end_logits"].size()), [self.batch_size, self.seq_length])
self.check_loss_output(result)
def create_and_check_xlm_qa(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
input_mask,
):
model = XLMForQuestionAnswering(config)
model.to(torch_device)
model.eval()
outputs = model(input_ids)
start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = outputs
outputs = model(
input_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
cls_index=sequence_labels,
is_impossible=is_impossible_labels,
p_mask=input_mask,
)
outputs = model(
input_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
cls_index=sequence_labels,
is_impossible=is_impossible_labels,
)
(total_loss,) = outputs
outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
(total_loss,) = outputs
result = {
"loss": total_loss,
"start_top_log_probs": start_top_log_probs,
"start_top_index": start_top_index,
"end_top_log_probs": end_top_log_probs,
"end_top_index": end_top_index,
"cls_logits": cls_logits,
}
self.parent.assertListEqual(list(result["loss"].size()), [])
self.parent.assertListEqual(
list(result["start_top_log_probs"].size()), [self.batch_size, model.config.start_n_top]
)
self.parent.assertListEqual(
list(result["start_top_index"].size()), [self.batch_size, model.config.start_n_top]
)
self.parent.assertListEqual(
list(result["end_top_log_probs"].size()),
[self.batch_size, model.config.start_n_top * model.config.end_n_top],
)
self.parent.assertListEqual(
list(result["end_top_index"].size()), [self.batch_size, model.config.start_n_top * model.config.end_n_top],
)
self.parent.assertListEqual(list(result["cls_logits"].size()), [self.batch_size])
def create_and_check_xlm_sequence_classif(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
input_mask,
):
model = XLMForSequenceClassification(config)
model.to(torch_device)
model.eval()
(logits,) = model(input_ids)
loss, logits = model(input_ids, labels=sequence_labels)
result = {
"loss": loss,
"logits": logits,
}
self.parent.assertListEqual(list(result["loss"].size()), [])
self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.type_sequence_label_size])
def create_and_check_xlm_for_token_classification(
self,
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
input_mask,
):
config.num_labels = self.num_labels
model = XLMForTokenClassification(config)
model.to(torch_device)
model.eval()
loss, logits = model(input_ids, attention_mask=input_mask, labels=token_labels)
result = {
"loss": loss,
"logits": logits,
}
self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.seq_length, self.num_labels])
self.check_loss_output(result)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
input_mask,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
)
if is_torch_available()
else ()
)
all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
def setUp(self):
self.model_tester = XLMModelTester(self)
self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_xlm_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*config_and_inputs)
def test_xlm_lm_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)
def test_xlm_simple_qa(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)
def test_xlm_qa(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*config_and_inputs)
def test_xlm_sequence_classif(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)
def test_xlm_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_token_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = XLMModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
@slow
def test_lm_generate_xlm_mlm_en_2048(self):
model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
model.to(torch_device)
input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device) # the president
expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
output_ids = model.generate(input_ids, do_sample=False)
self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 14,239 | 32.271028 | 166 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_modeling_tf_common.py | # coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
import os
import random
import tempfile
import unittest
from importlib import import_module
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import _tf_gpu_memory_limit, require_tf
if is_tf_available():
import tensorflow as tf
import numpy as np
from transformers import (
tf_top_k_top_p_filtering,
TFAdaptiveEmbedding,
TFSharedEmbeddings,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
)
if _tf_gpu_memory_limit is not None:
gpus = tf.config.list_physical_devices("GPU")
for gpu in gpus:
        # Restrict TensorFlow to only allocate _tf_gpu_memory_limit MB of memory on each of the GPUs
try:
tf.config.experimental.set_virtual_device_configuration(
gpu, [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)]
)
logical_gpus = tf.config.experimental.list_logical_devices("GPU")
print("Logical GPUs", logical_gpus)
except RuntimeError as e:
# Virtual devices must be set before GPUs have been initialized
print(e)
def _config_zero_init(config):
configs_no_init = copy.deepcopy(config)
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key:
setattr(configs_no_init, key, 0.0)
return configs_no_init
@require_tf
class TFModelTesterMixin:
model_tester = None
all_model_classes = ()
all_generative_model_classes = ()
test_torchscript = True
test_pruning = True
test_resize_embeddings = True
is_encoder_decoder = False
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
if model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
inputs_dict = {
k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices, 1))
if isinstance(v, tf.Tensor) and v.ndim != 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
inputs_dict["labels"] = tf.ones(self.model_tester.batch_size)
elif model_class in TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.values():
inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size)
inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size)
elif model_class in TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.values():
inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size)
elif model_class in TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.values():
inputs_dict["labels"] = tf.zeros((self.model_tester.batch_size, self.model_tester.seq_length))
return inputs_dict
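        # For multiple-choice models every per-token input is duplicated across a new
        # `num_choices` axis, giving (batch, num_choices, seq_len), via expand_dims + tile.
        # The dummy labels added when `return_labels` is set only need the right shape for
        # the loss to be computable; their values carry no meaning.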
def test_initialization(self):
pass
# config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# configs_no_init = _config_zero_init(config)
# for model_class in self.all_model_classes:
# model = model_class(config=configs_no_init)
# for name, param in model.named_parameters():
# if param.requires_grad:
# self.assertIn(param.data.mean().item(), [0.0, 1.0],
# msg="Parameter {} of model {} seems not properly initialized".format(name, model_class))
def test_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model = model_class.from_pretrained(tmpdirname)
after_outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assert_outputs_same(after_outputs, outputs)
def test_keras_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
tf_main_layer_classes = set(
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__),)
for module_member_name in dir(module)
if module_member_name.endswith("MainLayer")
for module_member in (getattr(module, module_member_name),)
if isinstance(module_member, type)
and tf.keras.layers.Layer in module_member.__bases__
and getattr(module_member, "_keras_serializable", False)
)
for main_layer_class in tf_main_layer_classes:
# T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
if "T5" in main_layer_class.__name__:
                # Take the same values as in TFT5ModelTester for this shared layer
shared = TFSharedEmbeddings(99, 32, name="shared")
config.use_cache = False
main_layer = main_layer_class(config, embed_tokens=shared)
else:
main_layer = main_layer_class(config)
symbolic_inputs = {
name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
}
model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
outputs = model(inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
filepath = os.path.join(tmpdirname, "keras_model.h5")
model.save(filepath)
if "T5" in main_layer_class.__name__:
model = tf.keras.models.load_model(
filepath,
custom_objects={
main_layer_class.__name__: main_layer_class,
"TFSharedEmbeddings": TFSharedEmbeddings,
},
)
else:
model = tf.keras.models.load_model(
filepath, custom_objects={main_layer_class.__name__: main_layer_class}
)
assert isinstance(model, tf.keras.Model)
after_outputs = model(inputs_dict)
self.assert_outputs_same(after_outputs, outputs)
def assert_outputs_same(self, after_outputs, outputs):
# Make sure we don't have nans
if isinstance(after_outputs, tf.Tensor):
out_1 = after_outputs.numpy()
else:
out_1 = after_outputs[0].numpy()
out_2 = outputs[0].numpy()
self.assertEqual(out_1.shape, out_2.shape)
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def test_pt_tf_model_equivalence(self):
if not is_torch_available():
return
import torch
import transformers
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            pt_model_class_name = model_class.__name__[2:]  # Skip the "TF" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
config.output_hidden_states = True
tf_model = model_class(config)
pt_model = pt_model_class(config)
# Check we can load pt model in tf and vice-versa with model => model functions
tf_model = transformers.load_pytorch_model_in_tf2_model(
tf_model, pt_model, tf_inputs=self._prepare_for_class(inputs_dict, model_class)
)
pt_model = transformers.load_tf2_model_in_pytorch_model(pt_model, tf_model)
            # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
pt_model.eval()
pt_inputs_dict = dict(
(name, torch.from_numpy(key.numpy()).to(torch.long))
for name, key in self._prepare_for_class(inputs_dict, model_class).items()
)
# need to rename encoder-decoder "inputs" for PyTorch
if "inputs" in pt_inputs_dict and self.is_encoder_decoder:
pt_inputs_dict["input_ids"] = pt_inputs_dict.pop("inputs")
with torch.no_grad():
pto = pt_model(**pt_inputs_dict)
tfo = tf_model(self._prepare_for_class(inputs_dict, model_class), training=False)
tf_hidden_states = tfo[0].numpy()
pt_hidden_states = pto[0].numpy()
tf_nans = np.copy(np.isnan(tf_hidden_states))
pt_nans = np.copy(np.isnan(pt_hidden_states))
pt_hidden_states[tf_nans] = 0
tf_hidden_states[tf_nans] = 0
pt_hidden_states[pt_nans] = 0
tf_hidden_states[pt_nans] = 0
max_diff = np.amax(np.abs(tf_hidden_states - pt_hidden_states))
# Debug info (remove when fixed)
if max_diff >= 2e-2:
print("===")
print(model_class)
print(config)
print(inputs_dict)
print(pt_inputs_dict)
self.assertLessEqual(max_diff, 2e-2)
# Check we can load pt model in tf and vice-versa with checkpoint => model functions
with tempfile.TemporaryDirectory() as tmpdirname:
pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin")
torch.save(pt_model.state_dict(), pt_checkpoint_path)
tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(tf_model, pt_checkpoint_path)
tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5")
tf_model.save_weights(tf_checkpoint_path)
pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path)
                # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
pt_model.eval()
pt_inputs_dict = dict(
(name, torch.from_numpy(key.numpy()).to(torch.long))
for name, key in self._prepare_for_class(inputs_dict, model_class).items()
)
# need to rename encoder-decoder "inputs" for PyTorch
if "inputs" in pt_inputs_dict and self.is_encoder_decoder:
pt_inputs_dict["input_ids"] = pt_inputs_dict.pop("inputs")
with torch.no_grad():
pto = pt_model(**pt_inputs_dict)
tfo = tf_model(self._prepare_for_class(inputs_dict, model_class))
tfo = tfo[0].numpy()
pto = pto[0].numpy()
tf_nans = np.copy(np.isnan(tfo))
pt_nans = np.copy(np.isnan(pto))
pto[tf_nans] = 0
tfo[tf_nans] = 0
pto[pt_nans] = 0
tfo[pt_nans] = 0
max_diff = np.amax(np.abs(tfo - pto))
self.assertLessEqual(max_diff, 2e-2)
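        # The round trip above is exercised twice on purpose: once by porting weights
        # between live model instances (load_pytorch_model_in_tf2_model /
        # load_tf2_model_in_pytorch_model) and once through serialized checkpoints, so
        # both the in-memory conversion and the on-disk formats are covered. The 2e-2
        # tolerance is deliberately loose to absorb framework-level numerical differences.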
def test_compile_tf_model(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy")
for model_class in self.all_model_classes:
if self.is_encoder_decoder:
input_ids = {
"decoder_input_ids": tf.keras.Input(
batch_shape=(2, 2000), name="decoder_input_ids", dtype="int32"
),
"inputs": tf.keras.Input(batch_shape=(2, 2000), name="inputs", dtype="int32"),
}
elif model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
input_ids = tf.keras.Input(batch_shape=(4, 2, 2000), name="input_ids", dtype="int32")
else:
input_ids = tf.keras.Input(batch_shape=(2, 2000), name="input_ids", dtype="int32")
# Prepare our model
model = model_class(config)
# Let's load it from the disk to be sure we can use pretrained weights
with tempfile.TemporaryDirectory() as tmpdirname:
outputs = model(self._prepare_for_class(inputs_dict, model_class)) # build the model
model.save_pretrained(tmpdirname)
model = model_class.from_pretrained(tmpdirname)
outputs_dict = model(input_ids)
hidden_states = outputs_dict[0]
            # Add a dense layer on top to test integration with other keras modules
outputs = tf.keras.layers.Dense(2, activation="softmax", name="outputs")(hidden_states)
# Compile extended model
extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs])
extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
def test_keyword_and_dict_args(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
outputs_dict = model(self._prepare_for_class(inputs_dict, model_class))
inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
input_ids = inputs_keywords.pop("input_ids" if not self.is_encoder_decoder else "inputs", None,)
outputs_keywords = model(input_ids, **inputs_keywords)
output_dict = outputs_dict[0].numpy()
output_keywords = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
decoder_seq_length = (
self.model_tester.decoder_seq_length
if hasattr(self.model_tester, "decoder_seq_length")
else self.model_tester.seq_length
)
encoder_seq_length = (
self.model_tester.encoder_seq_length
if hasattr(self.model_tester, "encoder_seq_length")
else self.model_tester.seq_length
)
decoder_key_length = (
self.model_tester.key_length if hasattr(self.model_tester, "key_length") else decoder_seq_length
)
encoder_key_length = (
self.model_tester.key_length if hasattr(self.model_tester, "key_length") else encoder_seq_length
)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
config.output_hidden_states = False
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
attentions = [t.numpy() for t in outputs[-1]]
self.assertEqual(model.config.output_hidden_states, False)
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
if self.is_encoder_decoder:
self.assertEqual(out_len % 2, 0)
decoder_attentions = outputs[(out_len // 2) - 1]
self.assertEqual(model.config.output_hidden_states, False)
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
attentions = [t.numpy() for t in outputs[-1]]
self.assertEqual(model.config.output_hidden_states, False)
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
config.output_hidden_states = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
self.assertEqual(model.config.output_hidden_states, True)
attentions = [t.numpy() for t in outputs[-1]]
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_hidden_states_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_hidden_states_output(config, inputs_dict, model_class):
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
hidden_states = [t.numpy() for t in outputs[-1]]
self.assertEqual(len(hidden_states), self.model_tester.num_hidden_layers + 1)
self.assertListEqual(
list(hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size],
)
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(config, inputs_dict, model_class)
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(config, inputs_dict, model_class)
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
assert isinstance(model.get_input_embeddings(), (tf.keras.layers.Layer, TFAdaptiveEmbedding))
x = model.get_output_embeddings()
assert x is None or isinstance(x, tf.keras.layers.Layer)
def test_determinism(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
first, second = (
model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
)
out_1 = first.numpy()
out_2 = second.numpy()
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def _get_embeds(self, wte, input_ids):
        # In our TF models the input embeddings layer (the `wte` passed in above) can take
        # slightly different call signatures, so we try a few of them and finally fall back
        # to synthetically creating a dummy tensor of ones of the embedding/hidden size:
try:
x = wte(input_ids, mode="embedding")
except Exception:
try:
x = wte([input_ids], mode="embedding")
except Exception:
try:
x = wte([input_ids, None, None, None], mode="embedding")
except Exception:
if hasattr(self.model_tester, "embedding_size"):
x = tf.ones(input_ids.shape + [self.model_tester.embedding_size], dtype=tf.dtypes.float32,)
else:
x = tf.ones(input_ids.shape + [self.model_tester.hidden_size], dtype=tf.dtypes.float32,)
return x
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["inputs"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["inputs"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = self._get_embeds(wte, input_ids)
else:
inputs["inputs_embeds"] = self._get_embeds(wte, encoder_input_ids)
inputs["decoder_inputs_embeds"] = self._get_embeds(wte, decoder_input_ids)
model(inputs)
def test_resize_token_embeddings(self):
if not self.test_resize_embeddings:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
INPUT_SHAPE = [1, 10, config.hidden_size]
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10, None]:
# build the embeddings
model = model_class(config=config)
emb_old = model.get_input_embeddings()
emb_old.build(INPUT_SHAPE)
                # resize the embeddings
new_embeddings = model._get_resized_embeddings(emb_old, size)
                # check that the resized embeddings size matches the desired size.
assert_size = size if size is not None else config.vocab_size
self.assertEqual(new_embeddings.shape[0], assert_size)
# check that weights remain the same after resizing
emd_old_weights = model._get_word_embeddings(emb_old)
models_equal = True
for p1, p2 in zip(emd_old_weights.numpy(), new_embeddings.numpy()):
if np.sum(abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
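        # Resizing only touches the vocabulary axis: rows shared by the old and the new
        # embedding matrix are expected to be copied over unchanged (the element-wise
        # comparison above runs on exactly that overlapping slice, since zip stops at the
        # shorter matrix), and a size of None must leave the matrix at config.vocab_size.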
def test_lm_head_model_random_no_beam_search_generate(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict["input_ids"] if "input_ids" in inputs_dict else inputs_dict["inputs"]
# iterate over all generative models
for model_class in self.all_generative_model_classes:
model = model_class(config)
if config.bos_token_id is None:
                # if bos token id is not defined, the model needs input_ids
with self.assertRaises(AssertionError):
model.generate(do_sample=True, max_length=5)
# num_return_sequences = 1
self._check_generated_ids(model.generate(input_ids, do_sample=True))
else:
# num_return_sequences = 1
self._check_generated_ids(model.generate(do_sample=True, max_length=5))
with self.assertRaises(AssertionError):
                    # generating multiple return sequences without sampling or beam search
                    # is not allowed, as it would always produce the same sequences
model.generate(input_ids, do_sample=False, num_return_sequences=2)
# num_return_sequences > 1, sample
self._check_generated_ids(model.generate(input_ids, do_sample=True, num_return_sequences=2))
            # check language generation with bad_words_ids
            # create one single-token and one two-token bad-words sequence
bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
output_tokens = model.generate(
input_ids, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2
)
# only count generated tokens
generated_ids = output_tokens[:, input_ids.shape[-1] :]
self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))
def test_lm_head_model_random_beam_search_generate(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict["input_ids"] if "input_ids" in inputs_dict else inputs_dict["inputs"]
for model_class in self.all_generative_model_classes:
model = model_class(config)
if config.bos_token_id is None:
                # if bos token id is not defined, the model needs input_ids; num_return_sequences = 1
self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2))
else:
# num_return_sequences = 1
self._check_generated_ids(model.generate(do_sample=True, max_length=5, num_beams=2))
with self.assertRaises(AssertionError):
                # generating more return sequences than there are beams is not possible
model.generate(input_ids, do_sample=False, num_return_sequences=3, num_beams=2)
# num_return_sequences > 1, sample
self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2, num_return_sequences=2,))
# num_return_sequences > 1, greedy
self._check_generated_ids(model.generate(input_ids, do_sample=False, num_beams=2, num_return_sequences=2))
            # check language generation with bad_words_ids
            # create one single-token and one two-token bad-words sequence
bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
output_tokens = model.generate(
input_ids, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2
)
# only count generated tokens
generated_ids = output_tokens[:, input_ids.shape[-1] :]
self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))
def test_loss_computation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
if getattr(model, "compute_loss", None):
# The number of elements in the loss should be the same as the number of elements in the label
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
added_label = prepared_for_class[list(prepared_for_class.keys() - inputs_dict.keys())[0]]
loss_size = tf.size(added_label)
                # Test that model correctly computes the loss with kwargs
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
input_ids = prepared_for_class.pop("input_ids")
loss = model(input_ids, **prepared_for_class)[0]
self.assertEqual(loss.shape, [loss_size])
                # Test that model correctly computes the loss with a dict
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
loss = model(prepared_for_class)[0]
self.assertEqual(loss.shape, [loss_size])
                # Test that model correctly computes the loss with a tuple
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
# Get keys that were added with the _prepare_for_class function
label_keys = prepared_for_class.keys() - inputs_dict.keys()
signature = inspect.getfullargspec(model.call)[0]
# Create a dictionary holding the location of the tensors in the tuple
tuple_index_mapping = {1: "input_ids"}
for label_key in label_keys:
label_key_index = signature.index(label_key)
tuple_index_mapping[label_key_index] = label_key
sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
# Initialize a list with None, update the values and convert to a tuple
list_input = [None] * sorted_tuple_index_mapping[-1][0]
for index, value in sorted_tuple_index_mapping:
list_input[index - 1] = prepared_for_class[value]
tuple_input = tuple(list_input)
# Send to model
loss = model(tuple_input)[0]
self.assertEqual(loss.shape, [loss_size])
def _generate_random_bad_tokens(self, num_bad_tokens, model):
# special tokens cannot be bad tokens
special_tokens = []
if model.config.bos_token_id is not None:
special_tokens.append(model.config.bos_token_id)
if model.config.pad_token_id is not None:
special_tokens.append(model.config.pad_token_id)
if model.config.eos_token_id is not None:
special_tokens.append(model.config.eos_token_id)
# create random bad tokens that are not special tokens
bad_tokens = []
while len(bad_tokens) < num_bad_tokens:
token = tf.squeeze(ids_tensor((1, 1), self.model_tester.vocab_size), 0).numpy()[0]
if token not in special_tokens:
bad_tokens.append(token)
return bad_tokens
def _check_generated_ids(self, output_ids):
for token_id in output_ids[0].numpy().tolist():
self.assertGreaterEqual(token_id, 0)
self.assertLess(token_id, self.model_tester.vocab_size)
def _check_match_tokens(self, generated_ids, bad_words_ids):
# for all bad word tokens
for bad_word_ids in bad_words_ids:
# for all slices in batch
for generated_ids_slice in generated_ids:
# for all word idx
for i in range(len(bad_word_ids), len(generated_ids_slice)):
# if tokens match
if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids:
return True
return False
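    # Illustrative example (values are hypothetical): with bad_words_ids = [[5], [3, 7]]
    # the helper returns True for generated_ids = [[1, 3, 7, 2]], because the banned
    # bigram [3, 7] occurs consecutively, and False for [[1, 3, 2, 7]], where it does not.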
def ids_tensor(shape, vocab_size, rng=None, name=None, dtype=None):
"""Creates a random int32 tensor of the shape within the vocab size."""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.randint(0, vocab_size - 1))
output = tf.constant(values, shape=shape, dtype=dtype if dtype is not None else tf.int32)
return output
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
# tests whether the top_k_top_p_filtering function behaves as expected
def test_top_k_top_p_filtering(self):
logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ], # cumulative prob of 5 highest values <= 0.6
],
dtype=tf.float32,
)
non_inf_expected_idx = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]], dtype=tf.int32,
) # expected non filtered idx as noted above
non_inf_expected_output = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
dtype=tf.float32,
) # expected non filtered values as noted above
output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
non_inf_output = output[output != -float("inf")]
non_inf_idx = tf.cast(
tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))), dtype=tf.int32,
)
tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
| 36,057 | 45.169014 | 130 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_modeling_bert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
if is_torch_available():
from transformers import (
BertConfig,
BertModel,
BertLMHeadModel,
BertForMaskedLM,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertForMultipleChoice,
)
from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST
class BertModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = BertConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
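        # The decoder variant re-uses the standard fixture but flips config.is_decoder and
        # additionally supplies encoder_hidden_states / encoder_attention_mask, i.e. the
        # tensors consumed by the cross-attention layers when BERT acts as the decoder
        # half of an encoder-decoder setup.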
def check_loss_output(self, result):
self.parent.assertListEqual(list(result["loss"].size()), [])
def create_and_check_bert_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BertModel(config=config)
model.to(torch_device)
model.eval()
sequence_output, pooled_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
sequence_output, pooled_output = model(input_ids, token_type_ids=token_type_ids)
sequence_output, pooled_output = model(input_ids)
result = {
"sequence_output": sequence_output,
"pooled_output": pooled_output,
}
self.parent.assertListEqual(
list(result["sequence_output"].size()), [self.batch_size, self.seq_length, self.hidden_size]
)
self.parent.assertListEqual(list(result["pooled_output"].size()), [self.batch_size, self.hidden_size])
def create_and_check_bert_model_as_decoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
model = BertModel(config)
model.to(torch_device)
model.eval()
sequence_output, pooled_output = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
sequence_output, pooled_output = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
)
sequence_output, pooled_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = {
"sequence_output": sequence_output,
"pooled_output": pooled_output,
}
self.parent.assertListEqual(
list(result["sequence_output"].size()), [self.batch_size, self.seq_length, self.hidden_size]
)
self.parent.assertListEqual(list(result["pooled_output"].size()), [self.batch_size, self.hidden_size])
def create_and_check_bert_for_causal_lm(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
model = BertLMHeadModel(config=config)
model.to(torch_device)
model.eval()
loss, prediction_scores = model(
input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
)
result = {
"loss": loss,
"prediction_scores": prediction_scores,
}
self.parent.assertListEqual(
list(result["prediction_scores"].size()), [self.batch_size, self.seq_length, self.vocab_size]
)
self.check_loss_output(result)
def create_and_check_bert_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BertForMaskedLM(config=config)
model.to(torch_device)
model.eval()
loss, prediction_scores = model(
input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
)
result = {
"loss": loss,
"prediction_scores": prediction_scores,
}
self.parent.assertListEqual(
list(result["prediction_scores"].size()), [self.batch_size, self.seq_length, self.vocab_size]
)
self.check_loss_output(result)
def create_and_check_bert_model_for_causal_lm_as_decoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
model = BertLMHeadModel(config=config)
model.to(torch_device)
model.eval()
loss, prediction_scores = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=token_labels,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
loss, prediction_scores = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=token_labels,
encoder_hidden_states=encoder_hidden_states,
)
result = {
"loss": loss,
"prediction_scores": prediction_scores,
}
self.parent.assertListEqual(
list(result["prediction_scores"].size()), [self.batch_size, self.seq_length, self.vocab_size]
)
self.check_loss_output(result)
def create_and_check_bert_for_next_sequence_prediction(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BertForNextSentencePrediction(config=config)
model.to(torch_device)
model.eval()
loss, seq_relationship_score = model(
input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, next_sentence_label=sequence_labels,
)
result = {
"loss": loss,
"seq_relationship_score": seq_relationship_score,
}
self.parent.assertListEqual(list(result["seq_relationship_score"].size()), [self.batch_size, 2])
self.check_loss_output(result)
def create_and_check_bert_for_pretraining(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BertForPreTraining(config=config)
model.to(torch_device)
model.eval()
loss, prediction_scores, seq_relationship_score = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=token_labels,
next_sentence_label=sequence_labels,
)
result = {
"loss": loss,
"prediction_scores": prediction_scores,
"seq_relationship_score": seq_relationship_score,
}
self.parent.assertListEqual(
list(result["prediction_scores"].size()), [self.batch_size, self.seq_length, self.vocab_size]
)
self.parent.assertListEqual(list(result["seq_relationship_score"].size()), [self.batch_size, 2])
self.check_loss_output(result)
def create_and_check_bert_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BertForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
loss, start_logits, end_logits = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
result = {
"loss": loss,
"start_logits": start_logits,
"end_logits": end_logits,
}
self.parent.assertListEqual(list(result["start_logits"].size()), [self.batch_size, self.seq_length])
self.parent.assertListEqual(list(result["end_logits"].size()), [self.batch_size, self.seq_length])
self.check_loss_output(result)
def create_and_check_bert_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = BertForSequenceClassification(config)
model.to(torch_device)
model.eval()
loss, logits = model(
input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels
)
result = {
"loss": loss,
"logits": logits,
}
self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.num_labels])
self.check_loss_output(result)
def create_and_check_bert_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = BertForTokenClassification(config=config)
model.to(torch_device)
model.eval()
loss, logits = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
result = {
"loss": loss,
"logits": logits,
}
self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.seq_length, self.num_labels])
self.check_loss_output(result)
def create_and_check_bert_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = BertForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
loss, logits = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
result = {
"loss": loss,
"logits": logits,
}
self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.num_choices])
self.check_loss_output(result)
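        # For the multiple-choice head each (batch, seq_len) input is repeated along a new
        # axis into (batch, num_choices, seq_len); the model scores every choice and
        # returns one logit per (example, choice) pair, hence the expected
        # [batch_size, num_choices] shape of the logits.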
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class BertModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
BertModel,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
)
if is_torch_available()
else ()
)
def setUp(self):
self.model_tester = BertModelTester(self)
self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_bert_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_model(*config_and_inputs)
def test_bert_model_as_decoder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_bert_model_as_decoder(*config_and_inputs)
def test_bert_model_as_decoder_with_default_input_mask(self):
# This regression test was failing with PyTorch < 1.3
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_bert_model_as_decoder(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def test_for_causal_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_bert_for_causal_lm(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_for_masked_lm(*config_and_inputs)
def test_for_causal_lm_decoder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_bert_model_for_causal_lm_as_decoder(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_for_multiple_choice(*config_and_inputs)
def test_for_next_sequence_prediction(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_for_next_sequence_prediction(*config_and_inputs)
def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_for_pretraining(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bert_for_token_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = BertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
| 20,108 | 36.727955 | 119 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_modeling_openai.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
import torch
from transformers import (
OpenAIGPTConfig,
OpenAIGPTModel,
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTLMHeadModel,
OpenAIGPTDoubleHeadsModel,
)
class OpenAIGPTModelTester:
def __init__(
self, parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_token_type_ids = True
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = OpenAIGPTConfig(
vocab_size=self.vocab_size,
n_embd=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
# intermediate_size=self.intermediate_size,
# hidden_act=self.hidden_act,
# hidden_dropout_prob=self.hidden_dropout_prob,
# attention_probs_dropout_prob=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
n_ctx=self.max_position_embeddings
# type_vocab_size=self.type_vocab_size,
# initializer_range=self.initializer_range
)
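        # Random binary head mask (one entry per attention head per layer); 1 keeps a head, 0 masks it.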
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def check_loss_output(self, result):
self.parent.assertListEqual(list(result["loss"].size()), [])
def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
model = OpenAIGPTModel(config=config)
model.to(torch_device)
model.eval()
model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
model(input_ids, token_type_ids=token_type_ids)
(sequence_output,) = model(input_ids)
result = {"sequence_output": sequence_output}
self.parent.assertListEqual(
list(result["sequence_output"].size()), [self.batch_size, self.seq_length, self.hidden_size],
)
def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
model = OpenAIGPTLMHeadModel(config)
model.to(torch_device)
model.eval()
loss, lm_logits = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
result = {"loss": loss, "lm_logits": lm_logits}
self.parent.assertListEqual(list(result["loss"].size()), [])
self.parent.assertListEqual(
list(result["lm_logits"].size()), [self.batch_size, self.seq_length, self.vocab_size],
)
def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
model = OpenAIGPTDoubleHeadsModel(config)
model.to(torch_device)
model.eval()
loss, lm_logits, mc_logits = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
result = {"loss": loss, "lm_logits": lm_logits}
self.parent.assertListEqual(list(result["loss"].size()), [])
self.parent.assertListEqual(
list(result["lm_logits"].size()), [self.batch_size, self.seq_length, self.vocab_size],
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel) if is_torch_available() else ()
)
all_generative_model_classes = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
def setUp(self):
self.model_tester = OpenAIGPTModelTester(self)
self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_openai_gpt_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)
def test_openai_gpt_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
def test_openai_gpt_double_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = OpenAIGPTModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
@slow
def test_lm_generate_openai_gpt(self):
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
model.to(torch_device)
input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device) # the president is
expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
        ] # the president is a very good man. " \n " i'm sure he is, " said the
output_ids = model.generate(input_ids, do_sample=False)
self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 8,301 | 33.882353 | 111 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_benchmark.py | import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import (
PyTorchBenchmarkArguments,
PyTorchBenchmark,
)
@require_torch
class BenchmarkTest(unittest.TestCase):
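    # These tests run PyTorchBenchmark on tiny checkpoints and mainly check that the timing / memory results get populated.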
def check_results_dict_not_empty(self, results):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
result = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(result)
def test_inference_no_configs(self):
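        # Note: the benchmark arguments use negated flags, so no_inference=False actually enables inference benchmarking.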
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
no_inference=False,
sequence_lengths=[8],
batch_sizes=[1],
no_multi_process=True,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_no_configs_only_pretrain(self):
MODEL_ID = "sshleifer/tiny-distilbert-base-uncased-finetuned-sst-2-english"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
no_inference=False,
sequence_lengths=[8],
batch_sizes=[1],
no_multi_process=True,
only_pretrain_model=True,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_torchscript(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
no_inference=False,
torchscript=True,
sequence_lengths=[8],
batch_sizes=[1],
no_multi_process=True,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == "cpu", "Cant do half precision")
def test_inference_fp16(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
no_inference=False,
fp16=True,
sequence_lengths=[8],
batch_sizes=[1],
no_multi_process=True,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_train_no_configs(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
no_inference=True,
sequence_lengths=[8],
batch_sizes=[1],
no_multi_process=True,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == "cpu", "Cant do half precision")
def test_train_no_configs_fp16(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
no_inference=True,
sequence_lengths=[8],
batch_sizes=[1],
fp16=True,
no_multi_process=True,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def test_inference_with_configs(self):
MODEL_ID = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
no_inference=False,
sequence_lengths=[8],
batch_sizes=[1],
no_multi_process=True,
)
benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_encoder_decoder_with_configs(self):
MODEL_ID = "sshleifer/tinier_bart"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=False,
no_inference=False,
sequence_lengths=[8],
batch_sizes=[1],
no_multi_process=True,
)
benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_train_with_configs(self):
MODEL_ID = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
no_inference=True,
sequence_lengths=[8],
batch_sizes=[1],
no_multi_process=True,
)
benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def test_train_encoder_decoder_with_configs(self):
MODEL_ID = "sshleifer/tinier_bart"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
no_inference=True,
sequence_lengths=[8],
batch_sizes=[1],
no_multi_process=True,
)
benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def test_save_csv_files(self):
MODEL_ID = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
no_inference=False,
save_to_csv=True,
sequence_lengths=[8],
batch_sizes=[1],
inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
no_multi_process=True,
)
benchmark = PyTorchBenchmark(benchmark_args)
benchmark.run()
self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())
def test_trace_memory(self):
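        # trace_memory_line_by_line=True makes the benchmark record line-by-line memory summaries
        # (sequential / cumulative / current / total), which this test then inspects.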
MODEL_ID = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(summary):
self.assertTrue(hasattr(summary, "sequential"))
self.assertTrue(hasattr(summary, "cumulative"))
self.assertTrue(hasattr(summary, "current"))
self.assertTrue(hasattr(summary, "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID],
training=True,
no_inference=False,
sequence_lengths=[8],
batch_sizes=[1],
log_filename=os.path.join(tmp_dir, "log.txt"),
log_print=True,
trace_memory_line_by_line=True,
no_multi_process=True,
)
benchmark = PyTorchBenchmark(benchmark_args)
result = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 9,423 | 38.932203 | 91 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_modeling_roberta.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
import torch
from transformers import (
RobertaConfig,
RobertaModel,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
)
from transformers.modeling_roberta import RobertaEmbeddings, create_position_ids_from_input_ids
from transformers.modeling_roberta import ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class RobertaModelTester:
def __init__(
self, parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_mask = True
self.use_token_type_ids = True
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = RobertaConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def check_loss_output(self, result):
self.parent.assertListEqual(list(result["loss"].size()), [])
def create_and_check_roberta_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = RobertaModel(config=config)
model.to(torch_device)
model.eval()
sequence_output, pooled_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
sequence_output, pooled_output = model(input_ids, token_type_ids=token_type_ids)
sequence_output, pooled_output = model(input_ids)
result = {
"sequence_output": sequence_output,
"pooled_output": pooled_output,
}
self.parent.assertListEqual(
list(result["sequence_output"].size()), [self.batch_size, self.seq_length, self.hidden_size]
)
self.parent.assertListEqual(list(result["pooled_output"].size()), [self.batch_size, self.hidden_size])
def create_and_check_roberta_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = RobertaForMaskedLM(config=config)
model.to(torch_device)
model.eval()
loss, prediction_scores = model(
input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
)
result = {
"loss": loss,
"prediction_scores": prediction_scores,
}
self.parent.assertListEqual(
list(result["prediction_scores"].size()), [self.batch_size, self.seq_length, self.vocab_size]
)
self.check_loss_output(result)
def create_and_check_roberta_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = RobertaForTokenClassification(config=config)
model.to(torch_device)
model.eval()
loss, logits = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
result = {
"loss": loss,
"logits": logits,
}
self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.seq_length, self.num_labels])
self.check_loss_output(result)
def create_and_check_roberta_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = RobertaForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
        multiple_choice_input_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        loss, logits = model(
            multiple_choice_input_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
result = {
"loss": loss,
"logits": logits,
}
self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.num_choices])
self.check_loss_output(result)
def create_and_check_roberta_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = RobertaForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
loss, start_logits, end_logits = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
result = {
"loss": loss,
"start_logits": start_logits,
"end_logits": end_logits,
}
self.parent.assertListEqual(list(result["start_logits"].size()), [self.batch_size, self.seq_length])
self.parent.assertListEqual(list(result["end_logits"].size()), [self.batch_size, self.seq_length])
self.check_loss_output(result)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class RobertaModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
RobertaForMaskedLM,
RobertaModel,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
def setUp(self):
self.model_tester = RobertaModelTester(self)
self.config_tester = ConfigTester(self, config_class=RobertaConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_roberta_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_roberta_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_roberta_for_masked_lm(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_roberta_for_token_classification(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_roberta_for_multiple_choice(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_roberta_for_question_answering(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = RobertaModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_create_position_ids_respects_padding_index(self):
""" Ensure that the default position ids only assign a sequential . This is a regression
test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is RobertaEmbeddings.padding_idx + 1
"""
config = self.model_tester.prepare_config_and_inputs()[0]
model = RobertaEmbeddings(config=config)
input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
expected_positions = torch.as_tensor(
[[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]]
)
position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
def test_create_position_ids_from_inputs_embeds(self):
""" Ensure that the default position ids only assign a sequential . This is a regression
test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is RobertaEmbeddings.padding_idx + 1
"""
config = self.model_tester.prepare_config_and_inputs()[0]
embeddings = RobertaEmbeddings(config=config)
inputs_embeds = torch.Tensor(2, 4, 30)
expected_single_positions = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
class RobertaModelIntegrationTest(unittest.TestCase):
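    # The commented-out torch.hub snippets below show how the expected values can be reproduced with the original fairseq RoBERTa checkpoints.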
@slow
def test_inference_masked_lm(self):
model = RobertaForMaskedLM.from_pretrained("roberta-base")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
output = model(input_ids)[0]
expected_shape = torch.Size((1, 11, 50265))
self.assertEqual(output.shape, expected_shape)
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[33.8802, -4.3103, 22.7761], [4.6539, -2.8098, 13.6253], [1.8228, -3.6898, 8.8600]]]
)
# roberta = torch.hub.load('pytorch/fairseq', 'roberta.base')
# roberta.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
def test_inference_no_head(self):
model = RobertaModel.from_pretrained("roberta-base")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
output = model(input_ids)[0]
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[-0.0231, 0.0782, 0.0074], [-0.1854, 0.0540, -0.0175], [0.0548, 0.0799, 0.1687]]]
)
# roberta = torch.hub.load('pytorch/fairseq', 'roberta.base')
# roberta.eval()
# expected_slice = roberta.extract_features(input_ids)[:, :3, :3].detach()
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
def test_inference_classification_head(self):
model = RobertaForSequenceClassification.from_pretrained("roberta-large-mnli")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
output = model(input_ids)[0]
expected_shape = torch.Size((1, 3))
self.assertEqual(output.shape, expected_shape)
expected_tensor = torch.tensor([[-0.9469, 0.3913, 0.5118]])
# roberta = torch.hub.load('pytorch/fairseq', 'roberta.large.mnli')
# roberta.eval()
# expected_tensor = roberta.predict("mnli", input_ids, return_logits=True).detach()
self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 15,034 | 40.648199 | 119 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_modeling_transfo_xl.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import random
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_multigpu, require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
import torch
from transformers import TransfoXLConfig, TransfoXLModel, TransfoXLLMHeadModel
from transformers.modeling_transfo_xl import TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST
class TransfoXLModelTester:
def __init__(
self, parent,
):
self.parent = parent
self.batch_size = 14
self.seq_length = 7
self.mem_len = 30
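        # Transformer-XL attention keys span the cached memory plus the current segment, hence key_length = seq_length + mem_len.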
self.key_length = self.seq_length + self.mem_len
self.clamp_len = 15
self.is_training = True
self.use_labels = True
self.vocab_size = 99
self.cutoffs = [10, 50, 80]
self.hidden_size = 32
self.d_embed = 32
self.num_attention_heads = 4
self.d_head = 8
self.d_inner = 128
self.div_val = 2
self.num_hidden_layers = 5
self.scope = None
self.seed = 1
self.eos_token_id = 0
def prepare_config_and_inputs(self):
input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = TransfoXLConfig(
vocab_size=self.vocab_size,
mem_len=self.mem_len,
clamp_len=self.clamp_len,
cutoffs=self.cutoffs,
d_model=self.hidden_size,
d_embed=self.d_embed,
n_head=self.num_attention_heads,
d_head=self.d_head,
d_inner=self.d_inner,
div_val=self.div_val,
n_layer=self.num_hidden_layers,
eos_token_id=self.eos_token_id,
)
return (config, input_ids_1, input_ids_2, lm_labels)
def set_seed(self):
random.seed(self.seed)
torch.manual_seed(self.seed)
def create_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
model = TransfoXLModel(config)
model.to(torch_device)
model.eval()
hidden_states_1, mems_1 = model(input_ids_1)
hidden_states_2, mems_2 = model(input_ids_2, mems_1)
outputs = {
"hidden_states_1": hidden_states_1,
"mems_1": mems_1,
"hidden_states_2": hidden_states_2,
"mems_2": mems_2,
}
return outputs
def check_transfo_xl_model_output(self, result):
self.parent.assertListEqual(
list(result["hidden_states_1"].size()), [self.batch_size, self.seq_length, self.hidden_size],
)
self.parent.assertListEqual(
list(result["hidden_states_2"].size()), [self.batch_size, self.seq_length, self.hidden_size],
)
self.parent.assertListEqual(
list(list(mem.size()) for mem in result["mems_1"]),
[[self.mem_len, self.batch_size, self.hidden_size]] * self.num_hidden_layers,
)
self.parent.assertListEqual(
list(list(mem.size()) for mem in result["mems_2"]),
[[self.mem_len, self.batch_size, self.hidden_size]] * self.num_hidden_layers,
)
def create_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
model = TransfoXLLMHeadModel(config)
model.to(torch_device)
model.eval()
lm_logits_1, mems_1 = model(input_ids_1)
loss_1, _, mems_1 = model(input_ids_1, labels=lm_labels)
lm_logits_2, mems_2 = model(input_ids_2, mems=mems_1)
loss_2, _, mems_2 = model(input_ids_2, labels=lm_labels, mems=mems_1)
outputs = {
"loss_1": loss_1,
"mems_1": mems_1,
"lm_logits_1": lm_logits_1,
"loss_2": loss_2,
"mems_2": mems_2,
"lm_logits_2": lm_logits_2,
}
return outputs
def check_transfo_xl_lm_head_output(self, result):
self.parent.assertListEqual(list(result["loss_1"].size()), [self.batch_size, self.seq_length - 1])
self.parent.assertListEqual(
list(result["lm_logits_1"].size()), [self.batch_size, self.seq_length, self.vocab_size],
)
self.parent.assertListEqual(
list(list(mem.size()) for mem in result["mems_1"]),
[[self.mem_len, self.batch_size, self.hidden_size]] * self.num_hidden_layers,
)
self.parent.assertListEqual(list(result["loss_2"].size()), [self.batch_size, self.seq_length - 1])
self.parent.assertListEqual(
list(result["lm_logits_2"].size()), [self.batch_size, self.seq_length, self.vocab_size],
)
self.parent.assertListEqual(
list(list(mem.size()) for mem in result["mems_2"]),
[[self.mem_len, self.batch_size, self.hidden_size]] * self.num_hidden_layers,
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
inputs_dict = {"input_ids": input_ids_1}
return config, inputs_dict
@require_torch
class TransfoXLModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (TransfoXLModel, TransfoXLLMHeadModel) if is_torch_available() else ()
all_generative_model_classes = (TransfoXLLMHeadModel,) if is_torch_available() else ()
test_pruning = False
test_torchscript = False
test_resize_embeddings = True
def check_cutoffs_and_n_token(
self, copied_cutoffs, layer, model_embed, model, model_class, resized_value, vocab_size
):
# Check that the cutoffs were modified accordingly
for i in range(len(copied_cutoffs)):
if i < layer:
self.assertEqual(model_embed.cutoffs[i], copied_cutoffs[i])
if model_class == TransfoXLLMHeadModel:
self.assertEqual(model.crit.cutoffs[i], copied_cutoffs[i])
if i < len(model.config.cutoffs):
self.assertEqual(model.config.cutoffs[i], copied_cutoffs[i])
else:
self.assertEqual(model_embed.cutoffs[i], copied_cutoffs[i] + resized_value)
if model_class == TransfoXLLMHeadModel:
self.assertEqual(model.crit.cutoffs[i], copied_cutoffs[i] + resized_value)
if i < len(model.config.cutoffs):
self.assertEqual(model.config.cutoffs[i], copied_cutoffs[i] + resized_value)
self.assertEqual(model_embed.n_token, vocab_size + resized_value)
if model_class == TransfoXLLMHeadModel:
self.assertEqual(model.crit.n_token, vocab_size + resized_value)
def setUp(self):
self.model_tester = TransfoXLModelTester(self)
self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_transfo_xl_model(self):
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
output_result = self.model_tester.create_transfo_xl_model(*config_and_inputs)
self.model_tester.check_transfo_xl_model_output(output_result)
def test_transfo_xl_lm_head(self):
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
output_result = self.model_tester.create_transfo_xl_lm_head(*config_and_inputs)
self.model_tester.check_transfo_xl_lm_head_output(output_result)
@require_multigpu
def test_multigpu_data_parallel_forward(self):
# Opt-out of this test.
pass
@slow
def test_model_from_pretrained(self):
for model_name in TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TransfoXLModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_resize_tokens_embeddings(self):
(original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
if not self.test_resize_embeddings:
return
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
if self.model_tester.is_training is False:
model.eval()
model_vocab_size = config.vocab_size
            # Retrieve the embeddings and clone them
model_embed = model.resize_token_embeddings(model_vocab_size)
cloned_embeddings = [emb.weight.clone() for emb in model_embed.emb_layers]
# Retrieve the cutoffs and copy them
copied_cutoffs = copy.copy(model_embed.cutoffs)
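            # Transfo-XL's resize_token_embeddings takes a layer index for the adaptive embedding; resize each of the first div_val layers in turn.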
test_layers = [x for x in range(config.div_val)]
for layer in test_layers:
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size + 10, layer)
self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.emb_layers[layer].weight.shape[0], cloned_embeddings[layer].shape[0] + 10)
# Check that the cutoffs were modified accordingly
self.check_cutoffs_and_n_token(
copied_cutoffs, layer, model_embed, model, model_class, 10, model_vocab_size
)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**inputs_dict)
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size - 5, layer)
self.assertEqual(model.config.vocab_size, model_vocab_size - 5)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.emb_layers[layer].weight.shape[0], cloned_embeddings[layer].shape[0] - 5)
# Check that the cutoffs were modified accordingly
self.check_cutoffs_and_n_token(
copied_cutoffs, layer, model_embed, model, model_class, -5, model_vocab_size
)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Input ids should be clamped to the maximum size of the vocabulary
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 5 - 1)
model(**inputs_dict)
# Check that adding and removing tokens has not modified the first part of the embedding matrix.
models_equal = True
for p1, p2 in zip(cloned_embeddings[layer], model_embed.emb_layers[layer].weight):
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
# Reset model embeddings to original size
model.resize_token_embeddings(model_vocab_size, layer)
self.assertEqual(model_vocab_size, model.config.vocab_size)
self.assertEqual(model_embed.emb_layers[layer].weight.shape[0], cloned_embeddings[layer].shape[0])
class TransfoXLModelLanguageGenerationTest(unittest.TestCase):
@slow
def test_lm_generate_transfo_xl_wt103(self):
model = TransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
model.to(torch_device)
input_ids = torch.tensor(
[
[
33,
1297,
2,
1,
1009,
4,
1109,
11739,
4762,
358,
5,
25,
245,
22,
1706,
17,
20098,
5,
3215,
21,
37,
1110,
3,
13,
1041,
4,
24,
603,
490,
2,
71477,
20098,
104447,
2,
20961,
1,
2604,
4,
1,
329,
3,
6224,
831,
16002,
2,
8,
603,
78967,
29546,
23,
803,
20,
25,
416,
5,
8,
232,
4,
277,
6,
1855,
4601,
3,
29546,
54,
8,
3609,
5,
57211,
49,
4,
1,
277,
18,
8,
1755,
15691,
3,
341,
25,
416,
693,
42573,
71,
17,
401,
94,
31,
17919,
2,
29546,
7873,
18,
1,
435,
23,
11011,
755,
5,
5167,
3,
7983,
98,
84,
2,
29546,
3267,
8,
3609,
4,
1,
4865,
1075,
2,
6087,
71,
6,
346,
8,
5854,
3,
29546,
824,
1400,
1868,
2,
19,
160,
2,
311,
8,
5496,
2,
20920,
17,
25,
15097,
3,
24,
24,
0,
]
],
dtype=torch.long,
device=torch_device,
)
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
expected_output_ids = [
33,
1297,
2,
1,
1009,
4,
1109,
11739,
4762,
358,
5,
25,
245,
22,
1706,
17,
20098,
5,
3215,
21,
37,
1110,
3,
13,
1041,
4,
24,
603,
490,
2,
71477,
20098,
104447,
2,
20961,
1,
2604,
4,
1,
329,
3,
6224,
831,
16002,
2,
8,
603,
78967,
29546,
23,
803,
20,
25,
416,
5,
8,
232,
4,
277,
6,
1855,
4601,
3,
29546,
54,
8,
3609,
5,
57211,
49,
4,
1,
277,
18,
8,
1755,
15691,
3,
341,
25,
416,
693,
42573,
71,
17,
401,
94,
31,
17919,
2,
29546,
7873,
18,
1,
435,
23,
11011,
755,
5,
5167,
3,
7983,
98,
84,
2,
29546,
3267,
8,
3609,
4,
1,
4865,
1075,
2,
6087,
71,
6,
346,
8,
5854,
3,
29546,
824,
1400,
1868,
2,
19,
160,
2,
311,
8,
5496,
2,
20920,
17,
25,
15097,
3,
24,
24,
0,
33,
1,
142,
1298,
188,
2,
29546,
113,
8,
3654,
4,
1,
1109,
7136,
833,
3,
13,
1645,
4,
29546,
11,
104,
7,
1,
1109,
532,
7129,
2,
10,
83507,
2,
1162,
1123,
2,
6,
7245,
10,
2,
5,
11,
104,
7,
1,
1109,
532,
7129,
2,
10,
24,
24,
10,
22,
10,
13,
770,
5863,
4,
7245,
10,
]
# In 1991, the remains of Russian Tsar Nicholas II and his family ( except for
# Alexei and Maria ) are discovered. The voice of young son, Tsarevich Alexei
# Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young
# Grigori Rasputin is asked by his father and a group of men to perform magic.
# Rasputin has a vision and denounces one of the men as a horse thief. Although
# his father initially slaps him for making such an accusation, Rasputin watches
# as the man is chased outside and beaten. Twenty years later, Rasputin sees a
# vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly
# becomes famous, with people, even a bishop, begging for his blessing. In the
# early 20th century, Rasputin became a symbol of the Russian Orthodox Church.
# The image of Rasputin was used in the Russian national anthem, " Nearer, My God,
# to Heaven ", and was used in the Russian national anthem, " " ( " The Great Spirit
# of Heaven "
output_ids = model.generate(input_ids, max_length=200, do_sample=False)
self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 21,673 | 31.349254 | 119 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_modeling_auto.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import DUMMY_UNKWOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, require_torch, slow
if is_torch_available():
from transformers import (
AutoConfig,
BertConfig,
GPT2Config,
T5Config,
AutoModel,
BertModel,
AutoModelForPreTraining,
BertForPreTraining,
AutoModelForCausalLM,
GPT2LMHeadModel,
AutoModelWithLMHead,
AutoModelForMaskedLM,
BertForMaskedLM,
RobertaForMaskedLM,
AutoModelForSeq2SeqLM,
T5ForConditionalGeneration,
AutoModelForSequenceClassification,
BertForSequenceClassification,
AutoModelForQuestionAnswering,
BertForQuestionAnswering,
AutoModelForTokenClassification,
BertForTokenClassification,
)
from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.modeling_gpt2 import GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.modeling_auto import (
MODEL_MAPPING,
MODEL_FOR_PRETRAINING_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
MODEL_FOR_CAUSAL_LM_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
)
@require_torch
class AutoModelTest(unittest.TestCase):
@slow
def test_model_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModel.from_pretrained(model_name)
model, loading_info = AutoModel.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertModel)
for value in loading_info.values():
self.assertEqual(len(value), 0)
@slow
def test_model_for_pretraining_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForPreTraining.from_pretrained(model_name)
model, loading_info = AutoModelForPreTraining.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForPreTraining)
for key, value in loading_info.items():
# Only one value should not be initialized and in the missing keys.
self.assertEqual(len(value), 1 if key == "missing_keys" else 0)
@slow
def test_lmhead_model_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelWithLMHead.from_pretrained(model_name)
model, loading_info = AutoModelWithLMHead.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForMaskedLM)
@slow
def test_model_for_causal_lm(self):
for model_name in GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, GPT2Config)
model = AutoModelForCausalLM.from_pretrained(model_name)
model, loading_info = AutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, GPT2LMHeadModel)
@slow
def test_model_for_masked_lm(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForMaskedLM.from_pretrained(model_name)
model, loading_info = AutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForMaskedLM)
@slow
def test_model_for_encoder_decoder_lm(self):
for model_name in T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, T5Config)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, T5ForConditionalGeneration)
@slow
def test_sequence_classification_model_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model, loading_info = AutoModelForSequenceClassification.from_pretrained(
model_name, output_loading_info=True
)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForSequenceClassification)
@slow
def test_question_answering_model_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)
model, loading_info = AutoModelForQuestionAnswering.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForQuestionAnswering)
@slow
def test_token_classification_model_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForTokenClassification.from_pretrained(model_name)
model, loading_info = AutoModelForTokenClassification.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForTokenClassification)
def test_from_pretrained_identifier(self):
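        # SMALL_MODEL_IDENTIFIER points to a tiny dummy BERT checkpoint, used only to keep this test fast.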
model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
self.assertIsInstance(model, BertForMaskedLM)
self.assertEqual(model.num_parameters(), 14830)
self.assertEqual(model.num_parameters(only_trainable=True), 14830)
def test_from_identifier_from_model_type(self):
model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKWOWN_IDENTIFIER)
self.assertIsInstance(model, RobertaForMaskedLM)
self.assertEqual(model.num_parameters(), 14830)
self.assertEqual(model.num_parameters(only_trainable=True), 14830)
def test_parents_and_children_in_mappings(self):
        # Test that the children are placed before the parents in the mappings, as the `isinstance` check would
        # otherwise be triggered by the parents first and return the wrong configuration type when using auto models.
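        # e.g. RobertaConfig subclasses BertConfig, so its entry must come before BertConfig's,
        # otherwise isinstance(roberta_config, BertConfig) would match the BERT entry first.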
mappings = (
MODEL_MAPPING,
MODEL_FOR_PRETRAINING_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
MODEL_FOR_CAUSAL_LM_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
)
for mapping in mappings:
mapping = tuple(mapping.items())
for index, (child_config, child_model) in enumerate(mapping[1:]):
for parent_config, parent_model in mapping[: index + 1]:
with self.subTest(
msg="Testing if {} is child of {}".format(child_config.__name__, parent_config.__name__)
):
self.assertFalse(issubclass(child_config, parent_config))
self.assertFalse(issubclass(child_model, parent_model))
| 9,571 | 42.908257 | 119 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_tokenization_fast.py | import logging
import unittest
from collections import namedtuple
from itertools import takewhile
from transformers import (
BertTokenizer,
BertTokenizerFast,
DistilBertTokenizer,
GPT2Tokenizer,
GPT2TokenizerFast,
OpenAIGPTTokenizer,
PreTrainedTokenizer,
RobertaTokenizer,
TransfoXLTokenizer,
is_torch_available,
)
from transformers.testing_utils import require_torch
from transformers.tokenization_distilbert import DistilBertTokenizerFast
from transformers.tokenization_openai import OpenAIGPTTokenizerFast
from transformers.tokenization_roberta import RobertaTokenizerFast
from transformers.tokenization_transfo_xl import TransfoXLTokenizerFast
logger = logging.getLogger(__name__)
NON_ENGLISH_TAGS = ["chinese", "dutch", "french", "finnish", "german", "multilingual"]
Tokenizer = namedtuple("Tokenizer", ["name", "rust_cls", "python_cls", "vocab_key", "filter", "kwargs"])
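# Each Tokenizer entry pairs a slow (python) and fast (rust) tokenizer class, plus the vocab map key,
# an optional filter over pretrained checkpoint names, and extra init kwargs.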
def filter_non_english(_: Tokenizer, pretrained_name: str):
""" Filter all the model for non-english language """
return not any([lang in pretrained_name for lang in NON_ENGLISH_TAGS])
def filter_roberta_detectors(_: Tokenizer, pretrained_name: str):
return "detector" not in pretrained_name
class CommonFastTokenizerTest(unittest.TestCase):
TOKENIZERS_CLASSES = frozenset([])
def setUp(self) -> None:
with open("tests/fixtures/sample_text.txt", encoding="utf-8") as f_data:
self._data = f_data.read().replace("\n\n", "\n").strip()
def test_all_tokenizers(self):
for tok_case in self.TOKENIZERS_CLASSES:
for pretrained_name in tok_case.python_cls.pretrained_vocab_files_map[tok_case.vocab_key].keys():
                # Tokenizer.filter makes it possible to filter which Tokenizer to test based on all the
# information available in Tokenizer (name, rust class, python class, vocab key name)
if tok_case.filter is None or (
tok_case.filter is not None and tok_case.filter(tok_case, pretrained_name)
):
kwargs = dict(t for t in tok_case.kwargs) if tok_case.kwargs else {}
with self.subTest("{} ({})".format(tok_case.name, pretrained_name)):
tokenizer_r = tok_case.rust_cls.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = tok_case.python_cls.from_pretrained(pretrained_name, **kwargs)
self.fast_align_python(tokenizer_r, tokenizer_p, tok_case, pretrained_name)
self.fast_only(tokenizer_r)
def test_pretokenized_tokenizers(self):
for tok_case in self.TOKENIZERS_CLASSES:
for pretrained_name in tok_case.python_cls.pretrained_vocab_files_map[tok_case.vocab_key].keys():
                # Tokenizer.filter makes it possible to filter which Tokenizer to test based on all the
# information available in Tokenizer (name, rust class, python class, vocab key name)
if tok_case.filter is None or (
tok_case.filter is not None and tok_case.filter(tok_case, pretrained_name)
):
with self.subTest("{} ({})".format(tok_case.name, pretrained_name)):
tokenizer_r = tok_case.rust_cls.from_pretrained(pretrained_name, add_prefix_space=True)
tokenizer_p = tok_case.python_cls.from_pretrained(pretrained_name, add_prefix_space=True)
self.assert_pretokenized_inputs(tokenizer_r, tokenizer_p)
def fast_align_python(self, tokenizer_r, tokenizer_p, tok_case, pretrained_name):
# Check is_fast is set correctly
self.assertFalse(tokenizer_p.is_fast)
self.assertTrue(tokenizer_r.is_fast)
# Check that Rust and Python align
self.assert_tokenization_python_rust_equals(tokenizer_r, tokenizer_p)
self.assert_num_special_tokens_to_add_equal(tokenizer_r, tokenizer_p)
self.assert_max_length_equal(tokenizer_r, tokenizer_p)
self.assert_special_tokens_map_equal(tokenizer_r, tokenizer_p)
self.assert_embeded_special_tokens(tokenizer_r, tokenizer_p)
self.assert_padding(tokenizer_r, tokenizer_p)
self.assert_create_token_type_ids(tokenizer_r, tokenizer_p)
self.assert_prepare_for_model(tokenizer_r, tokenizer_p)
def fast_only(self, tokenizer_r):
        # Ensure passing None raises an error
self.assertRaises(ValueError, tokenizer_r.tokenize, None)
self.assertRaises(ValueError, tokenizer_r.encode, None)
self.assertRaises(ValueError, tokenizer_r.encode_plus, None)
self.assertRaises(ValueError, tokenizer_r.batch_encode_plus, None)
self.assert_add_tokens(tokenizer_r)
self.assert_offsets_mapping(tokenizer_r)
self.assert_add_special_tokens(tokenizer_r)
self.assert_alignement_methods(tokenizer_r)
self.assert_batch_encode_dynamic_overflowing(tokenizer_r)
def assert_alignement_methods(self, tokenizer_r):
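        # Exercises the offset/alignment helpers: token_to_word, word_to_tokens, token_to_chars,
        # char_to_token, char_to_word and word_to_chars, on both single and batched encodings.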
words = ["Wonderful", "no", "inspiration", "example", "with", "subtoken"]
text = " ".join(words)
batch_size = 3
encoding = tokenizer_r.encode_plus(text, add_special_tokens=False)
batch_encoding = tokenizer_r.batch_encode_plus([text] * batch_size, add_special_tokens=False)
num_tokens = len(encoding["input_ids"])
last_word_index = len(words) - 1
last_token_index = num_tokens - 1
last_batch_index = batch_size - 1
last_char_index = len(text) - 1
# words, tokens
self.assertEqual(len(encoding.words(0)), num_tokens)
self.assertEqual(max(encoding.words(0)), last_word_index)
self.assertEqual(min(encoding.words(0)), 0)
self.assertEqual(len(batch_encoding.words(last_batch_index)), num_tokens)
self.assertEqual(max(batch_encoding.words(last_batch_index)), last_word_index)
self.assertEqual(min(batch_encoding.words(last_batch_index)), 0)
self.assertEqual(len(encoding.tokens(0)), num_tokens)
# Assert token_to_word
self.assertEqual(encoding.token_to_word(0), 0)
self.assertEqual(encoding.token_to_word(0, 0), 0)
self.assertEqual(encoding.token_to_word(last_token_index), last_word_index)
self.assertEqual(encoding.token_to_word(0, last_token_index), last_word_index)
self.assertEqual(batch_encoding.token_to_word(1, 0), 0)
self.assertEqual(batch_encoding.token_to_word(0, last_token_index), last_word_index)
self.assertEqual(batch_encoding.token_to_word(last_batch_index, last_token_index), last_word_index)
# Assert word_to_tokens
self.assertEqual(encoding.word_to_tokens(0).start, 0)
self.assertEqual(encoding.word_to_tokens(0, 0).start, 0)
self.assertEqual(encoding.word_to_tokens(last_word_index).end, last_token_index + 1)
self.assertEqual(encoding.word_to_tokens(0, last_word_index).end, last_token_index + 1)
self.assertEqual(batch_encoding.word_to_tokens(1, 0).start, 0)
self.assertEqual(batch_encoding.word_to_tokens(0, last_word_index).end, last_token_index + 1)
self.assertEqual(batch_encoding.word_to_tokens(last_batch_index, last_word_index).end, last_token_index + 1)
# Assert token_to_chars
self.assertEqual(encoding.token_to_chars(0).start, 0)
self.assertEqual(encoding.token_to_chars(0, 0).start, 0)
self.assertEqual(encoding.token_to_chars(last_token_index).end, last_char_index + 1)
self.assertEqual(encoding.token_to_chars(0, last_token_index).end, last_char_index + 1)
self.assertEqual(batch_encoding.token_to_chars(1, 0).start, 0)
self.assertEqual(batch_encoding.token_to_chars(0, last_token_index).end, last_char_index + 1)
self.assertEqual(batch_encoding.token_to_chars(last_batch_index, last_token_index).end, last_char_index + 1)
# Assert char_to_token
self.assertEqual(encoding.char_to_token(0), 0)
self.assertEqual(encoding.char_to_token(0, 0), 0)
self.assertEqual(encoding.char_to_token(last_char_index), last_token_index)
self.assertEqual(encoding.char_to_token(0, last_char_index), last_token_index)
self.assertEqual(batch_encoding.char_to_token(1, 0), 0)
self.assertEqual(batch_encoding.char_to_token(0, last_char_index), last_token_index)
self.assertEqual(batch_encoding.char_to_token(last_batch_index, last_char_index), last_token_index)
# Assert char_to_word
self.assertEqual(encoding.char_to_word(0), 0)
self.assertEqual(encoding.char_to_word(0, 0), 0)
self.assertEqual(encoding.char_to_word(last_char_index), last_word_index)
self.assertEqual(encoding.char_to_word(0, last_char_index), last_word_index)
self.assertEqual(batch_encoding.char_to_word(1, 0), 0)
self.assertEqual(batch_encoding.char_to_word(0, last_char_index), last_word_index)
self.assertEqual(batch_encoding.char_to_word(last_batch_index, last_char_index), last_word_index)
# Assert word_to_chars
self.assertEqual(encoding.word_to_chars(0).start, 0)
self.assertEqual(encoding.word_to_chars(0, 0).start, 0)
self.assertEqual(encoding.word_to_chars(last_word_index).end, last_char_index + 1)
self.assertEqual(encoding.word_to_chars(0, last_word_index).end, last_char_index + 1)
self.assertEqual(batch_encoding.word_to_chars(1, 0).start, 0)
self.assertEqual(batch_encoding.word_to_chars(0, last_word_index).end, last_char_index + 1)
self.assertEqual(batch_encoding.word_to_chars(last_batch_index, last_word_index).end, last_char_index + 1)
def assert_tokenization_python_rust_equals(self, tokenizer_r, tokenizer_p):
# Ensure basic input match
input_p = tokenizer_p.encode_plus(self._data)
input_r = tokenizer_r.encode_plus(self._data)
for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()):
self.assertSequenceEqual(input_p[key], input_r[key])
input_pairs_p = tokenizer_p.encode_plus(self._data, self._data)
input_pairs_r = tokenizer_r.encode_plus(self._data, self._data)
for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()):
self.assertSequenceEqual(input_pairs_p[key], input_pairs_r[key])
# Ensure truncation match
input_p = tokenizer_p.encode_plus(self._data, max_length=512, truncation=True)
input_r = tokenizer_r.encode_plus(self._data, max_length=512, truncation=True)
for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()):
self.assertSequenceEqual(input_p[key], input_r[key])
# Ensure truncation with stride match
input_p = tokenizer_p.encode_plus(
self._data, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True
)
input_r = tokenizer_r.encode_plus(
self._data, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True
)
for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()):
self.assertSequenceEqual(input_p[key], input_r[key][0])
def assert_num_special_tokens_to_add_equal(self, tokenizer_r, tokenizer_p):
# Check we have the same number of added_tokens for both pair and non-pair inputs.
self.assertEqual(tokenizer_r.num_special_tokens_to_add(False), tokenizer_p.num_special_tokens_to_add(False))
self.assertEqual(tokenizer_r.num_special_tokens_to_add(True), tokenizer_p.num_special_tokens_to_add(True))
def assert_max_length_equal(self, tokenizer_r, tokenizer_p):
# Check we have the correct max_length for both pair and non-pair inputs.
self.assertEqual(tokenizer_r.max_len_single_sentence, tokenizer_p.max_len_single_sentence)
self.assertEqual(tokenizer_r.max_len_sentences_pair, tokenizer_p.max_len_sentences_pair)
def assert_special_tokens_map_equal(self, tokenizer_r, tokenizer_p):
# Assert the set of special tokens match.
self.assertSequenceEqual(
tokenizer_p.special_tokens_map.items(), tokenizer_r.special_tokens_map.items(),
)
def assert_add_tokens(self, tokenizer_r):
vocab_size = tokenizer_r.vocab_size
self.assertEqual(tokenizer_r.add_tokens(""), 0)
self.assertEqual(tokenizer_r.add_tokens("testoken"), 1)
self.assertEqual(tokenizer_r.add_tokens(["testoken1", "testtoken2"]), 2)
self.assertEqual(len(tokenizer_r), vocab_size + 3)
self.assertEqual(tokenizer_r.add_special_tokens({}), 0)
self.assertEqual(tokenizer_r.add_special_tokens({"bos_token": "[BOS]", "eos_token": "[EOS]"}), 2)
self.assertRaises(
AssertionError, tokenizer_r.add_special_tokens, {"additional_special_tokens": "<testtoken1>"}
)
self.assertEqual(tokenizer_r.add_special_tokens({"additional_special_tokens": ["<testtoken2>"]}), 1)
self.assertEqual(
tokenizer_r.add_special_tokens({"additional_special_tokens": ["<testtoken3>", "<testtoken4>"]}), 2
)
self.assertEqual(len(tokenizer_r), vocab_size + 8)
def assert_offsets_mapping(self, tokenizer_r):
text = "Wonderful no inspiration example with subtoken"
pair = "Along with an awesome pair"
# No pair
tokens_with_offsets = tokenizer_r.encode_plus(
text, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True
)
added_tokens = tokenizer_r.num_special_tokens_to_add(False)
offsets = tokens_with_offsets["offset_mapping"]
# Assert there is the same number of tokens and offsets
self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))
# Assert there are exactly added_tokens special tokens
self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)
# Pairs
tokens_with_offsets = tokenizer_r.encode_plus(
text, pair, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True
)
added_tokens = tokenizer_r.num_special_tokens_to_add(True)
offsets = tokens_with_offsets["offset_mapping"]
# Assert there is the same number of tokens and offsets
self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))
# Assert there are exactly added_tokens special tokens
self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)
def assert_batch_encode_dynamic_overflowing(self, tokenizer: PreTrainedTokenizer):
"""
When calling batch_encode with multiple sequences, it can return a different number of
overflowing encodings for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
]
This needs to be padded so that it can be represented as a tensor
"""
returned_tensor = "pt" if is_torch_available() else "tf"
if not tokenizer.pad_token or tokenizer.pad_token_id < 0:
return
tokens = tokenizer.encode_plus(
"HuggingFace is solving NLP one commit at a time",
max_length=6,
padding=True,
truncation=True,
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
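# Note: overflow_to_sample_mapping (when returned) is 1-D, one entry per produced encoding,
# so it is excluded from the 2-D shape checks below.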
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
self.assertEqual(len(tokens[key].shape), 2)
# Mono sample
tokens = tokenizer.batch_encode_plus(
["HuggingFace is solving NLP one commit at a time"],
max_length=6,
padding=True,
truncation="only_first",
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
self.assertEqual(len(tokens[key].shape), 2)
self.assertEqual(tokens[key].shape[-1], 6)
# Multi sample
tokens = tokenizer.batch_encode_plus(
["HuggingFace is solving NLP one commit at a time", "Very tiny input"],
max_length=6,
padding=True,
truncation="only_first",
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
self.assertEqual(len(tokens[key].shape), 2)
self.assertEqual(tokens[key].shape[-1], 6)
def assert_pretokenized_inputs(self, tokenizer_r, tokenizer_p):
# Input string
pretokenized_input_simple = "This is a sample input".split()
pretokenized_input_pair = "This is a sample pair".split()
# Test encode for pretokenized inputs
output_r = tokenizer_r.encode(pretokenized_input_simple, is_pretokenized=True)
output_p = tokenizer_p.encode(pretokenized_input_simple, is_pretokenized=True)
self.assertEqual(output_p, output_r)
kwargs = {
"is_pretokenized": True,
"return_token_type_ids": True,
"return_attention_mask": True,
"return_overflowing_tokens": False,
"return_special_tokens_mask": True,
"return_offsets_mapping": False, # Not implemented in python tokenizers
}
batch_kwargs = {
"is_pretokenized": True,
"return_token_type_ids": True,
"return_attention_mask": True, # we have an 's' here
"return_overflowing_tokens": False,
"return_special_tokens_mask": True, # we have an 's' here
"return_offsets_mapping": False, # Not implemented in python tokenizers
}
# Test encode_plus for pretokenized inputs
output_r = tokenizer_r.encode_plus(pretokenized_input_simple, **kwargs)
output_p = tokenizer_p.encode_plus(pretokenized_input_simple, **kwargs)
for key in output_p.keys():
self.assertEqual(output_p[key], output_r[key])
# Test batch_encode_plus for pretokenized inputs
input_batch = ([pretokenized_input_simple] * 2) + [pretokenized_input_simple + pretokenized_input_pair]
output_r = tokenizer_r.batch_encode_plus(input_batch, **batch_kwargs)
output_p = tokenizer_p.batch_encode_plus(input_batch, **batch_kwargs)
for key in output_p.keys():
self.assertEqual(output_p[key], output_r[key])
# Test encode for pretokenized inputs pairs
output_r = tokenizer_r.encode(pretokenized_input_simple, pretokenized_input_pair, is_pretokenized=True)
output_p = tokenizer_p.encode(pretokenized_input_simple, pretokenized_input_pair, is_pretokenized=True)
self.assertEqual(output_p, output_r)
# Test encode_plus for pretokenized inputs
output_r = tokenizer_r.encode_plus(pretokenized_input_simple, pretokenized_input_pair, **kwargs)
output_p = tokenizer_p.encode_plus(pretokenized_input_simple, pretokenized_input_pair, **kwargs)
for key in output_p.keys():
self.assertEqual(output_p[key], output_r[key])
# Test batch_encode_plus for pretokenized inputs
input_batch_pair = ([pretokenized_input_simple, pretokenized_input_pair] * 2) + [
pretokenized_input_simple + pretokenized_input_pair,
pretokenized_input_pair,
]
output_r = tokenizer_r.batch_encode_plus(input_batch_pair, **batch_kwargs)
output_p = tokenizer_p.batch_encode_plus(input_batch_pair, **batch_kwargs)
for key in output_p.keys():
self.assertEqual(output_p[key], output_r[key])
def assert_create_token_type_ids(self, tokenizer_r, tokenizer_p):
input_simple = [1, 2, 3]
input_pair = [1, 2, 3]
# Generate output
output_r = tokenizer_r.create_token_type_ids_from_sequences(input_simple)
output_p = tokenizer_p.create_token_type_ids_from_sequences(input_simple)
self.assertEqual(output_p, output_r)
# Generate pair output
output_r = tokenizer_r.create_token_type_ids_from_sequences(input_simple, input_pair)
output_p = tokenizer_p.create_token_type_ids_from_sequences(input_simple, input_pair)
self.assertEqual(output_p, output_r)
def assert_build_inputs_with_special_tokens(self, tokenizer_r, tokenizer_p):
# Input string
input_simple = tokenizer_p.tokenize("This is a sample input")
input_pair = tokenizer_p.tokenize("This is a sample pair")
# Generate output
output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple)
output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple)
self.assertEqual(output_p, output_r)
# Generate pair output
output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair)
output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair)
self.assertEqual(output_p, output_r)
# Input tokens id
input_simple = tokenizer_p.encode("This is a sample input")
input_pair = tokenizer_p.encode("This is a sample pair")
# Generate output
output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple)
output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple)
self.assertEqual(output_p, output_r)
# Generate pair output
output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair)
output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair)
self.assertEqual(output_p, output_r)
def assert_padding(self, tokenizer_r, tokenizer_p, max_length=15):
def assert_padded_input_match(input_r: list, input_p: list, max_length: int):
# Ensure we match max_length
self.assertEqual(len(input_r), max_length)
self.assertEqual(len(input_p), max_length)
# Ensure the number of padded tokens is the same
padded_tokens_r = list(takewhile(lambda i: i == tokenizer_r.pad_token_id, reversed(input_r)))
padded_tokens_p = list(takewhile(lambda i: i == tokenizer_p.pad_token_id, reversed(input_p)))
self.assertSequenceEqual(padded_tokens_r, padded_tokens_p)
def assert_batch_padded_input_match(input_r: dict, input_p: dict, max_length: int):
for i_r in input_r.values():
self.assertEqual(len(i_r), 2)
self.assertEqual(len(i_r[0]), max_length)
self.assertEqual(len(i_r[1]), max_length)
for i_r, i_p in zip(input_r["input_ids"], input_p["input_ids"]):
assert_padded_input_match(i_r, i_p, max_length)
for i_r, i_p in zip(input_r["attention_mask"], input_p["attention_mask"]):
self.assertSequenceEqual(i_r, i_p)
# Encode - Simple input
input_r = tokenizer_r.encode("This is a simple input", max_length=max_length, pad_to_max_length=True)
input_p = tokenizer_p.encode("This is a simple input", max_length=max_length, pad_to_max_length=True)
assert_padded_input_match(input_r, input_p, max_length)
input_r = tokenizer_r.encode("This is a simple input", max_length=max_length, padding="max_length")
input_p = tokenizer_p.encode("This is a simple input", max_length=max_length, padding="max_length")
assert_padded_input_match(input_r, input_p, max_length)
input_r = tokenizer_r.encode("This is a simple input", padding="longest")
input_p = tokenizer_p.encode("This is a simple input", padding=True)
assert_padded_input_match(input_r, input_p, len(input_r))
# Encode - Pair input
input_r = tokenizer_r.encode(
"This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True
)
input_p = tokenizer_p.encode(
"This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True
)
assert_padded_input_match(input_r, input_p, max_length)
input_r = tokenizer_r.encode(
"This is a simple input", "This is a pair", max_length=max_length, padding="max_length"
)
input_p = tokenizer_p.encode(
"This is a simple input", "This is a pair", max_length=max_length, padding="max_length"
)
assert_padded_input_match(input_r, input_p, max_length)
input_r = tokenizer_r.encode("This is a simple input", "This is a pair", padding=True)
input_p = tokenizer_p.encode("This is a simple input", "This is a pair", padding="longest")
assert_padded_input_match(input_r, input_p, len(input_r))
# Encode_plus - Simple input
input_r = tokenizer_r.encode_plus("This is a simple input", max_length=max_length, pad_to_max_length=True)
input_p = tokenizer_p.encode_plus("This is a simple input", max_length=max_length, pad_to_max_length=True)
assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus("This is a simple input", max_length=max_length, padding="max_length")
input_p = tokenizer_p.encode_plus("This is a simple input", max_length=max_length, padding="max_length")
assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus("This is a simple input", padding="longest")
input_p = tokenizer_p.encode_plus("This is a simple input", padding=True)
assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]))
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
# Encode_plus - Pair input
input_r = tokenizer_r.encode_plus(
"This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True
)
input_p = tokenizer_p.encode_plus(
"This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True
)
assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus(
"This is a simple input", "This is a pair", max_length=max_length, padding="max_length"
)
input_p = tokenizer_p.encode_plus(
"This is a simple input", "This is a pair", max_length=max_length, padding="max_length"
)
assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus("This is a simple input", "This is a pair", padding="longest")
input_p = tokenizer_p.encode_plus("This is a simple input", "This is a pair", padding=True)
assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]))
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
# Batch_encode_plus - Simple input
input_r = tokenizer_r.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"], max_length=max_length, pad_to_max_length=True
)
input_p = tokenizer_p.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"], max_length=max_length, pad_to_max_length=True
)
assert_batch_padded_input_match(input_r, input_p, max_length)
input_r = tokenizer_r.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding="max_length",
)
input_p = tokenizer_p.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding="max_length",
)
assert_batch_padded_input_match(input_r, input_p, max_length)
input_r = tokenizer_r.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding="longest",
)
input_p = tokenizer_p.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding=True,
)
assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]))
input_r = tokenizer_r.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"], padding="longest"
)
input_p = tokenizer_p.batch_encode_plus(["This is a simple input 1", "This is a simple input 2"], padding=True)
assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]))
# Batch_encode_plus - Pair input
input_r = tokenizer_r.batch_encode_plus(
[
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
],
max_length=max_length,
truncation=True,
padding="max_length",
)
input_p = tokenizer_p.batch_encode_plus(
[
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
],
max_length=max_length,
truncation=True,
padding="max_length",
)
assert_batch_padded_input_match(input_r, input_p, max_length)
input_r = tokenizer_r.batch_encode_plus(
[
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
],
padding=True,
)
input_p = tokenizer_p.batch_encode_plus(
[
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
],
padding="longest",
)
assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]))
# Using pad on single examples after tokenization
input_r = tokenizer_r.encode_plus("This is an input 1")
input_r = tokenizer_r.pad(input_r)
input_p = tokenizer_r.encode_plus("This is an input 1")
input_p = tokenizer_r.pad(input_p)
assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]))
# Using pad on single examples after tokenization
input_r = tokenizer_r.encode_plus("This is an input 1")
input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")
input_p = tokenizer_r.encode_plus("This is an input 1")
input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length")
assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length)
# Using pad after tokenization
input_r = tokenizer_r.batch_encode_plus(
["This is a input 1", "This is a much longer input whilch should be padded"]
)
input_r = tokenizer_r.pad(input_r)
input_p = tokenizer_r.batch_encode_plus(
["This is a input 1", "This is a much longer input whilch should be padded"]
)
input_p = tokenizer_r.pad(input_p)
assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]))
# Using pad after tokenization
input_r = tokenizer_r.batch_encode_plus(
["This is a input 1", "This is a much longer input whilch should be padded"]
)
input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")
input_p = tokenizer_r.batch_encode_plus(
["This is a input 1", "This is a much longer input whilch should be padded"]
)
input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length")
assert_batch_padded_input_match(input_r, input_p, max_length)
def assert_save_pretrained(self, tokenizer_r, tokenizer_p):
# Check both tokenizers save the same files
self.assertSequenceEqual(tokenizer_r.save_vocabulary("."), tokenizer_p.save_vocabulary("."))
# Checks everything loads correctly in the same way
tokenizer_rp, tokenizer_pp = tokenizer_r.from_pretrained("."), tokenizer_p.from_pretrained(".")
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_rp, key))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
def assert_embeded_special_tokens(self, tokenizer_r, tokenizer_p):
sentence = "A, <mask> AllenNLP sentence."
tokens_r = tokenizer_r.encode_plus(
sentence, add_special_tokens=True, return_attention_mask=False, return_token_type_ids=True
)
tokens_p = tokenizer_p.encode_plus(
sentence, add_special_tokens=True, return_attention_mask=False, return_token_type_ids=True
)
for key in tokens_p.keys():
self.assertEqual(tokens_r[key], tokens_p[key])
self.assertEqual(sum(tokens_r["token_type_ids"]), 0)
self.assertEqual(sum(tokens_p["token_type_ids"]), 0)
tokens_r = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
tokens_p = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
self.assertSequenceEqual(tokens_r, tokens_p)
def assert_add_special_tokens(self, tokenizer_r):
simple_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=False)
# pair_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=True)
for text in ["", " "]:
# tokenize()
no_special_tokens = tokenizer_r.tokenize(text, add_special_tokens=False)
with_special_tokens = tokenizer_r.tokenize(text, add_special_tokens=True)
self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)
# encode()
no_special_tokens = tokenizer_r.encode(text, add_special_tokens=False)
with_special_tokens = tokenizer_r.encode(text, add_special_tokens=True)
self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)
# encode_plus()
no_special_tokens = tokenizer_r.encode_plus(text, add_special_tokens=False)
with_special_tokens = tokenizer_r.encode_plus(text, add_special_tokens=True)
for key in no_special_tokens.keys():
self.assertEqual(
len(no_special_tokens[key]), len(with_special_tokens[key]) - simple_num_special_tokens_to_add
)
# batch_encode_plus()
no_special_tokens = tokenizer_r.batch_encode_plus([text, text], add_special_tokens=False)
with_special_tokens = tokenizer_r.batch_encode_plus([text, text], add_special_tokens=True)
for key in no_special_tokens.keys():
for i_no, i_with in zip(no_special_tokens[key], with_special_tokens[key]):
self.assertEqual(len(i_no), len(i_with) - simple_num_special_tokens_to_add)
def assert_prepare_for_model(self, tokenizer_r, tokenizer_p):
string_sequence = "Asserting that both tokenizers are equal"
python_output = tokenizer_p.prepare_for_model(tokenizer_p.encode(string_sequence))
rust_output = tokenizer_r.prepare_for_model(tokenizer_r.encode(string_sequence))
self.assertEqual(python_output, rust_output)
class WordPieceFastTokenizerTest(CommonFastTokenizerTest):
"""
Override all the specific methods to test WordPiece behavior
"""
TOKENIZERS_CLASSES = frozenset(
[
Tokenizer("Bert", BertTokenizerFast, BertTokenizer, "vocab_file", filter_non_english, None),
Tokenizer(
"DistilBert", DistilBertTokenizerFast, DistilBertTokenizer, "vocab_file", filter_non_english, None
),
]
)
def fast_only(self, tokenizer_r):
super().fast_only(tokenizer_r)
self.assert_offsets_with_special_characters(tokenizer_r)
def assert_add_special_tokens(self, tokenizer_r):
super().assert_add_special_tokens(tokenizer_r)
def assert_offsets_with_special_characters(self, tokenizer_r):
sentence = "A, naïve [MASK] AllenNLP sentence."
tokens = tokenizer_r.encode_plus(
sentence,
return_attention_mask=False,
return_token_type_ids=False,
return_offsets_mapping=True,
add_special_tokens=True,
)
do_lower_case = tokenizer_r.init_kwargs.get("do_lower_case")
expected_results = (
[
((0, 0), "[CLS]"),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), "[MASK]"),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), "[SEP]"),
]
if not do_lower_case
else [
((0, 0), "[CLS]"),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), "[MASK]"),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), "[SEP]"),
]
)
self.assertEqual([e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
class RobertaFastTokenizerTest(CommonFastTokenizerTest):
TOKENIZERS_CLASSES = frozenset(
[
Tokenizer(
"Roberta",
RobertaTokenizerFast,
RobertaTokenizer,
"vocab_file",
filter_roberta_detectors,
(("cls_token", "<s>"),),
)
]
)
def assert_embeded_special_tokens(self, tokenizer_r, tokenizer_p):
sentence = "A, <mask> AllenNLP sentence."
tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
)
tokens_r = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
tokens_p = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
self.assertSequenceEqual(tokens_r, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
self.assertSequenceEqual(tokens_p, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
class NoPaddingTokenFastTokenizerMatchingTest(CommonFastTokenizerTest):
TOKENIZERS_CLASSES = [
Tokenizer("OpenAI GPT", OpenAIGPTTokenizerFast, OpenAIGPTTokenizer, "vocab_file", None, None),
Tokenizer("GPT2", GPT2TokenizerFast, GPT2Tokenizer, "vocab_file", None, [("add_prefix_space", True)]),
]
def fast_align_python(self, tokenizer_r, tokenizer_p, tok_case, pretrained_name):
# Check is_fast is set correctly
self.assertFalse(tokenizer_p.is_fast)
self.assertTrue(tokenizer_r.is_fast)
# Check that Rust and Python align
self.assert_tokenization_python_rust_equals(tokenizer_r, tokenizer_p)
self.assert_num_special_tokens_to_add_equal(tokenizer_r, tokenizer_p)
self.assert_max_length_equal(tokenizer_r, tokenizer_p)
self.assert_special_tokens_map_equal(tokenizer_r, tokenizer_p)
self.assert_embeded_special_tokens(tokenizer_r, tokenizer_p)
self.assert_padding(tokenizer_r, tokenizer_p)
# Tokenizer-specific: rebuild the rust tokenizer with its class-specific kwargs before checking pretokenized inputs
kwargs = {}
if tok_case.kwargs is not None:
kwargs = dict(tok_case.kwargs)
tokenizer_r = tok_case.rust_cls.from_pretrained(pretrained_name, **kwargs)
self.assert_pretokenized_inputs(tokenizer_r, tokenizer_p)
def assert_padding(self, tokenizer_r, tokenizer_p, max_length=15):
# Simple input
s = "This is a simple input"
s2 = ["This is a simple input 1", "This is a simple input 2"]
p = ("This is a simple input", "This is a pair")
p2 = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
# Simple input
self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
# Simple input
self.assertRaises(
ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
)
# Pair input
self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
# Pair input
self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
# Pair input
self.assertRaises(
ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
)
class TransfoXLFastTokenizerTest(NoPaddingTokenFastTokenizerMatchingTest):
TOKENIZERS_CLASSES = frozenset(
[Tokenizer("TransfoXL", TransfoXLTokenizerFast, TransfoXLTokenizer, "pretrained_vocab_file", None, None)]
)
@require_torch
def test_all_tokenizers(self):
super().test_all_tokenizers()
@require_torch
def test_pretokenized_tokenizers(self):
super().test_pretokenized_tokenizers()
| 43,721 | 47.851397 | 119 | py |
TextSiM | TextSiM-main/MNLI_evaluation_scripts/transformers-3.0.2/tests/test_modeling_albert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
from transformers import (
AlbertConfig,
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
from transformers.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
def __init__(
self, parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_mask = True
self.use_token_type_ids = True
self.use_labels = True
self.vocab_size = 99
self.embedding_size = 16
self.hidden_size = 36
self.num_hidden_layers = 6
self.num_hidden_groups = 6
self.num_attention_heads = 6
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = AlbertConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
num_hidden_groups=self.num_hidden_groups,
)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def check_loss_output(self, result):
self.parent.assertListEqual(list(result["loss"].size()), [])
def create_and_check_albert_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = AlbertModel(config=config)
model.to(torch_device)
model.eval()
sequence_output, pooled_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
sequence_output, pooled_output = model(input_ids, token_type_ids=token_type_ids)
sequence_output, pooled_output = model(input_ids)
result = {
"sequence_output": sequence_output,
"pooled_output": pooled_output,
}
self.parent.assertListEqual(
list(result["sequence_output"].size()), [self.batch_size, self.seq_length, self.hidden_size]
)
self.parent.assertListEqual(list(result["pooled_output"].size()), [self.batch_size, self.hidden_size])
def create_and_check_albert_for_pretraining(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = AlbertForPreTraining(config=config)
model.to(torch_device)
model.eval()
loss, prediction_scores, sop_scores = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=token_labels,
sentence_order_label=sequence_labels,
)
result = {
"loss": loss,
"prediction_scores": prediction_scores,
"sop_scores": sop_scores,
}
self.parent.assertListEqual(
list(result["prediction_scores"].size()), [self.batch_size, self.seq_length, self.vocab_size]
)
self.parent.assertListEqual(list(result["sop_scores"].size()), [self.batch_size, config.num_labels])
self.check_loss_output(result)
def create_and_check_albert_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = AlbertForMaskedLM(config=config)
model.to(torch_device)
model.eval()
loss, prediction_scores = model(
input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
)
result = {
"loss": loss,
"prediction_scores": prediction_scores,
}
self.parent.assertListEqual(
list(result["prediction_scores"].size()), [self.batch_size, self.seq_length, self.vocab_size]
)
self.check_loss_output(result)
def create_and_check_albert_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = AlbertForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
loss, start_logits, end_logits = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
result = {
"loss": loss,
"start_logits": start_logits,
"end_logits": end_logits,
}
self.parent.assertListEqual(list(result["start_logits"].size()), [self.batch_size, self.seq_length])
self.parent.assertListEqual(list(result["end_logits"].size()), [self.batch_size, self.seq_length])
self.check_loss_output(result)
def create_and_check_albert_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = AlbertForSequenceClassification(config)
model.to(torch_device)
model.eval()
loss, logits = model(
input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels
)
result = {
"loss": loss,
"logits": logits,
}
self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.num_labels])
self.check_loss_output(result)
def create_and_check_albert_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = AlbertForTokenClassification(config=config)
model.to(torch_device)
model.eval()
loss, logits = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
result = {
"loss": loss,
"logits": logits,
}
self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.seq_length, self.num_labels])
self.check_loss_output(result)
def create_and_check_albert_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = AlbertForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
loss, logits = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
result = {
"loss": loss,
"logits": logits,
}
self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.num_choices])
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
def setUp(self):
self.model_tester = AlbertModelTester(self)
self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_albert_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_albert_model(*config_and_inputs)
def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_albert_for_pretraining(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_albert_for_masked_lm(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_albert_for_multiple_choice(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_albert_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_albert_for_sequence_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = AlbertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
| 12,399 | 38.616613 | 119 | py |
TextSiM | TextSiM-main/TACRED_evaluation_scripts/LSTM/eval.py | """
Run evaluation with saved models.
"""
import os
import random
import argparse
import pickle
import torch
import torch.nn as nn
import torch.optim as optim
from data.loader import DataLoader
from model.rnn import RelationModel
from utils import torch_utils, scorer, constant, helper
from utils.vocab import Vocab
parser = argparse.ArgumentParser()
parser.add_argument('model_dir', type=str, help='Directory of the model.')
parser.add_argument('--model', type=str, default='best_model.pt', help='Name of the model file.')
parser.add_argument('--data_dir', type=str, default='dataset/tacred')
parser.add_argument('--dataset', type=str, default='test', help="Evaluate on dev or test.")
parser.add_argument('--out', type=str, default='', help="Save model predictions to this dir.")
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--cpu', action='store_true')
args = parser.parse_args()
torch.manual_seed(args.seed)
random.seed(1234)
if args.cpu:
args.cuda = False
elif args.cuda:
torch.cuda.manual_seed(args.seed)
# load opt
model_file = args.model_dir + '/' + args.model
print("Loading model from {}".format(model_file))
opt = torch_utils.load_config(model_file)
model = RelationModel(opt)
model.load(model_file)
# load vocab
vocab_file = args.model_dir + '/vocab.pkl'
vocab = Vocab(vocab_file, load=True)
assert opt['vocab_size'] == vocab.size, "Vocab size must match that in the saved model."
# load data
data_file = opt['data_dir'] + '/{}.json'.format(args.dataset)
print("Loading data from {} with batch size {}...".format(data_file, opt['batch_size']))
batch = DataLoader(data_file, opt['batch_size'], opt, vocab, evaluation=True)
helper.print_config(opt)
id2label = dict([(v,k) for k,v in constant.LABEL_TO_ID.items()])
predictions = []
all_probs = []
for i, b in enumerate(batch):
preds, probs, _ = model.predict(b)
predictions += preds
all_probs += probs
predictions = [id2label[p] for p in predictions]
p, r, f1 = scorer.score(batch.gold(), predictions, verbose=True)
# save probability scores
if len(args.out) > 0:
helper.ensure_dir(os.path.dirname(args.out))
with open(args.out, 'wb') as outfile:
pickle.dump(all_probs, outfile)
print("Prediction scores saved to {}.".format(args.out))
print("Evaluation ended.")
| 2,380 | 30.746667 | 97 | py |
TextSiM | TextSiM-main/TACRED_evaluation_scripts/LSTM/train.py | """
Train a model on TACRED.
"""
import os
from datetime import datetime
import time
import numpy as np
import random
import argparse
from shutil import copyfile
import torch
import torch.nn as nn
import torch.optim as optim
from data.loader import DataLoader
from model.rnn import RelationModel
from utils import scorer, constant, helper
from utils.vocab import Vocab
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='dataset/tacred')
parser.add_argument('--vocab_dir', type=str, default='dataset/vocab')
parser.add_argument('--emb_dim', type=int, default=300, help='Word embedding dimension.')
parser.add_argument('--ner_dim', type=int, default=30, help='NER embedding dimension.')
parser.add_argument('--pos_dim', type=int, default=30, help='POS embedding dimension.')
parser.add_argument('--hidden_dim', type=int, default=200, help='RNN hidden state size.')
parser.add_argument('--num_layers', type=int, default=2, help='Num of RNN layers.')
parser.add_argument('--dropout', type=float, default=0.5, help='Input and RNN dropout rate.')
parser.add_argument('--word_dropout', type=float, default=0.04, help='The rate at which a word is randomly replaced by UNK.')
parser.add_argument('--topn', type=int, default=1e10, help='Only finetune top N embeddings.')
parser.add_argument('--lower', dest='lower', action='store_true', help='Lowercase all words.')
parser.add_argument('--no-lower', dest='lower', action='store_false')
parser.set_defaults(lower=False)
parser.add_argument('--attn', dest='attn', action='store_true', help='Use attention layer.')
parser.add_argument('--no-attn', dest='attn', action='store_false')
parser.set_defaults(attn=True)
parser.add_argument('--attn_dim', type=int, default=200, help='Attention size.')
parser.add_argument('--pe_dim', type=int, default=30, help='Position encoding dimension.')
parser.add_argument('--lr', type=float, default=1.0, help='Applies to SGD and Adagrad.')
parser.add_argument('--lr_decay', type=float, default=0.9)
parser.add_argument('--optim', type=str, default='sgd', help='sgd, adagrad, adam or adamax.')
parser.add_argument('--num_epoch', type=int, default=30)
parser.add_argument('--batch_size', type=int, default=50)
parser.add_argument('--max_grad_norm', type=float, default=5.0, help='Gradient clipping.')
parser.add_argument('--log_step', type=int, default=20, help='Print log every k steps.')
parser.add_argument('--log', type=str, default='logs.txt', help='Write training log to file.')
parser.add_argument('--save_epoch', type=int, default=5, help='Save model checkpoints every k epochs.')
parser.add_argument('--save_dir', type=str, default='./saved_models', help='Root dir for saving models.')
parser.add_argument('--id', type=str, default='00', help='Model ID under which to save models.')
parser.add_argument('--info', type=str, default='', help='Optional info for the experiment.')
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')
args = parser.parse_args()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(1234)
if args.cpu:
args.cuda = False
elif args.cuda:
torch.cuda.manual_seed(args.seed)
# make opt
opt = vars(args)
opt['num_class'] = len(constant.LABEL_TO_ID)
# load vocab
vocab_file = opt['vocab_dir'] + '/vocab.pkl'
vocab = Vocab(vocab_file, load=True)
opt['vocab_size'] = vocab.size
emb_file = opt['vocab_dir'] + '/embedding.npy'
emb_matrix = np.load(emb_file)
assert emb_matrix.shape[0] == vocab.size
assert emb_matrix.shape[1] == opt['emb_dim']
# load data
print("Loading data from {} with batch size {}...".format(opt['data_dir'], opt['batch_size']))
train_batch = DataLoader(opt['data_dir'] + '/train.json', opt['batch_size'], opt, vocab, evaluation=False)
dev_batch = DataLoader(opt['data_dir'] + '/dev.json', opt['batch_size'], opt, vocab, evaluation=True)
model_id = opt['id'] if len(opt['id']) > 1 else '0' + opt['id']
model_save_dir = opt['save_dir'] + '/' + model_id
opt['model_save_dir'] = model_save_dir
helper.ensure_dir(model_save_dir, verbose=True)
# save config
helper.save_config(opt, model_save_dir + '/config.json', verbose=True)
vocab.save(model_save_dir + '/vocab.pkl')
file_logger = helper.FileLogger(model_save_dir + '/' + opt['log'], header="# epoch\ttrain_loss\tdev_loss\tdev_f1")
# print model info
helper.print_config(opt)
# model
model = RelationModel(opt, emb_matrix=emb_matrix)
id2label = dict([(v,k) for k,v in constant.LABEL_TO_ID.items()])
dev_f1_history = []
current_lr = opt['lr']
global_step = 0
global_start_time = time.time()
format_str = '{}: step {}/{} (epoch {}/{}), loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'
max_steps = len(train_batch) * opt['num_epoch']
# start training
for epoch in range(1, opt['num_epoch']+1):
train_loss = 0
for i, batch in enumerate(train_batch):
start_time = time.time()
global_step += 1
loss = model.update(batch)
train_loss += loss
if global_step % opt['log_step'] == 0:
duration = time.time() - start_time
print(format_str.format(datetime.now(), global_step, max_steps, epoch,\
opt['num_epoch'], loss, duration, current_lr))
# eval on dev
print("Evaluating on dev set...")
predictions = []
dev_loss = 0
for i, batch in enumerate(dev_batch):
preds, _, loss = model.predict(batch)
predictions += preds
dev_loss += loss
predictions = [id2label[p] for p in predictions]
dev_p, dev_r, dev_f1 = scorer.score(dev_batch.gold(), predictions)
train_loss = train_loss / train_batch.num_examples * opt['batch_size'] # avg loss per batch
dev_loss = dev_loss / dev_batch.num_examples * opt['batch_size']
print("epoch {}: train_loss = {:.6f}, dev_loss = {:.6f}, dev_f1 = {:.4f}".format(epoch,\
train_loss, dev_loss, dev_f1))
file_logger.log("{}\t{:.6f}\t{:.6f}\t{:.4f}".format(epoch, train_loss, dev_loss, dev_f1))
# save
model_file = model_save_dir + '/checkpoint_epoch_{}.pt'.format(epoch)
model.save(model_file, epoch)
if epoch == 1 or dev_f1 > max(dev_f1_history):
copyfile(model_file, model_save_dir + '/best_model.pt')
print("new best model saved.")
if epoch % opt['save_epoch'] != 0:
os.remove(model_file)
# lr schedule
if len(dev_f1_history) > 10 and dev_f1 <= dev_f1_history[-1] and \
opt['optim'] in ['sgd', 'adagrad']:
current_lr *= opt['lr_decay']
model.update_lr(current_lr)
dev_f1_history += [dev_f1]
print("")
print("Training ended with {} epochs.".format(epoch))
| 6,721 | 40.751553 | 117 | py |
TextSiM | TextSiM-main/TACRED_evaluation_scripts/LSTM/utils/torch_utils.py | """
Utility functions for torch.
"""
import torch
from torch import nn, optim
from torch.optim import Optimizer
### class
class MyAdagrad(Optimizer):
"""My modification of the Adagrad optimizer that allows to specify an initial
accumulater value. This mimics the behavior of the default Adagrad implementation
in Tensorflow. The default PyTorch Adagrad uses 0 for initial acculmulator value.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-2)
lr_decay (float, optional): learning rate decay (default: 0)
init_accu_value (float, optional): initial accumulator value.
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
"""
def __init__(self, params, lr=1e-2, lr_decay=0, init_accu_value=0.1, weight_decay=0):
defaults = dict(lr=lr, lr_decay=lr_decay, init_accu_value=init_accu_value, \
weight_decay=weight_decay)
super(MyAdagrad, self).__init__(params, defaults)
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = 0
state['sum'] = torch.ones(p.data.size()).type_as(p.data) *\
init_accu_value
def share_memory(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['sum'].share_memory_()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
state['step'] += 1
if group['weight_decay'] != 0:
if p.grad.data.is_sparse:
raise RuntimeError("weight_decay option is not compatible with sparse gradients ")
grad = grad.add(group['weight_decay'], p.data)
clr = group['lr'] / (1 + (state['step'] - 1) * group['lr_decay'])
if p.grad.data.is_sparse:
grad = grad.coalesce() # the update is non-linear so indices must be unique
grad_indices = grad._indices()
grad_values = grad._values()
size = torch.Size([x for x in grad.size()])
def make_sparse(values):
constructor = type(p.grad.data)
if grad_indices.dim() == 0 or values.dim() == 0:
return constructor()
return constructor(grad_indices, values, size)
state['sum'].add_(make_sparse(grad_values.pow(2)))
std = state['sum']._sparse_mask(grad)
std_values = std._values().sqrt_().add_(1e-10)
p.data.add_(-clr, make_sparse(grad_values / std_values))
else:
state['sum'].addcmul_(1, grad, grad)
std = state['sum'].sqrt().add_(1e-10)
p.data.addcdiv_(-clr, grad, std)
return loss
### torch specific functions
def get_optimizer(name, parameters, lr):
if name == 'sgd':
return torch.optim.SGD(parameters, lr=lr)
elif name in ['adagrad', 'myadagrad']:
# use my own adagrad to allow for init accumulator value
return MyAdagrad(parameters, lr=lr, init_accu_value=0.1)
elif name == 'adam':
return torch.optim.Adam(parameters, betas=(0.9, 0.99)) # use default lr
elif name == 'adamax':
return torch.optim.Adamax(parameters) # use default lr
else:
raise Exception("Unsupported optimizer: {}".format(name))
def change_lr(optimizer, new_lr):
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
def flatten_indices(seq_lens, width):
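# e.g. seq_lens=[2, 1], width=3 -> [0, 1, 3] (row-major offsets of the valid positions)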
flat = []
for i, l in enumerate(seq_lens):
for j in range(l):
flat.append(i * width + j)
return flat
def set_cuda(var, cuda):
if cuda:
return var.cuda()
return var
def keep_partial_grad(grad, topk):
"""
Keep only the topk rows of grads.
"""
assert topk < grad.size(0)
grad.data[topk:].zero_()
return grad
### model IO
def save(model, optimizer, opt, filename):
params = {
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'config': opt
}
try:
torch.save(params, filename)
except BaseException:
print("[ Warning: model saving failed. ]")
def load(model, optimizer, filename):
try:
dump = torch.load(filename)
except BaseException:
print("[ Fail: model loading failed. ]")
if model is not None:
model.load_state_dict(dump['model'])
if optimizer is not None:
optimizer.load_state_dict(dump['optimizer'])
opt = dump['config']
return model, optimizer, opt
def load_config(filename):
try:
dump = torch.load(filename)
except BaseException:
print("[ Fail: model loading failed. ]")
return dump['config']
| 5,525 | 33.322981 | 106 | py |
TextSiM | TextSiM-main/TACRED_evaluation_scripts/LSTM/data/loader.py | """
Data loader for TACRED json files.
"""
import json
import random
import torch
import numpy as np
from utils import constant, helper, vocab
class DataLoader(object):
"""
Load data from json files, preprocess and prepare batches.
"""
def __init__(self, filename, batch_size, opt, vocab, evaluation=False):
self.batch_size = batch_size
self.opt = opt
self.vocab = vocab
self.eval = evaluation
with open(filename) as infile:
data = json.load(infile)
data = self.preprocess(data, vocab, opt)
# shuffle for training
if not evaluation:
indices = list(range(len(data)))
random.shuffle(indices)
data = [data[i] for i in indices]
id2label = dict([(v,k) for k,v in constant.LABEL_TO_ID.items()])
self.labels = [id2label[d[-1]] for d in data]
self.num_examples = len(data)
# chunk into batches
data = [data[i:i+batch_size] for i in range(0, len(data), batch_size)]
self.data = data
print("{} batches created for {}".format(len(data), filename))
def preprocess(self, data, vocab, opt):
""" Preprocess the data and convert to ids. """
processed = []
for d in data:
tokens = d['token']
if opt['lower']:
tokens = [t.lower() for t in tokens]
# anonymize tokens
ss, se = d['subj_start'], d['subj_end']
os, oe = d['obj_start'], d['obj_end']
tokens[ss:se+1] = ['SUBJ-'+d['subj_type']] * (se-ss+1)
tokens[os:oe+1] = ['OBJ-'+d['obj_type']] * (oe-os+1)
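# e.g. a PERSON subject span ["Bill", "Gates"] becomes ["SUBJ-PERSON", "SUBJ-PERSON"]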
tokens = map_to_ids(tokens, vocab.word2id)
pos = map_to_ids(d['stanford_pos'], constant.POS_TO_ID)
ner = map_to_ids(d['stanford_ner'], constant.NER_TO_ID)
deprel = map_to_ids(d['stanford_deprel'], constant.DEPREL_TO_ID)
l = len(tokens)
subj_positions = get_positions(d['subj_start'], d['subj_end'], l)
obj_positions = get_positions(d['obj_start'], d['obj_end'], l)
relation = constant.LABEL_TO_ID[d['relation']]
processed += [(tokens, pos, ner, deprel, subj_positions, obj_positions, relation)]
return processed
def gold(self):
""" Return gold labels as a list. """
return self.labels
def __len__(self):
#return 50
return len(self.data)
def __getitem__(self, key):
""" Get a batch with index. """
if not isinstance(key, int):
raise TypeError
if key < 0 or key >= len(self.data):
raise IndexError
batch = self.data[key]
batch_size = len(batch)
batch = list(zip(*batch))
assert len(batch) == 7
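# the 7 fields, in the order built by preprocess(): tokens, pos, ner, deprel,
# subj_positions, obj_positions, relation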
# sort all fields by lens for easy RNN operations
lens = [len(x) for x in batch[0]]
batch, orig_idx = sort_all(batch, lens)
# word dropout
if not self.eval:
words = [word_dropout(sent, self.opt['word_dropout']) for sent in batch[0]]
else:
words = batch[0]
# convert to tensors
words = get_long_tensor(words, batch_size)
masks = torch.eq(words, 0)
pos = get_long_tensor(batch[1], batch_size)
ner = get_long_tensor(batch[2], batch_size)
deprel = get_long_tensor(batch[3], batch_size)
subj_positions = get_long_tensor(batch[4], batch_size)
obj_positions = get_long_tensor(batch[5], batch_size)
rels = torch.LongTensor(batch[6])
return (words, masks, pos, ner, deprel, subj_positions, obj_positions, rels, orig_idx)
def __iter__(self):
for i in range(self.__len__()):
yield self.__getitem__(i)
def map_to_ids(tokens, vocab):
ids = [vocab[t] if t in vocab else constant.UNK_ID for t in tokens]
return ids
def get_positions(start_idx, end_idx, length):
""" Get subj/obj position sequence. """
return list(range(-start_idx, 0)) + [0]*(end_idx - start_idx + 1) + \
list(range(1, length-end_idx))
def get_long_tensor(tokens_list, batch_size):
""" Convert list of list of tokens to a padded LongTensor. """
token_len = max(len(x) for x in tokens_list)
tokens = torch.LongTensor(batch_size, token_len).fill_(constant.PAD_ID)
for i, s in enumerate(tokens_list):
tokens[i, :len(s)] = torch.LongTensor(s)
return tokens
def sort_all(batch, lens):
""" Sort all fields by descending order of lens, and return the original indices. """
unsorted_all = [lens] + [range(len(lens))] + list(batch)
sorted_all = [list(t) for t in zip(*sorted(zip(*unsorted_all), reverse=True))]
return sorted_all[2:], sorted_all[1]
def word_dropout(tokens, dropout):
""" Randomly dropout tokens (IDs) and replace them with <UNK> tokens. """
return [constant.UNK_ID if x != constant.UNK_ID and np.random.random() < dropout \
else x for x in tokens]
| 4,983 | 35.647059 | 94 | py |
TextSiM | TextSiM-main/TACRED_evaluation_scripts/LSTM/model/layers.py | """
Additional layers.
"""
import torch
from torch import nn
from torch.nn import init
import torch.nn.functional as F
from utils import constant, torch_utils
class LSTMLayer(nn.Module):
""" A wrapper for LSTM with sequence packing. """
def __init__(self, emb_dim, hidden_dim, num_layers, dropout, use_cuda):
super(LSTMLayer, self).__init__()
self.rnn = nn.LSTM(emb_dim, hidden_dim, num_layers, batch_first=True, dropout=dropout)
self.use_cuda = use_cuda
def forward(self, x, x_mask, init_state):
"""
x: batch_size * seq_len * feature_size (batch_first)
x_mask : batch_size * seq_len
"""
x_lens = x_mask.data.eq(constant.PAD_ID).long().sum(1).squeeze()
_, idx_sort = torch.sort(x_lens, dim=0, descending=True)
_, idx_unsort = torch.sort(idx_sort, dim=0)
lens = list(x_lens[idx_sort])
# sort by seq lens
x = x.index_select(0, idx_sort)
rnn_input = nn.utils.rnn.pack_padded_sequence(x, lens, batch_first=True)
rnn_output, (ht, ct) = self.rnn(rnn_input, init_state)
rnn_output = nn.utils.rnn.pad_packed_sequence(rnn_output, batch_first=True)[0]
# unsort
rnn_output = rnn_output.index_select(0, idx_unsort)
ht = ht.index_select(1, idx_unsort)
ct = ct.index_select(1, idx_unsort)
return rnn_output, (ht, ct)
class PositionAwareAttention(nn.Module):
"""
A position-augmented attention layer where the attention weight is
a = T' . tanh(Ux + Vq + Wf)
where x is the input, q is the query, and f is additional position features.
"""
def __init__(self, input_size, query_size, feature_size, attn_size):
super(PositionAwareAttention, self).__init__()
self.input_size = input_size
self.query_size = query_size
self.feature_size = feature_size
self.attn_size = attn_size
self.ulinear = nn.Linear(input_size, attn_size)
self.vlinear = nn.Linear(query_size, attn_size, bias=False)
if feature_size > 0:
self.wlinear = nn.Linear(feature_size, attn_size, bias=False)
else:
self.wlinear = None
self.tlinear = nn.Linear(attn_size, 1)
self.init_weights()
def init_weights(self):
self.ulinear.weight.data.normal_(std=0.001)
self.vlinear.weight.data.normal_(std=0.001)
if self.wlinear is not None:
self.wlinear.weight.data.normal_(std=0.001)
self.tlinear.weight.data.zero_() # use zero to give uniform attention at the beginning
def forward(self, x, x_mask, q, f):
"""
x : batch_size * seq_len * input_size
q : batch_size * query_size
f : batch_size * seq_len * feature_size
"""
batch_size, seq_len, _ = x.size()
x_proj = self.ulinear(x.contiguous().view(-1, self.input_size)).view(
batch_size, seq_len, self.attn_size)
q_proj = self.vlinear(q.view(-1, self.query_size)).contiguous().view(
batch_size, self.attn_size).unsqueeze(1).expand(
batch_size, seq_len, self.attn_size)
if self.wlinear is not None:
f_proj = self.wlinear(f.view(-1, self.feature_size)).contiguous().view(
batch_size, seq_len, self.attn_size)
projs = [x_proj, q_proj, f_proj]
else:
projs = [x_proj, q_proj]
scores = self.tlinear(torch.tanh(sum(projs)).view(-1, self.attn_size)).view(
batch_size, seq_len)
# mask padding
scores.data.masked_fill_(x_mask.data, -float('inf'))
weights = F.softmax(scores, dim=1)
# weighted average input vectors
outputs = weights.unsqueeze(1).bmm(x).squeeze(1)
return outputs
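# --- Added illustration (not from the original repo): a minimal shape check for ---
# --- PositionAwareAttention, using arbitrary toy sizes and random tensors. ---
if __name__ == '__main__':
    torch.manual_seed(0)
    batch_size, seq_len = 2, 5
    input_size = query_size = 8
    feature_size, attn_size = 4, 16
    attn = PositionAwareAttention(input_size, query_size, feature_size, attn_size)
    x = torch.randn(batch_size, seq_len, input_size)    # input hidden states
    x_mask = torch.zeros(batch_size, seq_len).bool()    # no padding in this toy batch
    q = torch.randn(batch_size, query_size)             # query, e.g. the final LSTM state
    f = torch.randn(batch_size, seq_len, feature_size)  # additional position features
    out = attn(x, x_mask, q, f)
    # the attention layer returns one weighted average vector per example
    assert out.shape == (batch_size, input_size)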
| 3,783 | 36.84 | 94 | py |
TextSiM | TextSiM-main/TACRED_evaluation_scripts/LSTM/model/rnn.py | """
An RNN model for relation extraction, written in PyTorch.
"""
import math
import numpy as np
import torch
from torch import nn
from torch.nn import init
import torch.nn.functional as F
from utils import constant, torch_utils
from model import layers
class RelationModel(object):
""" A wrapper class for the training and evaluation of models. """
def __init__(self, opt, emb_matrix=None):
self.opt = opt
self.model = PositionAwareRNN(opt, emb_matrix)
self.criterion = nn.CrossEntropyLoss()
self.parameters = [p for p in self.model.parameters() if p.requires_grad]
if opt['cuda']:
self.model.cuda()
self.criterion.cuda()
self.optimizer = torch_utils.get_optimizer(opt['optim'], self.parameters, opt['lr'])
def update(self, batch):
""" Run a step of forward and backward model update. """
if self.opt['cuda']:
inputs = [b.cuda() for b in batch[:7]]
labels = batch[7].cuda()
else:
inputs = [b for b in batch[:7]]
labels = batch[7]
# step forward
self.model.train()
self.optimizer.zero_grad()
logits, _ = self.model(inputs)
loss = self.criterion(logits, labels)
# backward
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.opt['max_grad_norm'])
self.optimizer.step()
loss_val = loss.data.item()
return loss_val
def predict(self, batch, unsort=True):
""" Run forward prediction. If unsort is True, recover the original order of the batch. """
if self.opt['cuda']:
inputs = [b.cuda() for b in batch[:7]]
labels = batch[7].cuda()
else:
inputs = [b for b in batch[:7]]
labels = batch[7]
orig_idx = batch[8]
# forward
self.model.eval()
logits, _ = self.model(inputs)
loss = self.criterion(logits, labels)
probs = F.softmax(logits, dim=1).data.cpu().numpy().tolist()
predictions = np.argmax(logits.data.cpu().numpy(), axis=1).tolist()
if unsort:
_, predictions, probs = [list(t) for t in zip(*sorted(zip(orig_idx,\
predictions, probs)))]
return predictions, probs, loss.data.item()
def update_lr(self, new_lr):
torch_utils.change_lr(self.optimizer, new_lr)
def save(self, filename, epoch):
params = {
'model': self.model.state_dict(),
'config': self.opt,
'epoch': epoch
}
try:
torch.save(params, filename)
print("model saved to {}".format(filename))
except BaseException:
print("[Warning: Saving failed... continuing anyway.]")
def load(self, filename):
try:
checkpoint = torch.load(filename)
except BaseException:
print("Cannot load model from {}".format(filename))
exit()
self.model.load_state_dict(checkpoint['model'])
self.opt = checkpoint['config']
class PositionAwareRNN(nn.Module):
""" A sequence model for relation extraction. """
def __init__(self, opt, emb_matrix=None):
super(PositionAwareRNN, self).__init__()
self.drop = nn.Dropout(opt['dropout'])
self.emb = nn.Embedding(opt['vocab_size'], opt['emb_dim'], padding_idx=constant.PAD_ID)
if opt['pos_dim'] > 0:
self.pos_emb = nn.Embedding(len(constant.POS_TO_ID), opt['pos_dim'],
padding_idx=constant.PAD_ID)
if opt['ner_dim'] > 0:
self.ner_emb = nn.Embedding(len(constant.NER_TO_ID), opt['ner_dim'],
padding_idx=constant.PAD_ID)
input_size = opt['emb_dim'] + opt['pos_dim'] + opt['ner_dim']
self.rnn = nn.LSTM(input_size, opt['hidden_dim'], opt['num_layers'], batch_first=True,\
dropout=opt['dropout'])
self.linear = nn.Linear(opt['hidden_dim'], opt['num_class'])
if opt['attn']:
self.attn_layer = layers.PositionAwareAttention(opt['hidden_dim'],
opt['hidden_dim'], 2*opt['pe_dim'], opt['attn_dim'])
self.pe_emb = nn.Embedding(constant.MAX_LEN * 2 + 1, opt['pe_dim'])
self.opt = opt
self.topn = self.opt.get('topn', 1e10)
self.use_cuda = opt['cuda']
self.emb_matrix = emb_matrix
self.init_weights()
def init_weights(self):
if self.emb_matrix is None:
self.emb.weight.data[1:,:].uniform_(-1.0, 1.0) # keep padding dimension to be 0
else:
self.emb_matrix = torch.from_numpy(self.emb_matrix)
self.emb.weight.data.copy_(self.emb_matrix)
if self.opt['pos_dim'] > 0:
self.pos_emb.weight.data[1:,:].uniform_(-1.0, 1.0)
if self.opt['ner_dim'] > 0:
self.ner_emb.weight.data[1:,:].uniform_(-1.0, 1.0)
self.linear.bias.data.fill_(0)
init.xavier_uniform_(self.linear.weight, gain=1) # initialize linear layer
if self.opt['attn']:
self.pe_emb.weight.data.uniform_(-1.0, 1.0)
# decide finetuning
if self.topn <= 0:
print("Do not finetune word embedding layer.")
self.emb.weight.requires_grad = False
elif self.topn < self.opt['vocab_size']:
print("Finetune top {} word embeddings.".format(self.topn))
self.emb.weight.register_hook(lambda x: \
torch_utils.keep_partial_grad(x, self.topn))
else:
print("Finetune all embeddings.")
def zero_state(self, batch_size):
state_shape = (self.opt['num_layers'], batch_size, self.opt['hidden_dim'])
h0 = c0 = torch.zeros(*state_shape, requires_grad=False)
if self.use_cuda:
return h0.cuda(), c0.cuda()
else:
return h0, c0
def forward(self, inputs):
words, masks, pos, ner, deprel, subj_pos, obj_pos = inputs # unpack
seq_lens = list(masks.data.eq(constant.PAD_ID).long().sum(1).squeeze())
batch_size = words.size()[0]
# embedding lookup
word_inputs = self.emb(words)
inputs = [word_inputs]
if self.opt['pos_dim'] > 0:
inputs += [self.pos_emb(pos)]
if self.opt['ner_dim'] > 0:
inputs += [self.ner_emb(ner)]
inputs = self.drop(torch.cat(inputs, dim=2)) # add dropout to input
input_size = inputs.size(2)
# rnn
h0, c0 = self.zero_state(batch_size)
inputs = nn.utils.rnn.pack_padded_sequence(inputs, seq_lens, batch_first=True)
outputs, (ht, ct) = self.rnn(inputs, (h0, c0))
outputs, output_lens = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True)
hidden = self.drop(ht[-1,:,:]) # get the outermost layer h_n
outputs = self.drop(outputs)
# attention
if self.opt['attn']:
# convert all negative PE numbers to positive indices
# e.g., -2 -1 0 1 will be mapped to 98 99 100 101
subj_pe_inputs = self.pe_emb(subj_pos + constant.MAX_LEN)
obj_pe_inputs = self.pe_emb(obj_pos + constant.MAX_LEN)
pe_features = torch.cat((subj_pe_inputs, obj_pe_inputs), dim=2)
final_hidden = self.attn_layer(outputs, masks, hidden, pe_features)
else:
final_hidden = hidden
logits = self.linear(final_hidden)
return logits, final_hidden
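# --- Added illustration (not from the original repo): the position-index shift used by ---
# --- the attention branch above. MAX_LEN below is a stand-in value for this sketch only; ---
# --- the real model reads it from utils.constant. ---
if __name__ == '__main__':
    MAX_LEN = 100  # assumption for this example only
    pe_emb = nn.Embedding(MAX_LEN * 2 + 1, 4)
    subj_pos = torch.tensor([[-2, -1, 0, 0, 1]])   # relative positions w.r.t. the subject
    pe_inputs = pe_emb(subj_pos + MAX_LEN)         # shifted to indices 98..101, all non-negative
    print(pe_inputs.shape)                         # torch.Size([1, 5, 4])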
| 7,577 | 37.467005 | 99 | py |
TextSiM | TextSiM-main/TACRED_evaluation_scripts/SpanBERT/bert.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from transformers import BertModel
from utils import constant, torch_utils
class BERTencoder(nn.Module):
def __init__(self):
super().__init__()
in_dim = 1024
self.model = BertModel.from_pretrained("SpanBERT/spanbert-large-cased")
self.classifier = nn.Linear(in_dim, 1)
def forward(self, inputs):
words = inputs[0]
outputs = self.model(words, output_attentions=True)
# h = outputs.last_hidden_state
out = torch.sigmoid(self.classifier(outputs.pooler_output))
return outputs, out
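# --- Added illustration (not from the original file): a shape sketch for the classifier ---
# --- head defined below, using a random tensor in place of the 1024-d pooler output so ---
# --- no pretrained model needs to be downloaded. This helper is never called elsewhere. ---
def _demo_classifier_shapes():
    clf = BERTclassifier({'num_class': 42})   # TACRED has 42 relation labels incl. no_relation
    fake_pooled = torch.randn(8, 1024)        # stands in for outputs.pooler_output
    return clf(fake_pooled).shape             # torch.Size([8, 42])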
class BERTclassifier(nn.Module):
def __init__(self, opt):
super().__init__()
in_dim = 1024
self.classifier = nn.Linear(in_dim, opt['num_class'])
self.opt = opt
def forward(self, h):
cls_out = h  # pool(h, out_mask.eq(0), type=pool_type)
logits = self.classifier(cls_out)
return logits | 1,039 | 28.714286 | 79 | py |
TextSiM | TextSiM-main/TACRED_evaluation_scripts/SpanBERT/dataloader.py | """
Data loader for TACRED json files.
"""
import json
import random
import torch
import numpy as np
import string
from utils import constant, helper
from collections import defaultdict
from statistics import mean
class DataLoader(object):
"""
Load data from json files, preprocess and prepare batches.
"""
def __init__(self, filename, batch_size, opt, tokenizer, evaluation=False):
self.batch_size = batch_size
self.opt = opt
self.eval = evaluation
self.label2id = constant.LABEL_TO_ID
self.tokenizer = tokenizer
with open(filename) as infile:
data = json.load(infile)
data = self.preprocess(data, opt)
# shuffle for training
if not evaluation:
indices = list(range(len(data)))
random.shuffle(indices)
data = [data[i] for i in indices]
self.id2label = dict([(v,k) for k,v in self.label2id.items()])
self.labels = [self.id2label[d[-2]] for d in data]
self.words = [d[-1] for d in data]
self.num_examples = len(data)
# chunk into batches
data = [data[i:i+batch_size] for i in range(0, len(data), batch_size)]
self.data = data
print("{} batches created for {}".format(len(self.data), filename))
def preprocess(self, data, opt):
""" Preprocess the data and convert to ids. """
processed = []
processed_rule = []
for c, d in enumerate(data):
tokens = list(d['token'])
words = list(d['token'])
for i in range(len(words)):
if words[i] == '-LRB-':
words[i] = '('
if words[i] == '-RRB-':
words[i] = ')'
if opt['lower']:
tokens = [t.lower() for t in tokens]
# anonymize tokens
ss, se = d['subj_start'], d['subj_end']
os, oe = d['obj_start'], d['obj_end']
tokens[ss:se+1] = ['[SUBJ-'+d['subj_type']+']'] * (se-ss+1)
tokens[os:oe+1] = ['[OBJ-'+d['obj_type']+']'] * (oe-os+1)
tokens = ['[CLS]'] + tokens
words = ['[CLS]'] + words
relation = self.label2id[d['relation']]
l = len(tokens)
for i in range(l):
if tokens[i] == '-LRB-':
tokens[i] = '('
if tokens[i] == '-RRB-':
tokens[i] = ')'
tokens = self.tokenizer.convert_tokens_to_ids(tokens)
processed += [(tokens, relation, words)]
return processed
def gold(self):
""" Return gold labels as a list. """
return self.labels
def __len__(self):
return len(self.data)
def __getitem__(self, key):
""" Get a batch with index. """
if not isinstance(key, int):
raise TypeError
if key < 0 or key >= len(self.data):
raise IndexError
batch = self.data[key]
batch_size = len(batch)
batch = list(zip(*batch))
# sort all fields by lens for easy RNN operations
lens = [len(x) for x in batch[0]]
batch, orig_idx = sort_all(batch, lens)
# word dropout
words = batch[0]
# convert to tensors
words = get_long_tensor(words, batch_size)
# words = self.tokenizer(batch[0], is_split_into_words=True, padding=True, truncation=True, return_tensors="pt")
rels = torch.LongTensor(batch[1])
return (words, rels, orig_idx)
def __iter__(self):
for i in range(self.__len__()):
yield self.__getitem__(i)
def get_long_tensor(tokens_list, batch_size):
""" Convert list of list of tokens to a padded LongTensor. """
token_len = max(len(x) for x in tokens_list)
tokens = torch.LongTensor(batch_size, token_len).fill_(constant.PAD_ID)
for i, s in enumerate(tokens_list):
tokens[i,:len(s)] = torch.LongTensor(s)
return tokens
def sort_all(batch, lens):
""" Sort all fields by descending order of lens, and return the original indices. """
unsorted_all = [lens] + [range(len(lens))] + list(batch)
sorted_all = [list(t) for t in zip(*sorted(zip(*unsorted_all), reverse=True))]
return sorted_all[2:], sorted_all[1]
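# --- Added illustration (not from the original repo): the entity anonymization performed ---
# --- in preprocess(), shown on a toy example without the BERT tokenizer. ---
if __name__ == '__main__':
    tokens = ['Barack', 'Obama', 'was', 'born', 'in', 'Hawaii', '.']
    ss, se, obj_s, obj_e = 0, 1, 5, 5   # subject span tokens[0:2], object span tokens[5:6]
    tokens[ss:se + 1] = ['[SUBJ-PERSON]'] * (se - ss + 1)
    tokens[obj_s:obj_e + 1] = ['[OBJ-LOCATION]'] * (obj_e - obj_s + 1)
    print(tokens)
    # ['[SUBJ-PERSON]', '[SUBJ-PERSON]', 'was', 'born', 'in', '[OBJ-LOCATION]', '.']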
| 4,299 | 33.126984 | 120 | py |
TextSiM | TextSiM-main/TACRED_evaluation_scripts/SpanBERT/eval.py | """
Run evaluation with saved models.
"""
import random
import argparse
from tqdm import tqdm
import torch
from dataloader import DataLoader
from trainer import BERTtrainer
from utils import torch_utils, scorer, constant, helper
from nltk.translate.bleu_score import corpus_bleu, sentence_bleu
from transformers import BertTokenizer
import json
parser = argparse.ArgumentParser()
parser.add_argument('model_dir', type=str, help='Directory of the model.')
parser.add_argument('--model', type=str, default='best_model.pt', help='Name of the model file.')
parser.add_argument('--data_dir', type=str, default='dataset/tacred')
parser.add_argument('--dataset', type=str, default='test', help="Evaluate on dev or test.")
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--cpu', action='store_true')
parser.add_argument('--device', type=int, default=0, help='GPU device to use.')
args = parser.parse_args()
torch.manual_seed(args.seed)
random.seed(1234)
if args.cpu:
args.cuda = False
elif args.cuda:
torch.cuda.manual_seed(args.seed)
tokenizer = BertTokenizer.from_pretrained('SpanBERT/spanbert-large-cased')
special_tokens_dict = {'additional_special_tokens': constant.ENTITY_TOKENS}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
# load opt
model_file = args.model_dir + '/' + args.model
print("Loading model from {}".format(model_file))
opt = torch_utils.load_config(model_file)
opt['device'] = args.device
trainer = BERTtrainer(opt)
trainer.encoder.model.resize_token_embeddings(len(tokenizer))
trainer.load(model_file)
# load data
data_file = opt['data_dir'] + '/{}.json'.format(args.dataset)
print("Loading data from {} with batch size {}...".format(data_file, opt['batch_size']))
batch = DataLoader(data_file, opt['batch_size'], opt, tokenizer, evaluation=True)
helper.print_config(opt)
label2id = constant.LABEL_TO_ID
id2label = dict([(v,k) for k,v in label2id.items()])
predictions = []
x = 0
exact_match = 0
other = 0
for c, b in enumerate(batch):
preds = trainer.predict(b, id2label, tokenizer)
predictions += preds
batch_size = len(preds)
output = list()
for i, p in enumerate(predictions):
predictions[i] = id2label[p]
with open("output_{}_{}_{}".format(args.model_dir.split('/')[-1], args.dataset, args.model.replace('.pt', '.json')), 'w') as f:
f.write(json.dumps(output))
p, r, f1 = scorer.score(batch.gold(), predictions, verbose=True)
print("{} set evaluate result: {:.2f}\t{:.2f}\t{:.2f}".format(args.dataset,p,r,f1))
print("Evaluation ended.")
| 2,637 | 31.567901 | 127 | py |
TextSiM | TextSiM-main/TACRED_evaluation_scripts/SpanBERT/train.py | import os
import sys
from datetime import datetime
import time
import numpy as np
import random
import argparse
from shutil import copyfile
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from dataloader import DataLoader
from trainer import BERTtrainer
from utils import torch_utils, scorer, constant, helper
from transformers import BertTokenizer
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='dataset/tacred')
parser.set_defaults(lower=False)
parser.add_argument('--lr', type=float, default=1.0, help='Applies to sgd and adagrad.')
parser.add_argument('--num_epoch', type=int, default=100, help='Number of total training epochs.')
parser.add_argument('--batch_size', type=int, default=50, help='Training batch size.')
parser.add_argument('--log_step', type=int, default=20, help='Print log every k steps.')
parser.add_argument('--log', type=str, default='logs.txt', help='Write training log to file.')
parser.add_argument('--save_epoch', type=int, default=10, help='Save model checkpoints every k epochs.')
parser.add_argument('--save_dir', type=str, default='./saved_models', help='Root dir for saving models.')
parser.add_argument('--id', type=str, default='00', help='Model ID under which to save models.')
parser.add_argument('--info', type=str, default='', help='Optional info for the experiment.')
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')
parser.add_argument('--load', dest='load', action='store_true', help='Load pretrained model.')
parser.add_argument('--model_file', type=str, help='Filename of the pretrained model.')
parser.add_argument('--device', type=int, default=0, help='gpu device to use.')
parser.add_argument('--pooling', choices=['max', 'avg', 'sum'], default='max', help='Pooling function type. Default max.')
parser.add_argument('--decay_epoch', type=int, default=5, help='Decay learning rate after this epoch.')
parser.add_argument('--lr_decay', type=float, default=0.9, help='Learning rate decay rate.')
args = parser.parse_args()
opt = vars(args)
label2id = constant.LABEL_TO_ID
opt['num_class'] = len(label2id)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
if args.cpu:
args.cuda = False
elif args.cuda:
torch.cuda.manual_seed(args.seed)
tokenizer = BertTokenizer.from_pretrained('SpanBERT/spanbert-large-cased')
special_tokens_dict = {'additional_special_tokens': constant.ENTITY_TOKENS}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
train_batch = DataLoader(opt['data_dir'] + '/train.json', opt['batch_size'], opt, tokenizer, evaluation=False)
dev_batch = DataLoader(opt['data_dir'] + '/dev.json', opt['batch_size'], opt, tokenizer, evaluation=True)
model_id = opt['id'] if len(opt['id']) > 1 else '0' + opt['id']
model_save_dir = opt['save_dir'] + '/' + model_id
opt['model_save_dir'] = model_save_dir
helper.ensure_dir(model_save_dir, verbose=True)
# save config
helper.save_config(opt, model_save_dir + '/config.json', verbose=True)
file_logger = helper.FileLogger(model_save_dir + '/' + opt['log'], header="# epoch\ttrain_loss\tdev_loss\tdev_score\tbest_dev_score")
# print model info
helper.print_config(opt)
# model
if not opt['load']:
trainer = BERTtrainer(opt)
else:
# load pretrained model
model_file = opt['model_file']
print("Loading model from {}".format(model_file))
model_opt = torch_utils.load_config(model_file)
model_opt['optim'] = opt['optim']
trainer = BERTtrainer(model_opt)
trainer.load(model_file)
trainer.encoder.model.resize_token_embeddings(len(tokenizer))
id2label = dict([(v,k) for k,v in label2id.items()])
dev_score_history = []
current_lr = opt['lr']
global_step = 0
format_str = '{}: step {}/{} (epoch {}/{}), loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'
max_steps = len(train_batch) * opt['num_epoch']
# start training
for epoch in range(1, opt['num_epoch']+1):
train_loss = 0
for i, batch in enumerate(train_batch):
start_time = time.time()
global_step += 1
loss = trainer.update(batch, epoch)
train_loss += loss
if global_step % opt['log_step'] == 0:
duration = time.time() - start_time
print(format_str.format(datetime.now(), global_step, max_steps, epoch,\
opt['num_epoch'], loss, duration, current_lr))
# eval on dev
x = 0
print("Evaluating on dev set...")
predictions = []
dev_loss = 0
for _, batch in enumerate(dev_batch):
preds = trainer.predict(batch, id2label, tokenizer)
predictions += preds
predictions = [id2label[p] for p in predictions]
train_loss = train_loss / train_batch.num_examples * opt['batch_size'] # avg loss per batch
dev_loss = dev_loss / dev_batch.num_examples * opt['batch_size']
dev_p, dev_r, dev_f1 = scorer.score(dev_batch.gold(), predictions)
print("epoch {}: train_loss = {:.6f}, dev_loss = {:.6f}, dev_f1 = {:.4f}".format(epoch,\
train_loss, dev_loss, dev_f1))
dev_score = dev_f1
file_logger.log("{}\t{:.6f}\t{:.6f}\t{:.4f}\t{:.4f}".format(epoch, train_loss, dev_loss, dev_score, max([dev_score] + dev_score_history)))
# save
model_file = model_save_dir + '/checkpoint_epoch_{}.pt'.format(epoch)
trainer.save(model_file, epoch)
if epoch == 1 or dev_score > max(dev_score_history):
copyfile(model_file, model_save_dir + '/best_model.pt')
print("new best model saved.")
file_logger.log("new best model saved at epoch {}: {:.2f}\t{:.2f}\t{:.2f}"\
.format(epoch, dev_p*100, dev_r*100, dev_score*100))
if epoch % opt['save_epoch'] != 0:
os.remove(model_file)
# lr schedule
if len(dev_score_history) > opt['decay_epoch'] and dev_score <= dev_score_history[-1]:
current_lr *= opt['lr_decay']
trainer.update_lr(current_lr)
dev_score_history += [dev_score]
print("")
print("Training ended with {} epochs.".format(epoch))
| 6,151 | 39.473684 | 142 | py |
TextSiM | TextSiM-main/TACRED_evaluation_scripts/SpanBERT/trainer.py | """
A trainer class.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from bert import BERTencoder, BERTclassifier
from utils import constant, torch_utils
from transformers import AdamW
class Trainer(object):
def __init__(self, opt):
raise NotImplementedError
def update(self, batch):
raise NotImplementedError
def predict(self, batch):
raise NotImplementedError
def update_lr(self, new_lr):
torch_utils.change_lr(self.optimizer, new_lr)
def load(self, filename):
try:
checkpoint = torch.load(filename)
except BaseException:
print("Cannot load model from {}".format(filename))
exit()
self.classifier.load_state_dict(checkpoint['classifier'])
self.encoder.load_state_dict(checkpoint['encoder'])
device = self.opt['device']
self.opt = checkpoint['config']
self.opt['device'] = device
def save(self, filename, epoch):
params = {
'classifier': self.classifier.state_dict(),
'encoder': self.encoder.state_dict(),
'config': self.opt,
}
try:
torch.save(params, filename)
print("model saved to {}".format(filename))
except BaseException:
print("[Warning: Saving failed... continuing anyway.]")
def unpack_batch(batch, cuda, device):
rules = None
if cuda:
with torch.cuda.device(device):
inputs = [batch[0].to('cuda')]
labels = Variable(batch[1].cuda())
else:
inputs = [Variable(batch[0])]
labels = Variable(batch[1])
tokens = batch[0]
return inputs, labels, tokens
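# --- Added illustration (not from the original repo): a minimal sketch of unpack_batch ---
# --- on a CPU-only toy batch; this helper is illustrative and never called by the code. ---
def _demo_unpack_batch():
    toy_batch = (torch.tensor([[101, 7592, 102]]), torch.tensor([0]), [0])
    inputs, labels, tokens = unpack_batch(toy_batch, cuda=False, device=None)
    return inputs[0].shape, labels.shape   # (torch.Size([1, 3]), torch.Size([1]))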
class BERTtrainer(Trainer):
def __init__(self, opt):
self.opt = opt
self.encoder = BERTencoder()
self.classifier = BERTclassifier(opt)
self.criterion = nn.CrossEntropyLoss()
self.parameters = [p for p in self.classifier.parameters() if p.requires_grad] + [p for p in self.encoder.parameters() if p.requires_grad]
if opt['cuda']:
with torch.cuda.device(self.opt['device']):
self.encoder.cuda()
self.classifier.cuda()
self.criterion.cuda()
self.optimizer = AdamW(
self.parameters,
lr=opt['lr'],
)
def update(self, batch, epoch):
inputs, labels, tokens = unpack_batch(batch, self.opt['cuda'], self.opt['device'])
# step forward
self.encoder.train()
self.classifier.train()
self.optimizer.zero_grad()
loss = 0
o, b_out = self.encoder(inputs)
h = o.pooler_output
logits = self.classifier(h)
loss += self.criterion(logits, labels)
if loss != 0:
loss_val = loss.item()
# backward
loss.backward()
self.optimizer.step()
else:
loss_val = 0
return loss_val
def predict(self, batch, id2label, tokenizer, unsort=True):
inputs, labels, tokens = unpack_batch(batch, self.opt['cuda'], self.opt['device'])
tokens = tokens.data.cpu().numpy().tolist()
orig_idx = batch[2]
# forward
self.encoder.eval()
self.classifier.eval()
o, b_out = self.encoder(inputs)
a = o.attentions
a = a[-1]#.data.cpu().numpy()
print (a.size())
h = o.pooler_output
logits = self.classifier(h)
loss = self.criterion(logits, labels)
probs = F.softmax(logits, 1)
predictions = np.argmax(probs.data.cpu().numpy(), axis=1).tolist()
tags = predictions
if unsort:
_, predictions, probs = [list(t) for t in zip(*sorted(zip(orig_idx,\
predictions, probs)))]
return predictions | 3,918 | 30.352 | 146 | py |
TextSiM | TextSiM-main/TACRED_evaluation_scripts/SpanBERT/utils/torch_utils.py | """
Utility functions for torch.
"""
import torch
from torch import nn, optim
from torch.optim import Optimizer
### class
class MyAdagrad(Optimizer):
"""My modification of the Adagrad optimizer that allows to specify an initial
accumulater value. This mimics the behavior of the default Adagrad implementation
in Tensorflow. The default PyTorch Adagrad uses 0 for initial acculmulator value.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-2)
lr_decay (float, optional): learning rate decay (default: 0)
init_accu_value (float, optional): initial accumulator value.
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
"""
def __init__(self, params, lr=1e-2, lr_decay=0, init_accu_value=0.1, weight_decay=0):
defaults = dict(lr=lr, lr_decay=lr_decay, init_accu_value=init_accu_value, \
weight_decay=weight_decay)
super(MyAdagrad, self).__init__(params, defaults)
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = 0
state['sum'] = torch.ones(p.data.size()).type_as(p.data) *\
init_accu_value
def share_memory(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['sum'].share_memory_()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
state['step'] += 1
if group['weight_decay'] != 0:
if p.grad.data.is_sparse:
raise RuntimeError("weight_decay option is not compatible with sparse gradients ")
grad = grad.add(group['weight_decay'], p.data)
clr = group['lr'] / (1 + (state['step'] - 1) * group['lr_decay'])
if p.grad.data.is_sparse:
grad = grad.coalesce() # the update is non-linear so indices must be unique
grad_indices = grad._indices()
grad_values = grad._values()
size = torch.Size([x for x in grad.size()])
def make_sparse(values):
constructor = type(p.grad.data)
if grad_indices.dim() == 0 or values.dim() == 0:
return constructor()
return constructor(grad_indices, values, size)
state['sum'].add_(make_sparse(grad_values.pow(2)))
std = state['sum']._sparse_mask(grad)
std_values = std._values().sqrt_().add_(1e-10)
p.data.add_(-clr, make_sparse(grad_values / std_values))
else:
state['sum'].addcmul_(1, grad, grad)
std = state['sum'].sqrt().add_(1e-10)
p.data.addcdiv_(-clr, grad, std)
return loss
### torch specific functions
def get_optimizer(name, parameters, lr, l2=0):
if name == 'sgd':
return torch.optim.SGD(parameters, lr=lr, weight_decay=l2)
elif name in ['adagrad', 'myadagrad']:
# use my own adagrad to allow for init accumulator value
return MyAdagrad(parameters, lr=lr, init_accu_value=0.1, weight_decay=l2)
elif name == 'adam':
return torch.optim.Adam(parameters, weight_decay=l2) # use default lr
elif name == 'adamax':
return torch.optim.Adamax(parameters, weight_decay=l2) # use default lr
elif name == 'adadelta':
return torch.optim.Adadelta(parameters, lr=lr, weight_decay=l2)
else:
raise Exception("Unsupported optimizer: {}".format(name))
def change_lr(optimizer, new_lr):
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
def flatten_indices(seq_lens, width):
flat = []
for i, l in enumerate(seq_lens):
for j in range(l):
flat.append(i * width + j)
return flat
def set_cuda(var, cuda):
if cuda:
return var.cuda()
return var
def keep_partial_grad(grad, topk):
"""
Keep only the topk rows of grads.
"""
assert topk < grad.size(0)
grad.data[topk:].zero_()
return grad
### model IO
def save(model, optimizer, opt, filename):
params = {
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'config': opt
}
try:
torch.save(params, filename)
except BaseException:
print("[ Warning: model saving failed. ]")
def load(model, optimizer, filename):
try:
dump = torch.load(filename)
except BaseException:
print("[ Fail: model loading failed. ]")
if model is not None:
model.load_state_dict(dump['model'])
if optimizer is not None:
optimizer.load_state_dict(dump['optimizer'])
opt = dump['config']
return model, optimizer, opt
def load_config(filename):
try:
dump = torch.load(filename)
except BaseException:
print("[ Fail: model loading failed. ]")
return dump['config']
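# --- Added illustration (not from the original repo): using keep_partial_grad as a ---
# --- gradient hook so that only the first `topn` embedding rows are updated. Toy sizes. ---
if __name__ == '__main__':
    emb = nn.Embedding(10, 4)
    emb.weight.register_hook(lambda g: keep_partial_grad(g, 3))
    emb(torch.tensor([1, 5, 8])).sum().backward()
    print(emb.weight.grad[1].abs().sum() > 0)    # tensor(True): row 1 keeps its gradient
    print(emb.weight.grad[5].abs().sum() == 0)   # tensor(True): rows >= 3 are zeroed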
| 5,681 | 33.858896 | 106 | py |
TextSiM | TextSiM-main/TACRED_analysis/create_new_simplified_dataset.py | # create_new_simplified_dataset.py
# This file will take the extracted sentences after simplification and
# make a new dataset for the original training/evaluating codes from other papers
# import neccessary libraries
import sys
import json
#helper to find all occurences of substring
def find_all(a_str, sub):
start = 0
while True:
start = a_str.find(sub, start)
if start == -1: return
yield start
start += len(sub)
def get_start_end(sentence, subj, obj): #(sentence, word):
# Old version: find start, end of subject and object separately.
# word = word.split()
# sentence = sentence.split()
# start = sentence.index(word[0])
# end = sentence.index(word[0])
# itr = 1
# prev = start
# while itr < len(word):
# end = sentence.index(word[itr], prev+1)
# if end == prev + 1:
# itr += 1
# prev = end
# else:
# start = sentence.index(word[0], start+1)
# prev= start
# itr = 1
# New version: given the subj and obj, we will return the positions of subj and obj that
# have the smallest distance between them.
space_pos = list(find_all(sentence, " ")) # to find all the positions of whitespaces we have
# this to determine the relative positon of the subj and obj.
start_subj = list(find_all(sentence, subj))
start_obj = list(find_all(sentence, obj))
subj_pos = start_subj[0]
obj_pos = start_obj[0]
shortest_distance = abs(start_subj[0] - start_obj[0])
for subj_s in start_subj:
for obj_s in start_obj:
if abs(subj_s - obj_s) < shortest_distance:
shortest_distance = abs(subj_s - obj_s)
subj_pos = subj_s
obj_pos = obj_s
# now we have the start of subj and obj that satisfy the shortest distance between them
# We will use this absolute index (character-based) to get the relative index (word-based)
# We do this by comparing the absolute start with each whitespace position and accumulating
# the relative index until we reach the whitespace just before the start index.
rel_subj_start = 0
rel_obj_start = 0
for pos in space_pos:
if subj_pos > pos: # keep adding one if the subj start position is past the current whitespace position
rel_subj_start += 1
if obj_pos > pos: # keep adding one if the obj start position is past the current whitespace position
rel_obj_start += 1
return rel_subj_start, (rel_subj_start + len(subj.split())-1), rel_obj_start, (rel_obj_start + len(obj.split())-1)
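# --- Added illustration (not part of the original script): a worked example of ---
# --- get_start_end on a toy sentence; this helper is illustrative and never called. ---
def _demo_get_start_end():
    sentence = "barack obama was born in hawaii"
    # "barack obama" covers words 0..1 and "hawaii" covers word 5..5
    return get_start_end(sentence, "barack obama", "hawaii")   # (0, 1, 5, 5)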
def create_sample(sentence, tuple_subj_obj, original, pos_rels):
# do something here
# keys: id, docid, relation, token, subj_start, subj_end, obj_start, obj_end, subj_type, obj_type
# copy over the unchanged information from the original sentence
sample = {"id": original["id"], "relation": original["relation"], "docid" : original["docid"], "subj_type" : original["subj_type"], "obj_type" : original["obj_type"]}
# create new list of tokens for new sample
sample["token"] = sentence.split()
# get the start, end for subj, obj
sample["subj_start"], sample["subj_end"], sample["obj_start"], sample["obj_end"] = get_start_end(sentence, tuple_subj_obj[0], tuple_subj_obj[1])
# get stanford_pos:
sample["stanford_pos"] = pos_rels[0].split(" ")
# get stanford_ner
# convert to old stanford_ner
old_ner_with_location = [ner if ner not in ["CITY", "STATE_OR_PROVINCE", "COUNTRY"] else "LOCATION" for ner in pos_rels[1].split(" ")]
old_ner = [ner if ner in ["PERSON", "LOCATION", "ORGANIZATION", "MISC", "MONEY", "NUMBER", "ORDINAL", "PERCENT", "DATE", "TIME", "DURATION", "SET"] else "O" for ner in old_ner_with_location]
sample["stanford_ner"] = old_ner
# get stanford_deprel
sample["stanford_deprel"] = pos_rels[2].split(" ")
# get stanford_head
sample["stanford_head"] = pos_rels[3].split(" ")
# check if the stanford linguistic information lengths match with the number of tokens. If not, we will not have
# a valid dataset to run TACRED official code. Check if they are valid samples
isValid = len(sample["stanford_deprel"]) == len(sample["stanford_head"]) and len(sample["stanford_deprel"]) == len(sample["stanford_ner"]) and len(sample["stanford_ner"]) == len(sample["stanford_pos"]) and len(sample["token"]) == len(sample["stanford_pos"])
# if passes the asserts, simple return sample for simplified dataset.
return (sample, isValid)
def main():
# usage: python3 create_new_simplified_dataset.py [tacred_sim_sentences.txt] [tacred_info.txt]
# [original.json] [tacred_sim_pos_rels.txt] [tacred_simplified.json]
# 1. The first argument is the file storing simplified sentences from the designated Text Simplification system
# 2. The second argument is the file storing information about the subj, obj associated with the above sentence
# 3. The third argument is the original file.
# 4. The fourth argument is the file storing the Stanford POS, NER and dependency annotations for the simplified sentences.
# 5. The fifth argument is the output dataset in the format of TACRED.
# open all the input files
sentences = [line.strip("\n") for line in open(sys.argv[1], "r").readlines()]
# for and wiki-auto, we added lower()
# for ACCESS, NeuralText-pytorch removed lower() in info
info = [line.lower().strip("\n").split("\t") for line in open(sys.argv[2], "r").readlines()]
original = json.load(open(sys.argv[3], "r"))
# This will be a list of arrays of 4: [[POS, NER, DEPREL, HEAD]]
pos_rels = [line.strip("\n").split("\t") for line in open(sys.argv[4], "r").readlines()]
# check if the original files and simplified sentences have the same length
assert len(sentences) == len(original)
assert len(original) == len(info)
assert len(original) == len(pos_rels)
# Initialize the cases so that we know if we want to add simplified data and its corresponding original
addSim = 0
addCorespOri = 1
# Initialize the case here so that it knows if we want to add original sentences that do not have a valid
# simplification (i.e., do not preserve the subj and obj)
addCompOri = 0
# initialize holder
simplified = []
# traverse each line of the input files to create samples of the new simplified dataset
for i in range(len(sentences)):
# check if the simplified sentence still contains the important information for the TACRED task
if " "+info[i][0]+" " in sentences[i] and " "+info[i][1]+" " in sentences[i]:
# if so, then it is valid and can be added to the simplified dataset.
# Also, we only want to add the validated samples and ignore wrongly formatted ones
if addSim:
sample, isValid = create_sample(sentences[i], info[i], original[i], pos_rels[i])
if isValid:
simplified.append(sample)
if addCorespOri:
simplified.append(original[i])
# add the rest of the original samples that do not have a valid simplification counterpart
else:
if addCompOri:
simplified.append(original[i])
print("Number of simplified samples:", len(simplified))
# sanity check: running the code on the original data should create back the original dataset
print(len(simplified))
# now write new dataset set to the output file
with open(sys.argv[5], "w") as output:
json.dump(simplified, output)
if __name__ == '__main__':
main()
| 6,937 | 46.197279 | 258 | py |
rej-summ | rej-summ-main/preprocessing.py | # -*- coding: utf-8 -*-
import os
import spacy
import torch
import logging
import argparse
from tqdm import tqdm
from fairseq.models.bart import BARTModel
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)
def read_lines(file_path):
files = []
with open(file_path, 'r', encoding='utf-8') as f:
for line in f:
files.append(line.strip())
return files
def get_indices(target, tokens):
"""
Get the index of token that is part of the target.
Args:
target: "Mohammad Javad Zarif"
tokens: ['Moh', 'ammad', ' Jav', 'ad', ' Zar', 'if', ' has', ' spent', ...]
Return:
List[int]: [1, 1, 1, 1, 1, 1, 0, 0, ...]
"""
all_indices = []
for i, t in enumerate(tokens):
t = t.strip()
indices = []
if t in target:
indices.append(i)
if t == target:
all_indices.extend(indices)
break
elif i + 1 < len(tokens):
for ni, rt in enumerate(tokens[i + 1:]):
t += rt
indices.append(i + ni + 1)
if t == target:
all_indices.extend(indices)
break
elif t not in target:
break
return all_indices
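# --- Added illustration (not part of the original script): a runnable version of the ---
# --- docstring example above; this helper is illustrative and never called anywhere. ---
def _demo_get_indices():
    tokens = ['Moh', 'ammad', ' Jav', 'ad', ' Zar', 'if', ' has', ' spent']
    # the first six sub-tokens concatenate (after stripping spaces) to the target string
    return get_indices("Mohammad Javad Zarif", tokens)   # [0, 1, 2, 3, 4, 5]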
def build_mask(target, sentence, encode_func, decode_func):
"""
Args:
target (List[str]): "Mohammad Javad Zarif"
sentence (str): "Mohammad Javad Zarif has spent more time with..."
Return:
List[int]: 1 if the token in this position is part of an entity
"""
assert target in sentence
tokens = [decode_func(torch.tensor([i])) for i in encode_func(sentence)]
indices = get_indices(target, tokens)
mask = torch.zeros(len(tokens), dtype=torch.long)
for i in indices:
mask[i] = 1
return mask
def main(args):
# load BART model
bart = BARTModel.from_pretrained(args.bart_dir,
checkpoint_file='model.pt',
data_name_or_path=args.bart_dir)
bart.cuda()
bart.eval()
bart.half()
# load summaries
summaries = read_lines(args.summary_path)
# tokenization
encode_func = lambda x: bart.task.source_dictionary.encode_line(bart.bpe.encode(x), append_eos=True).long()
decode_func = bart.decode
nlp = spacy.load(args.spacy_tokenizer)
ref_masks = []
for summ in tqdm(summaries):
t_ents = [e.text for e in nlp(summ).ents]
mask = None
for e in t_ents:
tmp_mask = build_mask(e, summ, encode_func, decode_func)
if mask is None:
mask = tmp_mask
else:
mask.masked_fill_(tmp_mask.bool(), 1)
# no entities found in the summary
if mask is None:
length = encode_func(summ).shape[0]
mask = torch.zeros(length, dtype=torch.long)
ref_masks.append(mask)
with open(args.output_file, "w") as wf:
for mask in ref_masks:
mask_str = " ".join([str(i) for i in mask.tolist()])
wf.write(mask_str)
wf.write("\n")
logging.info("Masks saved at: {}".format(args.output_file))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--bart_dir",
type=str,
default="BART_models/bart.large.xsum"
)
parser.add_argument(
"--summary_path",
type=str,
default="val.target"
)
parser.add_argument(
"--spacy_tokenizer",
type=str,
default="en_core_web_sm",
const="en_core_web_sm",
nargs="?",
choices=["en_core_web_sm", "en_core_web_trf"],
)
parser.add_argument(
"--output_file",
type=str,
default="val.mask"
)
args = parser.parse_args()
main(args) | 3,925 | 26.263889 | 111 | py |
rej-summ | rej-summ-main/setup.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess
import sys
from setuptools import Extension, find_packages, setup
from torch.utils import cpp_extension
if sys.version_info < (3, 6):
sys.exit("Sorry, Python >= 3.6 is required for fairseq.")
def write_version_py():
with open(os.path.join("fairseq", "version.txt")) as f:
version = f.read().strip()
# write version info to fairseq/version.py
with open(os.path.join("fairseq", "version.py"), "w") as f:
f.write('__version__ = "{}"\n'.format(version))
return version
version = write_version_py()
with open("README.md") as f:
readme = f.read()
if sys.platform == "darwin":
extra_compile_args = ["-stdlib=libc++", "-O3"]
else:
extra_compile_args = ["-std=c++11", "-O3"]
class NumpyExtension(Extension):
"""Source: https://stackoverflow.com/a/54128391"""
def __init__(self, *args, **kwargs):
self.__include_dirs = []
super().__init__(*args, **kwargs)
@property
def include_dirs(self):
import numpy
return self.__include_dirs + [numpy.get_include()]
@include_dirs.setter
def include_dirs(self, dirs):
self.__include_dirs = dirs
extensions = [
Extension(
"fairseq.libbleu",
sources=[
"fairseq/clib/libbleu/libbleu.cpp",
"fairseq/clib/libbleu/module.cpp",
],
extra_compile_args=extra_compile_args,
),
NumpyExtension(
"fairseq.data.data_utils_fast",
sources=["fairseq/data/data_utils_fast.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
),
NumpyExtension(
"fairseq.data.token_block_utils_fast",
sources=["fairseq/data/token_block_utils_fast.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
),
]
extensions.extend(
[
cpp_extension.CppExtension(
"fairseq.libbase",
sources=[
"fairseq/clib/libbase/balanced_assignment.cpp",
],
),
cpp_extension.CppExtension(
"fairseq.libnat",
sources=[
"fairseq/clib/libnat/edit_dist.cpp",
],
),
cpp_extension.CppExtension(
"alignment_train_cpu_binding",
sources=[
"examples/operators/alignment_train_cpu.cpp",
],
),
]
)
if "CUDA_HOME" in os.environ:
extensions.extend(
[
cpp_extension.CppExtension(
"fairseq.libnat_cuda",
sources=[
"fairseq/clib/libnat_cuda/edit_dist.cu",
"fairseq/clib/libnat_cuda/binding.cpp",
],
),
cpp_extension.CppExtension(
"fairseq.ngram_repeat_block_cuda",
sources=[
"fairseq/clib/cuda/ngram_repeat_block_cuda.cpp",
"fairseq/clib/cuda/ngram_repeat_block_cuda_kernel.cu",
],
),
cpp_extension.CppExtension(
"alignment_train_cuda_binding",
sources=[
"examples/operators/alignment_train_kernel.cu",
"examples/operators/alignment_train_cuda.cpp",
],
),
]
)
cmdclass = {"build_ext": cpp_extension.BuildExtension}
if "READTHEDOCS" in os.environ:
# don't build extensions when generating docs
extensions = []
if "build_ext" in cmdclass:
del cmdclass["build_ext"]
# use CPU build of PyTorch
dependency_links = [
"https://download.pytorch.org/whl/cpu/torch-1.7.0%2Bcpu-cp36-cp36m-linux_x86_64.whl"
]
else:
dependency_links = []
if "clean" in sys.argv[1:]:
# Source: https://bit.ly/2NLVsgE
print("deleting Cython files...")
subprocess.run(
["rm -f fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd"],
shell=True,
)
extra_packages = []
if os.path.exists(os.path.join("fairseq", "model_parallel", "megatron", "mpu")):
extra_packages.append("fairseq.model_parallel.megatron.mpu")
def do_setup(package_data):
setup(
name="fairseq",
version=version,
description="Facebook AI Research Sequence-to-Sequence Toolkit",
url="https://github.com/pytorch/fairseq",
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
long_description=readme,
long_description_content_type="text/markdown",
install_requires=[
"cffi",
"cython",
"hydra-core>=1.0.7,<1.1",
"omegaconf<2.1",
"numpy>=1.21.3",
"regex",
"sacrebleu>=1.4.12",
"torch>=1.10",
"tqdm",
"bitarray",
"torchaudio>=0.8.0",
],
extras_require={
"dev": ["flake8", "pytest", "black==22.3.0"],
"docs": ["sphinx", "sphinx-argparse"],
},
dependency_links=dependency_links,
packages=find_packages(
exclude=[
"examples",
"examples.*",
"scripts",
"scripts.*",
"tests",
"tests.*",
]
)
+ extra_packages,
package_data=package_data,
ext_modules=extensions,
test_suite="tests",
entry_points={
"console_scripts": [
"fairseq-eval-lm = fairseq_cli.eval_lm:cli_main",
"fairseq-generate = fairseq_cli.generate:cli_main",
"fairseq-hydra-train = fairseq_cli.hydra_train:cli_main",
"fairseq-interactive = fairseq_cli.interactive:cli_main",
"fairseq-preprocess = fairseq_cli.preprocess:cli_main",
"fairseq-score = fairseq_cli.score:cli_main",
"fairseq-train = fairseq_cli.train:cli_main",
"fairseq-validate = fairseq_cli.validate:cli_main",
],
},
cmdclass=cmdclass,
zip_safe=False,
)
def get_files(path, relative_to="fairseq"):
all_files = []
for root, _dirs, files in os.walk(path, followlinks=True):
root = os.path.relpath(root, relative_to)
for file in files:
if file.endswith(".pyc"):
continue
all_files.append(os.path.join(root, file))
return all_files
if __name__ == "__main__":
try:
# symlink examples into fairseq package so package_data accepts them
fairseq_examples = os.path.join("fairseq", "examples")
if "build_ext" not in sys.argv[1:] and not os.path.exists(fairseq_examples):
os.symlink(os.path.join("..", "examples"), fairseq_examples)
package_data = {
"fairseq": (
get_files(fairseq_examples)
+ get_files(os.path.join("fairseq", "config"))
)
}
do_setup(package_data)
finally:
if "build_ext" not in sys.argv[1:] and os.path.islink(fairseq_examples):
os.unlink(fairseq_examples)
| 7,589 | 28.648438 | 92 | py |
rej-summ | rej-summ-main/hubconf.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import functools
import importlib
dependencies = [
"dataclasses",
"hydra",
"numpy",
"omegaconf",
"regex",
"requests",
"torch",
]
# Check for required dependencies and raise a RuntimeError if any are missing.
missing_deps = []
for dep in dependencies:
try:
importlib.import_module(dep)
except ImportError:
# Hack: the hydra package is provided under the "hydra-core" name in
# pypi. We don't want the user mistakenly calling `pip install hydra`
# since that will install an unrelated package.
if dep == "hydra":
dep = "hydra-core"
missing_deps.append(dep)
if len(missing_deps) > 0:
raise RuntimeError("Missing dependencies: {}".format(", ".join(missing_deps)))
# only do fairseq imports after checking for dependencies
from fairseq.hub_utils import ( # noqa; noqa
BPEHubInterface as bpe,
TokenizerHubInterface as tokenizer,
)
from fairseq.models import MODEL_REGISTRY # noqa
# torch.hub doesn't build Cython components, so if they are not found then try
# to build them here
try:
import fairseq.data.token_block_utils_fast # noqa
except ImportError:
try:
import cython # noqa
import os
from setuptools import sandbox
sandbox.run_setup(
os.path.join(os.path.dirname(__file__), "setup.py"),
["build_ext", "--inplace"],
)
except ImportError:
print(
"Unable to build Cython components. Please make sure Cython is "
"installed if the torch.hub model you are loading depends on it."
)
# automatically expose models defined in FairseqModel::hub_models
for _model_type, _cls in MODEL_REGISTRY.items():
for model_name in _cls.hub_models().keys():
globals()[model_name] = functools.partial(
_cls.from_pretrained,
model_name,
)
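# --- Added usage note (illustrative only): models exposed by the loop above can be ---
# --- loaded through torch.hub; the model name must be one of the keys returned by the ---
# --- corresponding FairseqModel.hub_models(), e.g. (downloads are large): ---
#
#   bart = torch.hub.load('pytorch/fairseq', 'bart.large.xsum')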
| 2,099 | 27.378378 | 82 | py |
rej-summ | rej-summ-main/examples/truncated_bptt/transformer_xl_model.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Dict, List, Optional
import torch
from fairseq.dataclass import FairseqDataclass
from fairseq.models import (
FairseqIncrementalDecoder,
FairseqLanguageModel,
register_model,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from omegaconf import II
logger = logging.getLogger(__name__)
@dataclass
class TransformerXLConfig(FairseqDataclass):
# defaults come from the original Transformer-XL code
cutoffs: List[int] = field(default_factory=lambda: [20000, 40000, 200000])
d_model: int = 500
n_head: int = 10
d_head: int = 50
d_inner: int = 1000
div_val: int = 1
n_layer: int = 12
mem_len: int = 0
clamp_len: int = -1
same_length: bool = False
dropout: float = 0.0
dropatt: float = 0.0
checkpoint_activations: bool = False
offload_activations: bool = False
max_target_positions: int = II("task.max_target_positions")
@register_model("transformer_xl", dataclass=TransformerXLConfig)
class TransformerXLLanguageModel(FairseqLanguageModel):
@classmethod
def build_model(cls, cfg: TransformerXLConfig, task):
return cls(TransformerXLDecoder(cfg, task))
class TransformerXLDecoder(FairseqIncrementalDecoder):
def __init__(self, cfg, task):
try:
from transformers.models.transfo_xl import (
TransfoXLConfig,
TransfoXLLMHeadModel,
)
except ImportError:
from transformers.configuration_transfo_xl import TransfoXLConfig
from transformers.modeling_transfo_xl import TransfoXLLMHeadModel
super().__init__(task.target_dictionary)
self.cfg = cfg
# remove any cutoffs larger than the vocab size
cutoffs = [
cutoff for cutoff in cfg.cutoffs if cutoff < len(task.target_dictionary)
]
config = TransfoXLConfig(
vocab_size=len(task.target_dictionary),
cutoffs=cutoffs,
d_model=cfg.d_model,
d_embed=cfg.d_model,
n_head=cfg.n_head,
d_head=cfg.d_head,
d_inner=cfg.d_inner,
div_val=cfg.div_val,
n_layer=cfg.n_layer,
mem_len=cfg.mem_len,
clamp_len=cfg.clamp_len,
same_length=cfg.same_length,
dropout=cfg.dropout,
dropatt=cfg.dropatt,
)
logger.info(config)
self.model = TransfoXLLMHeadModel(config)
if cfg.checkpoint_activations or cfg.offload_activations:
for i in range(len(self.model.transformer.layers)):
self.model.transformer.layers[i] = checkpoint_wrapper(
self.model.transformer.layers[i],
offload_to_cpu=cfg.offload_activations,
)
# TODO: may save mem to wrap(layer.pos_ff.CoreNet[3])
self._mems = None
def forward(
self,
src_tokens,
src_lengths=None, # unused
incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,
encoder_out=None,
):
if incremental_state is not None: # used during inference
mems = self.get_incremental_state(incremental_state, "mems")
src_tokens = src_tokens[:, -1:] # only keep the most recent token
else:
mems = self._mems
output = self.model(
input_ids=src_tokens,
mems=mems,
return_dict=False,
)
if len(output) >= 2:
if incremental_state is not None:
self.set_incremental_state(incremental_state, "mems", output[1])
else:
self._mems = output[1]
return (output[0],)
def max_positions(self):
return self.cfg.max_target_positions
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[torch.Tensor]]],
new_order: torch.Tensor,
):
"""Reorder incremental state.
This will be called when the order of the input has changed from the
previous time step. A typical use case is beam search, where the input
order changes between time steps based on the selection of beams.
"""
mems = self.get_incremental_state(incremental_state, "mems")
if mems is not None:
new_mems = [mems_i.index_select(1, new_order) for mems_i in mems]
self.set_incremental_state(incremental_state, "mems", new_mems)
| 4,738 | 31.909722 | 84 | py |
rej-summ | rej-summ-main/examples/truncated_bptt/truncated_bptt_lm_task.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
import torch
from fairseq import utils
from fairseq.data import (
Dictionary,
TokenBlockDataset,
data_utils,
iterators,
)
from fairseq.dataclass import FairseqDataclass
from fairseq.distributed import utils as dist_utils
from fairseq.tasks import FairseqTask, register_task
from omegaconf import II
logger = logging.getLogger(__name__)
@dataclass
class TruncatedBPTTLMConfig(FairseqDataclass):
data: str = field(default="???", metadata={"help": "path to data directory"})
tokens_per_sample: int = field(
default=1024, metadata={"help": "max number of tokens per sequence"},
)
batch_size: int = II("dataset.batch_size")
# Some models use *max_target_positions* to know how many positional
# embeddings to learn. We use II(...) to make it default to
# *tokens_per_sample*, but in principle there could be more positional
# embeddings than tokens in a single batch. This may also be irrelevant for
# custom model implementations.
max_target_positions: int = II("task.tokens_per_sample")
# these will be populated automatically if not provided
data_parallel_rank: Optional[int] = None
data_parallel_size: Optional[int] = None
@register_task("truncated_bptt_lm", dataclass=TruncatedBPTTLMConfig)
class TruncatedBPTTLMTask(FairseqTask):
def __init__(self, cfg: TruncatedBPTTLMConfig):
super().__init__(cfg)
if cfg.data_parallel_rank is None or cfg.data_parallel_size is None:
if torch.distributed.is_initialized():
cfg.data_parallel_rank = dist_utils.get_data_parallel_rank()
cfg.data_parallel_size = dist_utils.get_data_parallel_world_size()
else:
cfg.data_parallel_rank = 0
cfg.data_parallel_size = 1
# load the dictionary
paths = utils.split_paths(cfg.data)
assert len(paths) > 0
self.dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
logger.info("dictionary: {} types".format(len(self.dictionary)))
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split (e.g., train, valid, test)"""
# support sharded datasets
paths = utils.split_paths(self.cfg.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = os.path.join(data_path, split)
# each element of *data* will be a tensorized line from the original
# text dataset, similar to ``open(split_path).readlines()``
data = data_utils.load_indexed_dataset(
split_path, self.dictionary, combine=combine
)
if data is None:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, split_path)
)
# this is similar to ``data.view(-1).split(tokens_per_sample)``
data = TokenBlockDataset(
data,
data.sizes,
block_size=self.cfg.tokens_per_sample,
pad=None, # unused
eos=None, # unused
break_mode="none",
)
self.datasets[split] = TruncatedBPTTDataset(
data=data,
bsz_per_shard=self.cfg.batch_size,
shard_id=self.cfg.data_parallel_rank,
num_shards=self.cfg.data_parallel_size,
)
def dataset(self, split):
return self.datasets[split]
def get_batch_iterator(
self,
dataset,
num_workers=0,
epoch=1,
data_buffer_size=0,
skip_remainder_batch=False,
**kwargs
):
return iterators.EpochBatchIterator(
dataset=dataset,
collate_fn=self._collate_fn,
num_workers=num_workers,
epoch=epoch,
buffer_size=data_buffer_size,
# we don't use the batching functionality from EpochBatchIterator;
# instead every item in *dataset* is a whole batch
batch_sampler=[[i] for i in range(len(dataset))],
disable_shuffling=True,
skip_remainder_batch=skip_remainder_batch,
)
def _collate_fn(self, items: List[List[torch.Tensor]]):
# we don't use fairseq's batching functionality, so we expect a single
# Tensor of type List[torch.Tensor]
assert len(items) == 1
# item will have shape B x T (the last batch may have length < T)
id, item = items[0]
item = data_utils.collate_tokens(item, pad_idx=self.source_dictionary.pad())
B, T = item.size()
# shift item one position over and append a padding token for the target
target = torch.nn.functional.pad(
item[:, 1:], (0, 1, 0, 0), value=self.target_dictionary.pad()
)
# fairseq expects batches to have the following structure
return {
"id": torch.tensor([id] * item.size(0)),
"net_input": {"src_tokens": item,},
"target": target,
"nsentences": item.size(0),
"ntokens": item.numel(),
}
def build_dataset_for_inference(
self, src_tokens: List[torch.Tensor], src_lengths: List[int], **kwargs
) -> torch.utils.data.Dataset:
eos = self.source_dictionary.eos()
dataset = TokenBlockDataset(
src_tokens,
src_lengths,
block_size=None, # ignored for "eos" break mode
pad=self.source_dictionary.pad(),
eos=eos,
break_mode="eos",
)
class Dataset(torch.utils.data.Dataset):
def __getitem__(self, i):
item = dataset[i]
if item[-1] == eos:
# remove eos to support generating with a prefix
item = item[:-1]
return (i, [item])
def __len__(self):
return len(dataset)
return Dataset()
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None
):
with torch.no_grad():
if constraints is not None:
raise NotImplementedError
# SequenceGenerator doesn't use *src_tokens* directly, we need to
# pass the *prefix_tokens* argument instead.
if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement():
prefix_tokens = sample["net_input"]["src_tokens"]
# begin generation with the end-of-sentence token
bos_token = self.source_dictionary.eos()
return generator.generate(
models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token
)
def eval_lm_dataloader(
self,
dataset,
max_tokens: Optional[int] = 36000,
batch_size: Optional[int] = None,
max_positions: Optional[int] = None,
num_shards: int = 1,
shard_id: int = 0,
num_workers: int = 1,
data_buffer_size: int = 10,
context_window: int = 0,
):
if context_window > 0:
raise NotImplementedError(
"Transformer-XL doesn't need --context-window, try "
"--model-overrides '{\"mem_len\":42}' instead "
)
return self.get_batch_iterator(
dataset=dataset,
max_tokens=max_tokens,
max_sentences=batch_size,
max_positions=max_positions,
ignore_invalid_inputs=True,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
data_buffer_size=data_buffer_size,
).next_epoch_itr(shuffle=False)
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
class TruncatedBPTTDataset(torch.utils.data.Dataset):
def __init__(
self,
data: List[torch.Tensor], # ordered list of items
bsz_per_shard, # number of items processed per GPUs per forward
shard_id, # current GPU ID
num_shards, # number of GPUs
):
super().__init__()
self.data = data
def batchify(data, bsz):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).contiguous()
return data
# total number of sequences processed by all GPUs in each forward pass
global_batch_size = bsz_per_shard * num_shards
"""
With a 16 item dataset, bsz_per_shard=2 and num_shards=3,
*indices* might look like:
indices = [[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9],
[10, 11]]
The size of the TruncatedBPTTDataset instance will be 2,
and shard 1 will see items:
[(0, [data[4], data[6]]),
(1, [data[5], data[7]])]
"""
indices = batchify(torch.arange(len(data)), global_batch_size)
assert indices.size(0) == global_batch_size
self.my_indices = indices[
shard_id * bsz_per_shard : (shard_id + 1) * bsz_per_shard
]
assert self.my_indices.size(0) == bsz_per_shard
def __len__(self):
return self.my_indices.size(1)
def __getitem__(self, i) -> Tuple[int, List[torch.Tensor]]:
return (i, [self.data[idx] for idx in self.my_indices[:, i]])
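# --- Illustrative sketch (not part of the original task file) ---
# A minimal, self-contained check of the sharding logic above, using the same
# toy setup as the docstring: 16 single-element tensors, bsz_per_shard=2,
# num_shards=3. All values below are made up for illustration only.
def _truncated_bptt_shard_sketch():
    import torch
    data = [torch.tensor([i]) for i in range(16)]
    ds = TruncatedBPTTDataset(data, bsz_per_shard=2, shard_id=1, num_shards=3)
    assert len(ds) == 2
    # shard 1 is assigned index columns [[4, 5], [6, 7]]
    assert [t.item() for t in ds[0][1]] == [4, 6]
    assert [t.item() for t in ds[1][1]] == [5, 7]
    return ds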
| 9,995 | 33.951049 | 86 | py |
rej-summ | rej-summ-main/examples/linformer/linformer_src/modules/multihead_linear_attention.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.quant_noise import quant_noise
from torch import Tensor, nn
from torch.nn import Parameter
@with_incremental_state
class MultiheadLinearAttention(nn.Module):
"""Multi-headed linformer attention.
    Projects the keys and values down to the compressed dimension before computing self-attention.
See "Linformer: Self-Attention with Linear Complexity" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
q_noise=0.0,
qn_block_size=8,
compressed=1,
max_seq_len=256,
shared_kv_compressed=0,
shared_compress_layer=None,
freeze_compress=0,
):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, (
"Self-attention requires query, key and " "value to be of the same size"
)
self.k_proj = quant_noise(
nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size
)
self.v_proj = quant_noise(
nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size
)
self.q_proj = quant_noise(
nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
)
        # used to compress the sequence-length dimension down to a shorter subsequence
if shared_compress_layer is None:
self.compress_seq_len = max_seq_len // compressed
self.compress_k = nn.Linear(max_seq_len, self.compress_seq_len, bias=False)
if shared_kv_compressed == 0:
self.compress_v = nn.Linear(
max_seq_len, self.compress_seq_len, bias=False
)
self.layerwise_sharing = False
else:
self.compress_k = shared_compress_layer
if shared_kv_compressed == 0:
self.compress_v = shared_compress_layer
self.layerwise_sharing = True
self.shared_kv_compressed = shared_kv_compressed
self.out_proj = quant_noise(
nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
if freeze_compress == 1:
self.compress_k.weight.requires_grad = False
if shared_kv_compressed == 0:
self.compress_v.weight.requires_grad = False
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
if (
not self.layerwise_sharing
): # otherwise, we already initialize the parameters
nn.init.xavier_uniform_(self.compress_k.weight, gain=1 / math.sqrt(2))
if self.shared_kv_compressed == 0:
nn.init.xavier_uniform_(
self.compress_v.weight, gain=1 / math.sqrt(2)
)
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
if (
not self.layerwise_sharing
): # otherwise, we already initialize the parameters
nn.init.xavier_uniform_(self.compress_k.weight)
if self.shared_kv_compressed == 0:
nn.init.xavier_uniform_(self.compress_v.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
averaged over heads (default: False).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k_input = query.permute(1, 2, 0).contiguous() # B * C * T
k_input = (
F.linear(k_input, self.compress_k.weight[:, 0:tgt_len])
.permute(2, 0, 1)
.contiguous()
)
k = self.k_proj(k_input)
v_input = query.permute(1, 2, 0).contiguous() # B * C * T
if self.shared_kv_compressed == 0:
v_input = (
F.linear(v_input, self.compress_v.weight[:, 0:tgt_len])
.permute(2, 0, 1)
.contiguous()
)
if self.shared_kv_compressed == 1: # use shared kv compressed linear layer
v_input = (
F.linear(v_input, self.compress_k.weight[:, 0:tgt_len])
.permute(2, 0, 1)
.contiguous()
)
v = self.v_proj(v_input)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
],
dim=1,
)
q = (
q.contiguous()
.view(tgt_len, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert k is not None and v is not None
key_padding_mask = MultiheadLinearAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
src_len=k.size(1),
static_kv=static_kv,
)
saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
src_len = k.size(1)
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = MultiheadLinearAttention.apply_sparse_mask(
attn_weights, tgt_len, src_len, bsz
)
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if before_softmax:
return attn_weights, v
attn_weights_float = utils.softmax(
attn_weights, dim=-1, onnx_trace=self.onnx_trace
)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = F.dropout(
attn_weights,
p=self.dropout,
training=self.training,
)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
if self.onnx_trace and attn.size(1) == 1:
# when ONNX tracing a single decoder step (sequence length == 1)
# the transpose is a no-op copy before view, thus unnecessary
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights: Optional[Tensor] = None
if need_weights:
attn_weights = attn_weights_float.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights
@staticmethod
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
filler = torch.zeros(
(batch_size, src_len - prev_key_padding_mask.size(1)),
device=prev_key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
elif key_padding_mask is not None:
filler = torch.zeros(
(batch_size, src_len - key_padding_mask.size(1)),
device=key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
@torch.jit.export
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
new_order: Tensor,
):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if input_buffer_k is not None:
if self.encoder_decoder_attention and input_buffer_k.size(
0
) == new_order.size(0):
break
input_buffer[k] = input_buffer_k.index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, "attn_state")
if result is not None:
return result
else:
empty_result: Dict[str, Optional[Tensor]] = {}
return empty_result
def _set_input_buffer(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
buffer: Dict[str, Optional[Tensor]],
):
return self.set_incremental_state(incremental_state, "attn_state", buffer)
def apply_sparse_mask(attn_weights, tgt_len: int, src_len: int, bsz: int):
return attn_weights
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
items_to_add = {}
keys_to_remove = []
for k in state_dict.keys():
if k.endswith(prefix + "in_proj_weight"):
# in_proj_weight used to be q + k + v with same dimensions
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim]
items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :]
keys_to_remove.append(k)
k_bias = prefix + "in_proj_bias"
if k_bias in state_dict.keys():
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
dim : 2 * dim
]
items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :]
keys_to_remove.append(prefix + "in_proj_bias")
for k in keys_to_remove:
del state_dict[k]
for key, value in items_to_add.items():
state_dict[key] = value
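# --- Illustrative sketch (not part of the attention module above) ---
# The length-compression step used in the self-attention branch of forward():
# keys/values of length T are projected down to T // compressed before
# attention, which is what makes the cost linear in T. The shapes below are
# made-up assumptions for the sketch, not taken from any config.
def _linformer_compression_sketch():
    import torch
    import torch.nn.functional as F
    T, B, C, compressed = 8, 2, 16, 4
    query = torch.randn(T, B, C)                            # T x B x C
    compress_k = torch.nn.Linear(T, T // compressed, bias=False)
    k_input = query.permute(1, 2, 0).contiguous()           # B x C x T
    k_input = F.linear(k_input, compress_k.weight[:, 0:T])  # B x C x (T // compressed)
    k_input = k_input.permute(2, 0, 1).contiguous()         # (T // compressed) x B x C
    assert k_input.shape == (T // compressed, B, C)
    return k_input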
| 19,151 | 38.73444 | 98 | py |
rej-summ | rej-summ-main/examples/linformer/linformer_src/modules/linformer_sentence_encoder.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch.nn as nn
from fairseq.models.transformer import TransformerEncoder
from .linformer_sentence_encoder_layer import LinformerTransformerEncoderLayer
class LinformerTransformerEncoder(TransformerEncoder):
"""
Implementation for a Bi-directional Linformer based Sentence Encoder used
in BERT/XLM style pre-trained models.
This first computes the token embedding using the token embedding matrix,
position embeddings (if specified) and segment embeddings
(if specified). After applying the specified number of
LinformerEncoderLayers, it outputs all the internal states of the
encoder as well as the final representation associated with the first
token (usually CLS token).
Input:
- tokens: B x T matrix representing sentences
- segment_labels: B x T matrix representing segment label for tokens
Output:
- a tuple of the following:
- a list of internal model states used to compute the
predictions where each tensor has shape T x B x C
- sentence representation associated with first input token
in format B x C.
"""
def __init__(self, args, dictionary, embed_tokens):
self.compress_layer = None
super().__init__(args, dictionary, embed_tokens)
def build_encoder_layer(self, args):
if self.args.shared_layer_kv_compressed == 1 and self.compress_layer is None:
compress_layer = nn.Linear(
self.args.max_positions,
self.args.max_positions // self.args.compressed,
)
            # initialize parameters for the compression layer
nn.init.xavier_uniform_(compress_layer.weight, gain=1 / math.sqrt(2))
if self.args.freeze_compress == 1:
compress_layer.weight.requires_grad = False
self.compress_layer = compress_layer
return LinformerTransformerEncoderLayer(args, self.compress_layer)
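# --- Illustrative sketch (not part of the encoder above) ---
# What build_encoder_layer relies on when shared_layer_kv_compressed == 1: a
# single max_positions -> max_positions // compressed projection is built once
# and handed to every layer, so all layers compress through the same weights.
# The sizes below are made-up assumptions for the sketch.
def _shared_compression_sketch():
    import torch
    import torch.nn as nn
    max_positions, compressed, channels = 128, 4, 8
    shared = nn.Linear(max_positions, max_positions // compressed, bias=False)
    x_layer1 = torch.randn(2, channels, max_positions)  # B x C x T
    x_layer2 = torch.randn(2, channels, max_positions)
    out1, out2 = shared(x_layer1), shared(x_layer2)
    assert out1.shape == out2.shape == (2, channels, max_positions // compressed)
    return shared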
| 2,151 | 38.127273 | 85 | py |
rej-summ | rej-summ-main/examples/linformer/linformer_src/modules/linformer_sentence_encoder_layer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq import utils
from fairseq.modules import TransformerEncoderLayer
from .multihead_linear_attention import MultiheadLinearAttention
class LinformerTransformerEncoderLayer(TransformerEncoderLayer):
"""
Implements a Linformer Encoder Layer used in BERT/XLM style pre-trained
models.
"""
def __init__(self, args, shared_compress_layer):
# wrap in a list so it's not automatically registered by PyTorch
self.shared_compress_layer = [shared_compress_layer]
super().__init__(args)
self.register_buffer("version", torch.tensor(2))
def build_self_attention(self, embed_dim, args):
return MultiheadLinearAttention(
embed_dim,
args.encoder_attention_heads,
dropout=args.dropout,
self_attention=True,
q_noise=args.quant_noise_pq,
qn_block_size=args.quant_noise_pq_block_size,
compressed=args.compressed,
max_seq_len=args.max_positions,
shared_kv_compressed=args.shared_kv_compressed,
shared_compress_layer=self.shared_compress_layer[0],
freeze_compress=args.freeze_compress,
)
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
prefix = name + "." if name != "" else ""
# some old checkpoints had weight sharing implemented incorrectly
# (note: this was correct in the original paper code)
if utils.item(state_dict.get(f"{prefix}version", torch.tensor(1))) < 2:
state_dict[f"{prefix}version"] = torch.tensor(1)
# check compression layer sharing
if f"{prefix}shared_compress_layer.weight" in state_dict:
# reinitialize block without sharing compression layer to match
# old behavior
self.shared_compress_layer = [
torch.nn.Linear(
self.shared_compress_layer[0].weight.size(1),
self.shared_compress_layer[0].weight.size(0),
)
]
self.self_attn = self.build_self_attention(self.embed_dim, self.args)
# delete shared_compress_layer, since it's already copied to
# self_attn.compress_k.weight
del state_dict[f"{prefix}shared_compress_layer.weight"]
if f"{prefix}shared_compress_layer.bias" in state_dict:
del state_dict[f"{prefix}shared_compress_layer.bias"]
| 2,743 | 40.575758 | 85 | py |
rej-summ | rej-summ-main/examples/linformer/linformer_src/models/linformer_roberta.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Linformer: Self-Attention with Linear Complexity
"""
import logging
import torch
from fairseq import utils
from fairseq.models import register_model, register_model_architecture
from fairseq.models.roberta import (
init_bert_params,
roberta_base_architecture,
roberta_large_architecture,
RobertaEncoder,
RobertaModel,
)
from fairseq.utils import safe_hasattr
from ..modules.linformer_sentence_encoder import LinformerTransformerEncoder
logger = logging.getLogger(__name__)
@register_model("linformer_roberta")
class LinformerModel(RobertaModel):
@staticmethod
def add_args(parser):
RobertaModel.add_args(parser)
# add args for Linformer
parser.add_argument(
"--compressed", type=int, help="compressed ratio of sequence length"
)
parser.add_argument(
"--shared-kv-compressed",
type=int,
help="share compressed matrix between k and v, in each layer",
)
parser.add_argument(
"--shared-layer-kv-compressed",
type=int,
help="share compressed matrix between k and v and across all layers",
)
parser.add_argument(
"--freeze-compress",
type=int,
help="freeze the parameters in compressed layer",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present
base_architecture(args)
if not safe_hasattr(args, "max_positions"):
args.max_positions = args.tokens_per_sample
encoder = LinformerEncoder(args, task.source_dictionary)
return cls(args, encoder)
class LinformerEncoder(RobertaEncoder):
"""Linformer encoder."""
def __init__(self, args, dictionary):
super().__init__(args, dictionary)
self.register_buffer("version", torch.tensor(2))
def build_encoder(self, args, dictionary, embed_tokens):
encoder = LinformerTransformerEncoder(args, dictionary, embed_tokens)
encoder.apply(init_bert_params)
return encoder
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
prefix = name + "." if name != "" else ""
# some old checkpoints had weight sharing implemented incorrectly
# (note: this was correct in the original paper code)
if utils.item(state_dict.get(f"{prefix}version", torch.tensor(1))) < 2:
state_dict[f"{prefix}version"] = torch.tensor(1)
# check if input embeddings and output embeddings were tied
if not torch.allclose(
state_dict[f"{prefix}sentence_encoder.embed_tokens.weight"],
state_dict[f"{prefix}lm_head.weight"],
):
# they weren't tied, re-init the LM head without weight sharing
self.lm_head = self.build_lm_head(
embed_dim=self.args.encoder_embed_dim,
output_dim=len(self.dictionary),
activation_fn=self.args.activation_fn,
weight=None, # don't share weights
)
@register_model_architecture("linformer_roberta", "linformer_roberta")
def base_architecture(args):
args.compressed = getattr(args, "compressed", 4)
args.shared_kv_compressed = getattr(args, "shared_kv_compressed", 0)
args.shared_layer_kv_compressed = getattr(args, "shared_layer_kv_compressed", 0)
args.freeze_compress = getattr(args, "freeze_compress", 0)
roberta_base_architecture(args)
@register_model_architecture("linformer_roberta", "linformer_roberta_base")
def linformer_roberta_base_architecture(args):
base_architecture(args)
@register_model_architecture("linformer_roberta", "linformer_roberta_large")
def linformer_roberta_large_architecture(args):
roberta_large_architecture(args)
base_architecture(args)
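# --- Illustrative sketch (not part of the model registration above) ---
# The *_architecture functions only fill in defaults that were not set
# explicitly; the mechanism is plain getattr with a fallback on an
# argparse-style namespace. A hypothetical minimal example:
def _architecture_defaults_sketch():
    from argparse import Namespace
    args = Namespace(compressed=8)  # as if the user passed --compressed 8
    args.compressed = getattr(args, "compressed", 4)
    args.shared_kv_compressed = getattr(args, "shared_kv_compressed", 0)
    assert args.compressed == 8            # user-provided value is preserved
    assert args.shared_kv_compressed == 0  # missing value falls back to the default
    return args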
| 4,143 | 33.247934 | 84 | py |
rej-summ | rej-summ-main/examples/wav2vec/vq-wav2vec_featurize.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute embeddings for a flashlight (previously called wav2letter++) dataset
"""
import argparse
import glob
import os
import os.path as osp
import pprint
import soundfile as sf
import torch
import fairseq
from torch import nn
from torch.utils.data import DataLoader
try:
import tqdm
except ImportError:
    print("Install tqdm to use --log-format=tqdm")
class FilesDataset:
def __init__(self, files, labels):
self.files = files
if labels and osp.exists(labels):
with open(labels, "r") as lbl_f:
self.labels = [line.rstrip() for line in lbl_f]
else:
self.labels = labels
def __len__(self):
return len(self.files)
def __getitem__(self, index):
fname = self.files[index]
wav, sr = sf.read(fname)
assert sr == 16000
wav = torch.from_numpy(wav).float()
lbls = None
if self.labels:
if isinstance(self.labels, str):
lbl_file = osp.splitext(fname)[0] + "." + self.labels
with open(lbl_file, "r") as lblf:
lbls = lblf.readline()
assert lbls is not None
else:
lbls = self.labels[index]
return wav, lbls
def collate(self, batch):
return batch
class ArgTypes:
@staticmethod
def existing_path(arg):
arg = str(arg)
assert osp.exists(arg), f"File {arg} does not exist"
return arg
@staticmethod
def mkdir(arg):
arg = str(arg)
os.makedirs(arg, exist_ok=True)
return arg
class DatasetWriter:
def __init__(self):
self.args = self.load_config()
pprint.pprint(self.args.__dict__)
self.model = self.load_model()
def __getattr__(self, attr):
return getattr(self.args, attr)
def read_manifest(self, fname):
with open(fname, "r") as fp:
lines = fp.read().split("\n")
root = lines.pop(0).strip()
fnames = [
osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0
]
return fnames
def process_splits(self):
if self.args.shard is not None or self.args.num_shards is not None:
assert self.args.shard is not None and self.args.num_shards is not None
for split in self.splits:
print(split)
if self.extension == "tsv":
datadir = osp.join(self.data_dir, f"{split}.{self.extension}")
print("Reading manifest file: ", datadir)
files = self.read_manifest(datadir)
else:
datadir = osp.join(self.data_dir, split, f"**/*.{self.extension}")
files = glob.glob(datadir, recursive=True)
assert len(files) > 0
if self.args.shard is not None:
files = files[self.args.shard :: self.args.num_shards]
lbls = []
with open(self.data_file(split), "w") as srcf:
for line, lbl in self.iterate(files):
print(line, file=srcf)
if self.args.labels:
lbls.append(lbl + "\n")
if self.args.labels:
assert all(a is not None for a in lbls)
with open(self.lbl_file(split), "w") as lblf:
lblf.writelines(lbls)
def iterate(self, files):
data = self.load_data(files)
for samples in tqdm.tqdm(data, total=len(files) // 32):
for wav, lbl in samples:
x = wav.unsqueeze(0).float().cuda()
div = 1
while x.size(-1) // div > self.args.max_size:
div += 1
xs = x.chunk(div, dim=-1)
result = []
for x in xs:
torch.cuda.empty_cache()
x = self.model.feature_extractor(x)
if self.quantize_location == "encoder":
with torch.no_grad():
_, idx = self.model.vector_quantizer.forward_idx(x)
idx = idx.squeeze(0).cpu()
else:
with torch.no_grad():
z = self.model.feature_aggregator(x)
_, idx = self.model.vector_quantizer.forward_idx(z)
idx = idx.squeeze(0).cpu()
result.append(idx)
idx = torch.cat(result, dim=0)
yield " ".join("-".join(map(str, a.tolist())) for a in idx), lbl
def lbl_file(self, name):
shard_part = "" if self.args.shard is None else f".{self.args.shard}"
return osp.join(self.output_dir, f"{name}.lbl{shard_part}")
def data_file(self, name):
shard_part = "" if self.args.shard is None else f".{self.args.shard}"
return osp.join(self.output_dir, f"{name}.src{shard_part}")
def var_file(self):
return osp.join(self.output_dir, f"vars.pt")
def load_config(self):
parser = argparse.ArgumentParser("Vector Quantized wav2vec features")
# Model Arguments
parser.add_argument("--checkpoint", type=ArgTypes.existing_path, required=True)
parser.add_argument("--data-parallel", action="store_true")
# Output Arguments
parser.add_argument("--output-dir", type=ArgTypes.mkdir, required=True)
# Data Arguments
parser.add_argument("--data-dir", type=ArgTypes.existing_path, required=True)
parser.add_argument("--splits", type=str, nargs="+", required=True)
parser.add_argument("--extension", type=str, required=True)
parser.add_argument("--labels", type=str, required=False)
parser.add_argument("--shard", type=int, default=None)
parser.add_argument("--num-shards", type=int, default=None)
parser.add_argument("--max-size", type=int, default=1300000)
# Logger Arguments
parser.add_argument(
"--log-format", type=str, choices=["none", "simple", "tqdm"]
)
return parser.parse_args()
def load_data(self, fnames):
dataset = FilesDataset(fnames, self.args.labels)
loader = DataLoader(
dataset, batch_size=32, collate_fn=dataset.collate, num_workers=8
)
return loader
def load_model(self):
model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([self.checkpoint])
model = model[0]
self.quantize_location = getattr(cfg.model, "vq", "encoder")
model.eval().float()
model.cuda()
if self.data_parallel:
model = nn.DataParallel(model)
return model
def __call__(self):
self.process_splits()
if hasattr(self.model.feature_extractor, "vars") and (
self.args.shard is None or self.args.shard == 0
):
vars = (
self.model.feature_extractor.vars.view(
self.model.feature_extractor.banks,
self.model.feature_extractor.num_vars,
-1,
)
.cpu()
.detach()
)
print("writing learned latent variable embeddings: ", vars.shape)
torch.save(vars, self.var_file())
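# --- Illustrative sketch (not part of the writer above) ---
# Each line that iterate() writes to the .src file joins per-timestep codebook
# indices with "-" and separates timesteps with spaces. The idx tensor below
# is made up, standing in for a (T=3, groups=2) quantizer output.
def _quantized_line_format_sketch():
    import torch
    idx = torch.tensor([[5, 17], [5, 18], [9, 2]])
    line = " ".join("-".join(map(str, a.tolist())) for a in idx)
    assert line == "5-17 5-18 9-2"
    return line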
if __name__ == "__main__":
write_data = DatasetWriter()
write_data()
print("Done.")
| 7,680 | 29.601594 | 99 | py |
rej-summ | rej-summ-main/examples/wav2vec/wav2vec_featurize.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute embeddings for a flashlight (previously called wav2letter++) dataset
"""
import argparse
import glob
import os
from shutil import copy
import h5py
import numpy as np
import soundfile as sf
import torch
import tqdm
import fairseq
from torch import nn
def read_audio(fname):
""" Load an audio file and return PCM along with the sample rate """
wav, sr = sf.read(fname)
assert sr == 16e3
return wav, 16e3
class PretrainedWav2VecModel(nn.Module):
def __init__(self, fname):
super().__init__()
model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([fname])
model = model[0]
model.eval()
self.model = model
def forward(self, x):
with torch.no_grad():
z = self.model.feature_extractor(x)
if isinstance(z, tuple):
z = z[0]
c = self.model.feature_aggregator(z)
return z, c
class EmbeddingWriterConfig(argparse.ArgumentParser):
def __init__(self):
super().__init__("Pre-compute embeddings for flashlight datasets")
kwargs = {"action": "store", "type": str, "required": True}
self.add_argument("--input", "-i", help="Input Directory", **kwargs)
self.add_argument("--output", "-o", help="Output Directory", **kwargs)
self.add_argument("--model", help="Path to model checkpoint", **kwargs)
self.add_argument("--split", help="Dataset Splits", nargs="+", **kwargs)
self.add_argument(
"--ext", default="wav", required=False, help="Audio file extension"
)
self.add_argument(
"--no-copy-labels",
action="store_true",
help="Do not copy label files. Useful for large datasets, use --targetdir in flashlight then.",
)
self.add_argument(
"--use-feat",
action="store_true",
help="Use the feature vector ('z') instead of context vector ('c') for features",
)
self.add_argument("--gpu", help="GPU to use", default=0, type=int)
class Prediction:
""" Lightweight wrapper around a fairspeech embedding model """
def __init__(self, fname, gpu=0):
self.gpu = gpu
self.model = PretrainedWav2VecModel(fname).cuda(gpu)
def __call__(self, x):
x = torch.from_numpy(x).float().cuda(self.gpu)
with torch.no_grad():
z, c = self.model(x.unsqueeze(0))
return z.squeeze(0).cpu().numpy(), c.squeeze(0).cpu().numpy()
class H5Writer:
""" Write features as hdf5 file in flashlight compatible format """
def __init__(self, fname):
self.fname = fname
os.makedirs(os.path.dirname(self.fname), exist_ok=True)
def write(self, data):
channel, T = data.shape
with h5py.File(self.fname, "w") as out_ds:
data = data.T.flatten()
out_ds["features"] = data
out_ds["info"] = np.array([16e3 // 160, T, channel])
class EmbeddingDatasetWriter(object):
"""Given a model and a flashlight dataset, pre-compute and store embeddings
Args:
input_root, str :
Path to the flashlight dataset
output_root, str :
Desired output directory. Will be created if non-existent
split, str :
Dataset split
"""
def __init__(
self,
input_root,
output_root,
split,
model_fname,
extension="wav",
gpu=0,
verbose=False,
use_feat=False,
):
assert os.path.exists(model_fname)
self.model_fname = model_fname
self.model = Prediction(self.model_fname, gpu)
self.input_root = input_root
self.output_root = output_root
self.split = split
self.verbose = verbose
self.extension = extension
self.use_feat = use_feat
assert os.path.exists(self.input_path), "Input path '{}' does not exist".format(
self.input_path
)
def _progress(self, iterable, **kwargs):
if self.verbose:
return tqdm.tqdm(iterable, **kwargs)
return iterable
def require_output_path(self, fname=None):
path = self.get_output_path(fname)
os.makedirs(path, exist_ok=True)
@property
def input_path(self):
return self.get_input_path()
@property
def output_path(self):
return self.get_output_path()
def get_input_path(self, fname=None):
if fname is None:
return os.path.join(self.input_root, self.split)
return os.path.join(self.get_input_path(), fname)
def get_output_path(self, fname=None):
if fname is None:
return os.path.join(self.output_root, self.split)
return os.path.join(self.get_output_path(), fname)
def copy_labels(self):
self.require_output_path()
labels = list(
filter(
lambda x: self.extension not in x, glob.glob(self.get_input_path("*"))
)
)
for fname in tqdm.tqdm(labels):
copy(fname, self.output_path)
@property
def input_fnames(self):
return sorted(glob.glob(self.get_input_path("*.{}".format(self.extension))))
def __len__(self):
return len(self.input_fnames)
def write_features(self):
paths = self.input_fnames
fnames_context = map(
lambda x: os.path.join(
self.output_path, x.replace("." + self.extension, ".h5context")
),
map(os.path.basename, paths),
)
for name, target_fname in self._progress(
zip(paths, fnames_context), total=len(self)
):
wav, sr = read_audio(name)
z, c = self.model(wav)
feat = z if self.use_feat else c
writer = H5Writer(target_fname)
writer.write(feat)
def __repr__(self):
return "EmbeddingDatasetWriter ({n_files} files)\n\tinput:\t{input_root}\n\toutput:\t{output_root}\n\tsplit:\t{split})".format(
n_files=len(self), **self.__dict__
)
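# --- Illustrative sketch (not part of the writer above) ---
# H5Writer stores features flattened in (T, channel) row-major order plus an
# "info" array of [frame_rate, T, channel]; reading the file back therefore
# needs a reshape and a transpose. *fname* is assumed to be a file produced by
# H5Writer.write().
def _read_h5context_sketch(fname):
    import h5py
    import numpy as np
    with h5py.File(fname, "r") as ds:
        rate, T, channel = (int(x) for x in np.array(ds["info"]))
        feat = np.array(ds["features"]).reshape(T, channel).T  # channel x T
    return rate, feat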
if __name__ == "__main__":
args = EmbeddingWriterConfig().parse_args()
for split in args.split:
writer = EmbeddingDatasetWriter(
input_root=args.input,
output_root=args.output,
split=split,
model_fname=args.model,
gpu=args.gpu,
extension=args.ext,
use_feat=args.use_feat,
)
print(writer)
writer.require_output_path()
print("Writing Features...")
writer.write_features()
print("Done.")
if not args.no_copy_labels:
print("Copying label data...")
writer.copy_labels()
print("Done.")
| 7,020 | 27.084 | 135 | py |
rej-summ | rej-summ-main/examples/wav2vec/xlsr/scripts/gen_audio_embedding.py | """
Usage:
This script is used to extract the embeddings / logits for the speech classification task.
1. Set fdir to your model checkpoint directory
2. Run the following command (preferably on a GPU machine to speed up the inference process)
CUDA_VISIBLE_DEVICES=0 python3 examples/wav2vec/gen_audio_embedding.py /fsx/data/VoxLingua107/manifest --path ${fdir} \
--task audio_classification --batch-size 90 --gen-subset test \
--infer-manifest /fsx/data/VoxLingua107/manifest/test.tsv \
--infer-xtimes 10 --infer-max-sample-size 160000 --output-path $odir
Example:
Case: LID logit extraction
fdir='/fsx/androstj/exps/voxlingua_lid_train_all/ckpt_100pct_300m_voxling-act_linear-pool_mean_fast-lr_1e-4-phase_0.1_0.4_0.5-maxupd_100000-ufreq_1-mprob_0.5-fz_0-cr_softmax/0/checkpoints/checkpoint_best.pt'
python3 examples/wav2vec/gen_audio_embedding.py /fsx/data/VoxLingua107/manifest --path ${fdir} \
--task audio_classification --batch-size 90 --gen-subset test \
--infer-manifest /fsx/data/VoxLingua107/manifest/test.tsv \
--infer-xtimes 10 --infer-max-sample-size 160000 --output-path $odir
"""
import torch
from fairseq import checkpoint_utils, distributed_utils, options, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import metrics, progress_bar
from fairseq import checkpoint_utils, data, options, tasks
from fairseq.data import FileAudioDataset, AddTargetDataset, Dictionary
from fairseq.tasks.audio_classification import LabelEncoder
import ipdb
import copy
import sys
from tqdm import tqdm
import tempfile
import numpy as np
import sklearn
def subset_manifest(infer_manifest, veri_pair):
with open(infer_manifest) as ff, open(veri_pair) as gg, \
tempfile.NamedTemporaryFile('w', delete=False) as ww:
fnames = ff.read().strip().split("\n")
basedir = fnames[0]
needed_fname = []
for gi in gg.read().strip().split('\n'):
_, x1, x2 = gi.split()
needed_fname.append(x1)
needed_fname.append(x2)
needed_fname = set(needed_fname)
ww.write(basedir+'\n')
for ii in range(1, len(fnames)):
x1,x2 = fnames[ii].split()
if x1 in needed_fname:
ww.write(fnames[ii]+'\n')
print(f'| subset manifest for verification: {ww.name}')
return ww.name
def wrap_target_dataset(infer_manifest, dataset, task):
label_path = infer_manifest.replace(".tsv", ".label")
with open(label_path, "r") as f:
labels = f.read().strip().split("\n")
assert len(labels) == len(dataset)
process_label = LabelEncoder(task.target_dictionary)
dataset = AddTargetDataset(dataset, labels,
pad=task.target_dictionary.pad(),
eos=task.target_dictionary.eos(),
batch_targets=True,
process_label=process_label,
add_to_input=False)
return dataset
def resample_data(source, padding_mask, n_sample, max_sample_len):
# source: BxT
# padding_mask: BxT
B = source.shape[0]
T = source.shape[1]
sources = []
padding_masks = []
seq_len = (~padding_mask).sum(1)
for jj in range(n_sample):
new_source = source.new_zeros(B, max_sample_len)
new_padding_mask = padding_mask.new_zeros(B, max_sample_len)
for ii in range(B):
if seq_len[ii] > max_sample_len:
start = np.random.randint(0, seq_len[ii]-max_sample_len+1)
end = start + max_sample_len
else :
start = 0
end = seq_len[ii]
new_source[ii, 0:end-start] = source[ii, start:end]
new_padding_mask[ii, end-start+1:] = True
sources.append(new_source)
padding_masks.append(new_padding_mask)
return sources, padding_masks
def resample_sample(sample, n_sample, max_sample_len):
new_sources, new_padding_masks = resample_data(sample['net_input']['source'], sample['net_input']['padding_mask'], n_sample, max_sample_len)
new_samples = []
for ii in range(n_sample):
new_sample = copy.deepcopy(sample)
new_sample['net_input']['source'] = new_sources[ii]
new_sample['net_input']['padding_mask'] = new_padding_masks[ii]
new_samples.append(new_sample)
return new_samples
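# --- Illustrative sketch (not part of the inference code above) ---
# Minimal shape check for resample_data, with made-up inputs rather than real
# audio: two "utterances" of lengths 7 and 3 inside a padded batch, cropped or
# padded to max_sample_len=5 and drawn n_sample=2 times.
def _resample_data_sketch():
    source = torch.arange(2 * 8, dtype=torch.float).view(2, 8)
    padding_mask = torch.zeros(2, 8, dtype=torch.bool)
    padding_mask[0, 7:] = True  # first utterance: 7 valid frames
    padding_mask[1, 3:] = True  # second utterance: 3 valid frames
    sources, masks = resample_data(source, padding_mask, n_sample=2, max_sample_len=5)
    assert len(sources) == len(masks) == 2
    assert sources[0].shape == masks[0].shape == (2, 5)
    return sources, masks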
if __name__ == '__main__':
np.random.seed(123)
# Parse command-line arguments for generation
parser = options.get_generation_parser(default_task='audio_classification')
# parser.add_argument('--infer-merge', type=str, default='mean')
parser.add_argument('--infer-xtimes', type=int, default=1)
parser.add_argument('--infer-max-sample-size', type=int, default=5*16000) # 5 secs
parser.add_argument('--infer-manifest', type=str)
parser.add_argument('--verification-pair', type=str, required=False,
help='''
            a file that contains pairs of utterances to be evaluated for whether they are from the same speaker or not
format: (following voxceleb)
1/0 <wav_pair_a> <wav_pair_b>
''')
parser.add_argument('--output-path', type=str)
# parser.add_argument('--infer-xtimes', type=int, default=1)
args = options.parse_args_and_arch(parser)
# Setup task
# task = tasks.setup_task(args)
use_cuda = not args.cpu
# Load model & task
print('| loading model from {}'.format(args.path))
arg_overrides = {
'data': args.data,
# 'mask_prob': 0
#'max_sample_size': sys.maxsize,
#'min_sample_size': 0,
}
state = checkpoint_utils.load_checkpoint_to_cpu(args.path)
# move to AWS
state['cfg']['model']['w2v_path'] = state['cfg']['model']['w2v_path'].replace('/checkpoint/arbabu/XLSR2/model_versions/', '/fsx/data/model_versions/').replace('/checkpoint/kushall/final_model_checkpoints/wav2vec2/', '/fsx/data/wav2vec_ckpt/')
state['cfg']['task']['data'] = state['cfg']['task']['data'].replace('/checkpoint/kushall/data/', '/fsx/data/')
models, _model_args, task = checkpoint_utils.load_model_ensemble_and_task([args.path],
arg_overrides=arg_overrides,
task=None,
state=state)
model = models[0]
model.eval()
if use_cuda:
model.cuda()
# Load dataset
task.load_dataset(args.gen_subset)
dataset = task.dataset(args.gen_subset)
infer_manifest = args.infer_manifest
# only decode needed utts
# infer_manifest = subset_manifest(infer_manifest,
# args.verification_pair)
infer_dataset = FileAudioDataset(infer_manifest,
sample_rate=task.cfg.sample_rate,
max_sample_size=10**10, #task.cfg.max_sample_size,
min_sample_size=1, #task.cfg.min_sample_size,
pad=True,
normalize=task.cfg.normalize)
# add target (if needed)
infer_dataset = wrap_target_dataset(infer_manifest, infer_dataset, task)
itr = task.get_batch_iterator(
dataset=infer_dataset,
max_sentences=args.batch_size,
).next_epoch_itr(shuffle=False)
# correct = 0
# total = 0
list_uttname = []
list_latent = []
list_logit = []
list_target = []
list_src_len = []
with torch.no_grad():
for _, sample in tqdm(enumerate(itr)):
# resample if needed
samples = resample_sample(sample, args.infer_xtimes, args.infer_max_sample_size)
list_uttname.extend(sample['name'])
list_target.extend(sample['target'][:, 0].cpu().numpy())
list_src_len.extend((~sample['net_input']['padding_mask']).sum(1).cpu().numpy())
latents = []
logits = []
for sample in samples:
sample = utils.move_to_cuda(sample) if use_cuda else sample
try:
latent = model.forward_latent(**sample['net_input'])
latents.append(latent.detach().cpu().numpy())
except:
latent = None
logit = model.forward(**sample['net_input'])
logits.append(logit.detach().cpu().numpy())
if len(latents) > 0:
latents = np.stack(latents, 1) # B,X,D
logits = np.stack(logits, 1) # B,X,Cls
list_latent.extend(latents)
list_logit.extend(logits)
# create big npz
list_uttname = np.array(list_uttname)
list_latent = np.array(list_latent)
list_target = np.array(list_target)
list_logit = np.array(list_logit)
list_src_len = np.array(list_src_len)
# save to npz
output_path = args.output_path
if (output_path is None):
output_path = tempfile.NamedTemporaryFile('wb', delete=False).name
with open(output_path, 'wb') as ww:
np.savez(ww, name=list_uttname,
latent=list_latent,
target=list_target,
logit=list_logit,
src_len=list_src_len)
print("="*10 + " REPORT " + "="*10)
print(f'| latent saved in {output_path}')
print(f'| {list_uttname.shape=}, {list_latent.shape=}, {list_target.shape=}, {list_logit.shape=}, {list_src_len.shape=}')
| 9,209 | 40.300448 | 246 | py |
rej-summ | rej-summ-main/examples/wav2vec/unsupervised/w2vu_generate.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run inference for pre-processed data with a trained model.
"""
import ast
from collections import namedtuple
from dataclasses import dataclass, field
from enum import Enum, auto
import hydra
from hydra.core.config_store import ConfigStore
import logging
import math
import os
from omegaconf import OmegaConf
from typing import Optional
import sys
import editdistance
import torch
from hydra.core.hydra_config import HydraConfig
from fairseq import checkpoint_utils, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.dataclass.configs import FairseqDataclass, FairseqConfig
from fairseq.logging.meters import StopwatchMeter
from omegaconf import open_dict
from examples.speech_recognition.kaldi.kaldi_decoder import KaldiDecoderConfig
logging.root.setLevel(logging.INFO)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
class DecoderType(Enum):
VITERBI = auto()
KENLM = auto()
FAIRSEQ = auto()
KALDI = auto()
@dataclass
class UnsupGenerateConfig(FairseqDataclass):
fairseq: FairseqConfig = FairseqConfig()
lm_weight: float = field(
default=2.0,
metadata={"help": "language model weight"},
)
w2l_decoder: DecoderType = field(
default=DecoderType.VITERBI,
metadata={"help": "type of decoder to use"},
)
kaldi_decoder_config: Optional[KaldiDecoderConfig] = None
lexicon: Optional[str] = field(
default=None,
metadata={
"help": "path to lexicon. This is also used to 'phonemize' for unsupvised param tuning"
},
)
lm_model: Optional[str] = field(
default=None,
metadata={"help": "path to language model (kenlm or fairseq)"},
)
decode_stride: Optional[float] = field(
default=None,
metadata={"help": "changing the decoding frequency of the generator"},
)
unit_lm: bool = field(
default=False,
metadata={"help": "whether to use unit lm"},
)
beam_threshold: float = field(
default=50.0,
metadata={"help": "beam score threshold"},
)
beam_size_token: float = field(
default=100.0,
metadata={"help": "max tokens per beam"},
)
beam: int = field(
default=5,
metadata={"help": "decoder beam size"},
)
nbest: int = field(
default=1,
metadata={"help": "number of results to return"},
)
word_score: float = field(
default=1.0,
metadata={"help": "word score to add at end of word"},
)
unk_weight: float = field(
default=-math.inf,
metadata={"help": "unknown token weight"},
)
sil_weight: float = field(
default=0.0,
metadata={"help": "silence token weight"},
)
targets: Optional[str] = field(
default=None,
metadata={"help": "extension of ground truth labels to compute UER"},
)
results_path: Optional[str] = field(
default=None,
metadata={"help": "where to store results"},
)
post_process: Optional[str] = field(
default=None,
metadata={"help": "how to post process results"},
)
vocab_usage_power: float = field(
default=2,
metadata={"help": "for unsupervised param tuning"},
)
viterbi_transcript: Optional[str] = field(
default=None,
metadata={"help": "for unsupervised param tuning"},
)
min_lm_ppl: float = field(
default=0,
metadata={"help": "for unsupervised param tuning"},
)
min_vt_uer: float = field(
default=0,
metadata={"help": "for unsupervised param tuning"},
)
blank_weight: float = field(
default=0,
metadata={"help": "value to add or set for blank emission"},
)
blank_mode: str = field(
default="set",
metadata={
"help": "can be add or set, how to modify blank emission with blank weight"
},
)
sil_is_blank: bool = field(
default=False,
metadata={"help": "if true, <SIL> token is same as blank token"},
)
unsupervised_tuning: bool = field(
default=False,
metadata={
"help": "if true, returns a score based on unsupervised param selection metric instead of UER"
},
)
is_ax: bool = field(
default=False,
metadata={
"help": "if true, assumes we are using ax for tuning and returns a tuple for ax to consume"
},
)
def get_dataset_itr(cfg, task):
return task.get_batch_iterator(
dataset=task.dataset(cfg.fairseq.dataset.gen_subset),
max_tokens=cfg.fairseq.dataset.max_tokens,
max_sentences=cfg.fairseq.dataset.batch_size,
max_positions=(sys.maxsize, sys.maxsize),
ignore_invalid_inputs=cfg.fairseq.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=cfg.fairseq.dataset.required_batch_size_multiple,
num_shards=cfg.fairseq.dataset.num_shards,
shard_id=cfg.fairseq.dataset.shard_id,
num_workers=cfg.fairseq.dataset.num_workers,
data_buffer_size=cfg.fairseq.dataset.data_buffer_size,
).next_epoch_itr(shuffle=False)
def process_predictions(
cfg: UnsupGenerateConfig,
hypos,
tgt_dict,
target_tokens,
res_files,
):
retval = []
word_preds = []
transcriptions = []
dec_scores = []
for i, hypo in enumerate(hypos[: min(len(hypos), cfg.nbest)]):
if torch.is_tensor(hypo["tokens"]):
tokens = hypo["tokens"].int().cpu()
tokens = tokens[tokens >= tgt_dict.nspecial]
hyp_pieces = tgt_dict.string(tokens)
else:
hyp_pieces = " ".join(hypo["tokens"])
if "words" in hypo and len(hypo["words"]) > 0:
hyp_words = " ".join(hypo["words"])
else:
hyp_words = post_process(hyp_pieces, cfg.post_process)
to_write = {}
if res_files is not None:
to_write[res_files["hypo.units"]] = hyp_pieces
to_write[res_files["hypo.words"]] = hyp_words
tgt_words = ""
if target_tokens is not None:
if isinstance(target_tokens, str):
tgt_pieces = tgt_words = target_tokens
else:
tgt_pieces = tgt_dict.string(target_tokens)
tgt_words = post_process(tgt_pieces, cfg.post_process)
if res_files is not None:
to_write[res_files["ref.units"]] = tgt_pieces
to_write[res_files["ref.words"]] = tgt_words
if not cfg.fairseq.common_eval.quiet:
logger.info(f"HYPO {i}:" + hyp_words)
if tgt_words:
logger.info("TARGET:" + tgt_words)
if "am_score" in hypo and "lm_score" in hypo:
logger.info(
f"DECODER AM SCORE: {hypo['am_score']}, DECODER LM SCORE: {hypo['lm_score']}, DECODER SCORE: {hypo['score']}"
)
elif "score" in hypo:
logger.info(f"DECODER SCORE: {hypo['score']}")
logger.info("___________________")
hyp_words_arr = hyp_words.split()
tgt_words_arr = tgt_words.split()
retval.append(
(
editdistance.eval(hyp_words_arr, tgt_words_arr),
len(hyp_words_arr),
len(tgt_words_arr),
hyp_pieces,
hyp_words,
)
)
word_preds.append(hyp_words_arr)
transcriptions.append(to_write)
        dec_scores.append(-hypo.get("score", 0))  # negate because kaldi returns NLL
if len(retval) > 1:
best = None
for r, t in zip(retval, transcriptions):
if best is None or r[0] < best[0][0]:
best = r, t
for dest, tran in best[1].items():
print(tran, file=dest)
dest.flush()
return best[0]
assert len(transcriptions) == 1
for dest, tran in transcriptions[0].items():
print(tran, file=dest)
return retval[0]
def prepare_result_files(cfg: UnsupGenerateConfig):
def get_res_file(file_prefix):
if cfg.fairseq.dataset.num_shards > 1:
file_prefix = f"{cfg.fairseq.dataset.shard_id}_{file_prefix}"
path = os.path.join(
cfg.results_path,
"{}{}.txt".format(
cfg.fairseq.dataset.gen_subset,
file_prefix,
),
)
return open(path, "w", buffering=1)
if not cfg.results_path:
return None
return {
"hypo.words": get_res_file(""),
"hypo.units": get_res_file("_units"),
"ref.words": get_res_file("_ref"),
"ref.units": get_res_file("_ref_units"),
"hypo.nbest.words": get_res_file("_nbest_words"),
}
def optimize_models(cfg: UnsupGenerateConfig, use_cuda, models):
"""Optimize ensemble for generation"""
for model in models:
model.eval()
if cfg.fairseq.common.fp16:
model.half()
if use_cuda:
model.cuda()
GenResult = namedtuple(
"GenResult",
[
"count",
"errs_t",
"gen_timer",
"lengths_hyp_unit_t",
"lengths_hyp_t",
"lengths_t",
"lm_score_t",
"num_feats",
"num_sentences",
"num_symbols",
"vt_err_t",
"vt_length_t",
],
)
def generate(cfg: UnsupGenerateConfig, models, saved_cfg, use_cuda):
task = tasks.setup_task(cfg.fairseq.task)
saved_cfg.task.labels = cfg.fairseq.task.labels
task.load_dataset(cfg.fairseq.dataset.gen_subset, task_cfg=saved_cfg.task)
# Set dictionary
tgt_dict = task.target_dictionary
logger.info(
"| {} {} {} examples".format(
cfg.fairseq.task.data,
cfg.fairseq.dataset.gen_subset,
len(task.dataset(cfg.fairseq.dataset.gen_subset)),
)
)
# Load dataset (possibly sharded)
itr = get_dataset_itr(cfg, task)
# Initialize generator
gen_timer = StopwatchMeter()
def build_generator(cfg: UnsupGenerateConfig):
w2l_decoder = cfg.w2l_decoder
if w2l_decoder == DecoderType.VITERBI:
from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
return W2lViterbiDecoder(cfg, task.target_dictionary)
elif w2l_decoder == DecoderType.KENLM:
from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
return W2lKenLMDecoder(cfg, task.target_dictionary)
elif w2l_decoder == DecoderType.FAIRSEQ:
from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder
return W2lFairseqLMDecoder(cfg, task.target_dictionary)
elif w2l_decoder == DecoderType.KALDI:
from examples.speech_recognition.kaldi.kaldi_decoder import KaldiDecoder
assert cfg.kaldi_decoder_config is not None
return KaldiDecoder(
cfg.kaldi_decoder_config,
cfg.beam,
)
else:
raise NotImplementedError(
"only wav2letter decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment but found "
+ str(w2l_decoder)
)
generator = build_generator(cfg)
kenlm = None
fairseq_lm = None
if cfg.lm_model is not None:
import kenlm
kenlm = kenlm.Model(cfg.lm_model)
num_sentences = 0
if cfg.results_path is not None and not os.path.exists(cfg.results_path):
os.makedirs(cfg.results_path)
res_files = prepare_result_files(cfg)
errs_t = 0
lengths_hyp_t = 0
lengths_hyp_unit_t = 0
lengths_t = 0
count = 0
num_feats = 0
all_hyp_pieces = []
all_hyp_words = []
num_symbols = (
len([s for s in tgt_dict.symbols if not s.startswith("madeup")])
- tgt_dict.nspecial
)
targets = None
if cfg.targets is not None:
tgt_path = os.path.join(
cfg.fairseq.task.data, cfg.fairseq.dataset.gen_subset + "." + cfg.targets
)
if os.path.exists(tgt_path):
with open(tgt_path, "r") as f:
targets = f.read().splitlines()
viterbi_transcript = None
if cfg.viterbi_transcript is not None and len(cfg.viterbi_transcript) > 0:
logger.info(f"loading viterbi transcript from {cfg.viterbi_transcript}")
with open(cfg.viterbi_transcript, "r") as vf:
viterbi_transcript = vf.readlines()
viterbi_transcript = [v.rstrip().split() for v in viterbi_transcript]
gen_timer.start()
start = 0
end = len(itr)
hypo_futures = None
if cfg.w2l_decoder == DecoderType.KALDI:
logger.info("Extracting features")
hypo_futures = []
samples = []
with progress_bar.build_progress_bar(cfg.fairseq.common, itr) as t:
for i, sample in enumerate(t):
if "net_input" not in sample or i < start or i >= end:
continue
if "padding_mask" not in sample["net_input"]:
sample["net_input"]["padding_mask"] = None
hypos, num_feats = gen_hypos(
generator, models, num_feats, sample, task, use_cuda
)
hypo_futures.append(hypos)
samples.append(sample)
itr = list(zip(hypo_futures, samples))
start = 0
end = len(itr)
logger.info("Finished extracting features")
with progress_bar.build_progress_bar(cfg.fairseq.common, itr) as t:
for i, sample in enumerate(t):
if i < start or i >= end:
continue
if hypo_futures is not None:
hypos, sample = sample
hypos = [h.result() for h in hypos]
else:
if "net_input" not in sample:
continue
hypos, num_feats = gen_hypos(
generator, models, num_feats, sample, task, use_cuda
)
for i, sample_id in enumerate(sample["id"].tolist()):
if targets is not None:
target_tokens = targets[sample_id]
elif "target" in sample or "target_label" in sample:
toks = (
sample["target"][i, :]
if "target_label" not in sample
else sample["target_label"][i, :]
)
target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu()
else:
target_tokens = None
# Process top predictions
(
errs,
length_hyp,
length,
hyp_pieces,
hyp_words,
) = process_predictions(
cfg,
hypos[i],
tgt_dict,
target_tokens,
res_files,
)
errs_t += errs
lengths_hyp_t += length_hyp
lengths_hyp_unit_t += (
len(hyp_pieces) if len(hyp_pieces) > 0 else len(hyp_words)
)
lengths_t += length
count += 1
all_hyp_pieces.append(hyp_pieces)
all_hyp_words.append(hyp_words)
num_sentences += (
sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
)
lm_score_sum = 0
if kenlm is not None:
if cfg.unit_lm:
lm_score_sum = sum(kenlm.score(w) for w in all_hyp_pieces)
else:
lm_score_sum = sum(kenlm.score(w) for w in all_hyp_words)
elif fairseq_lm is not None:
lm_score_sum = sum(fairseq_lm.score([h.split() for h in all_hyp_words])[0])
vt_err_t = 0
vt_length_t = 0
if viterbi_transcript is not None:
unit_hyps = []
if cfg.targets is not None and cfg.lexicon is not None:
lex = {}
with open(cfg.lexicon, "r") as lf:
for line in lf:
items = line.rstrip().split()
lex[items[0]] = items[1:]
for h in all_hyp_pieces:
hyp_ws = []
for w in h.split():
assert w in lex, w
hyp_ws.extend(lex[w])
unit_hyps.append(hyp_ws)
else:
unit_hyps.extend([h.split() for h in all_hyp_words])
vt_err_t = sum(
editdistance.eval(vt, h) for vt, h in zip(viterbi_transcript, unit_hyps)
)
vt_length_t = sum(len(h) for h in viterbi_transcript)
if res_files is not None:
for r in res_files.values():
r.close()
gen_timer.stop(lengths_hyp_t)
return GenResult(
count,
errs_t,
gen_timer,
lengths_hyp_unit_t,
lengths_hyp_t,
lengths_t,
lm_score_sum,
num_feats,
num_sentences,
num_symbols,
vt_err_t,
vt_length_t,
)
def gen_hypos(generator, models, num_feats, sample, task, use_cuda):
sample = utils.move_to_cuda(sample) if use_cuda else sample
if "features" in sample["net_input"]:
sample["net_input"]["dense_x_only"] = True
num_feats += (
sample["net_input"]["features"].shape[0]
* sample["net_input"]["features"].shape[1]
)
hypos = task.inference_step(generator, models, sample, None)
return hypos, num_feats
def main(cfg: UnsupGenerateConfig, model=None):
if (
cfg.fairseq.dataset.max_tokens is None
and cfg.fairseq.dataset.batch_size is None
):
cfg.fairseq.dataset.max_tokens = 1024000
use_cuda = torch.cuda.is_available() and not cfg.fairseq.common.cpu
task = tasks.setup_task(cfg.fairseq.task)
overrides = ast.literal_eval(cfg.fairseq.common_eval.model_overrides)
if cfg.fairseq.task._name == "unpaired_audio_text":
overrides["model"] = {
"blank_weight": cfg.blank_weight,
"blank_mode": cfg.blank_mode,
"blank_is_sil": cfg.sil_is_blank,
"no_softmax": True,
"segmentation": {
"type": "NONE",
},
}
else:
overrides["model"] = {
"blank_weight": cfg.blank_weight,
"blank_mode": cfg.blank_mode,
}
if cfg.decode_stride:
overrides["model"]["generator_stride"] = cfg.decode_stride
if model is None:
# Load ensemble
logger.info("| loading model(s) from {}".format(cfg.fairseq.common_eval.path))
models, saved_cfg = checkpoint_utils.load_model_ensemble(
cfg.fairseq.common_eval.path.split("\\"),
arg_overrides=overrides,
task=task,
suffix=cfg.fairseq.checkpoint.checkpoint_suffix,
strict=(cfg.fairseq.checkpoint.checkpoint_shard_count == 1),
num_shards=cfg.fairseq.checkpoint.checkpoint_shard_count,
)
optimize_models(cfg, use_cuda, models)
else:
models = [model]
saved_cfg = cfg.fairseq
with open_dict(saved_cfg.task):
saved_cfg.task.shuffle = False
saved_cfg.task.sort_by_length = False
gen_result = generate(cfg, models, saved_cfg, use_cuda)
wer = None
if gen_result.lengths_t > 0:
wer = gen_result.errs_t * 100.0 / gen_result.lengths_t
logger.info(f"WER: {wer}")
lm_ppl = float("inf")
if gen_result.lm_score_t != 0 and gen_result.lengths_hyp_t > 0:
hyp_len = gen_result.lengths_hyp_t
lm_ppl = math.pow(
10, -gen_result.lm_score_t / (hyp_len + gen_result.num_sentences)
)
logger.info(f"LM PPL: {lm_ppl}")
logger.info(
"| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}"
" sentences/s, {:.2f} tokens/s)".format(
gen_result.num_sentences,
gen_result.gen_timer.n,
gen_result.gen_timer.sum,
gen_result.num_sentences / gen_result.gen_timer.sum,
1.0 / gen_result.gen_timer.avg,
)
)
vt_diff = None
if gen_result.vt_length_t > 0:
vt_diff = gen_result.vt_err_t / gen_result.vt_length_t
vt_diff = max(cfg.min_vt_uer, vt_diff)
lm_ppl = max(cfg.min_lm_ppl, lm_ppl)
if not cfg.unsupervised_tuning:
weighted_score = wer
else:
weighted_score = math.log(lm_ppl) * (vt_diff or 1.0)
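    # Illustrative note: with unsupervised tuning enabled, the selection score is a
    # label-free proxy for WER that combines the LM perplexity of the hypotheses with
    # their distance (UER) to the model's own viterbi transcripts.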
res = (
f"| Generate {cfg.fairseq.dataset.gen_subset} with beam={cfg.beam}, "
f"lm_weight={cfg.kaldi_decoder_config.acoustic_scale if cfg.kaldi_decoder_config else cfg.lm_weight}, "
f"word_score={cfg.word_score}, sil_weight={cfg.sil_weight}, blank_weight={cfg.blank_weight}, "
f"WER: {wer}, LM_PPL: {lm_ppl}, num feats: {gen_result.num_feats}, "
f"length: {gen_result.lengths_hyp_t}, UER to viterbi: {(vt_diff or 0) * 100}, score: {weighted_score}"
)
logger.info(res)
# print(res)
return task, weighted_score
@hydra.main(
config_path=os.path.join("../../..", "fairseq", "config"), config_name="config"
)
def hydra_main(cfg):
with open_dict(cfg):
        # make hydra logging work with ddp (see https://github.com/facebookresearch/hydra/issues/1126)
cfg.job_logging_cfg = OmegaConf.to_container(
HydraConfig.get().job_logging, resolve=True
)
cfg = OmegaConf.create(
OmegaConf.to_container(cfg, resolve=False, enum_to_str=False)
)
OmegaConf.set_struct(cfg, True)
logger.info(cfg)
utils.import_user_module(cfg.fairseq.common)
_, score = main(cfg)
if cfg.is_ax:
return score, None
return score
def cli_main():
try:
from hydra._internal.utils import get_args
cfg_name = get_args().config_name or "config"
except:
logger.warning("Failed to get config name from hydra args")
cfg_name = "config"
cs = ConfigStore.instance()
cs.store(name=cfg_name, node=UnsupGenerateConfig)
hydra_main()
if __name__ == "__main__":
cli_main()
| 22,454 | 30.405594 | 129 | py |
rej-summ | rej-summ-main/examples/wav2vec/unsupervised/models/wav2vec_u.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from enum import Enum, auto
import math
import numpy as np
from typing import Tuple, List, Optional, Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import autograd
from fairseq import checkpoint_utils, utils
from fairseq.dataclass import FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model
from fairseq.modules import (
SamePad,
TransposeLast,
)
class SegmentationType(Enum):
NONE = auto()
RANDOM = auto()
UNIFORM_RANDOM = auto()
UNIFORM_RANDOM_JOIN = auto()
JOIN = auto()
@dataclass
class SegmentationConfig(FairseqDataclass):
type: SegmentationType = SegmentationType.NONE
subsample_rate: float = 0.25
mean_pool: bool = True
mean_pool_join: bool = False
remove_zeros: bool = False
@dataclass
class Wav2vec_UConfig(FairseqDataclass):
discriminator_kernel: int = 3
discriminator_dilation: int = 1
discriminator_dim: int = 256
discriminator_causal: bool = True
discriminator_linear_emb: bool = False
discriminator_depth: int = 1
discriminator_max_pool: bool = False
discriminator_act_after_linear: bool = False
discriminator_dropout: float = 0.0
discriminator_spectral_norm: bool = False
discriminator_weight_norm: bool = False
generator_kernel: int = 4
generator_dilation: int = 1
generator_stride: int = 1
generator_pad: int = -1
generator_bias: bool = False
generator_dropout: float = 0.0
generator_batch_norm: int = 0
generator_residual: bool = False
blank_weight: float = 0
blank_mode: str = "add"
blank_is_sil: bool = False
no_softmax: bool = False
smoothness_weight: float = 0.0
smoothing: float = 0.0
smoothing_one_sided: bool = False
gradient_penalty: float = 0.0
probabilistic_grad_penalty_slicing: bool = False
code_penalty: float = 0.0
mmi_weight: float = 0.0
target_dim: int = 64
target_downsample_rate: int = 2
gumbel: bool = False
hard_gumbel: bool = True
temp: Tuple[float, float, float] = (2, 0.1, 0.99995)
input_dim: int = 128
segmentation: SegmentationConfig = SegmentationConfig()
class Segmenter(nn.Module):
cfg: SegmentationConfig
def __init__(self, cfg: SegmentationConfig):
super().__init__()
self.cfg = cfg
self.subsample_rate = cfg.subsample_rate
def pre_segment(self, dense_x, dense_padding_mask):
return dense_x, dense_padding_mask
def logit_segment(self, logits, padding_mask):
return logits, padding_mask
class RandomSegmenter(Segmenter):
def pre_segment(self, dense_x, dense_padding_mask):
target_num = math.ceil(dense_x.size(1) * self.subsample_rate)
ones = torch.ones(dense_x.shape[:-1], device=dense_x.device)
indices, _ = ones.multinomial(target_num).sort(dim=-1)
indices_ld = indices.unsqueeze(-1).expand(-1, -1, dense_x.size(-1))
dense_x = dense_x.gather(1, indices_ld)
dense_padding_mask = dense_padding_mask.gather(1, index=indices)
return dense_x, dense_padding_mask
class UniformRandomSegmenter(Segmenter):
def pre_segment(self, dense_x, dense_padding_mask):
bsz, tsz, fsz = dense_x.shape
target_num = math.ceil(tsz * self.subsample_rate)
rem = tsz % target_num
if rem > 0:
dense_x = F.pad(dense_x, [0, 0, 0, target_num - rem])
dense_padding_mask = F.pad(
dense_padding_mask, [0, target_num - rem], value=True
)
dense_x = dense_x.view(bsz, target_num, -1, fsz)
dense_padding_mask = dense_padding_mask.view(bsz, target_num, -1)
if self.cfg.mean_pool:
dense_x = dense_x.mean(dim=-2)
dense_padding_mask = dense_padding_mask.all(dim=-1)
else:
ones = torch.ones((bsz, dense_x.size(2)), device=dense_x.device)
indices = ones.multinomial(1)
indices = indices.unsqueeze(-1).expand(-1, target_num, -1)
indices_ld = indices.unsqueeze(-1).expand(-1, -1, -1, fsz)
dense_x = dense_x.gather(2, indices_ld).reshape(bsz, -1, fsz)
dense_padding_mask = dense_padding_mask.gather(2, index=indices).reshape(
bsz, -1
)
return dense_x, dense_padding_mask
class JoinSegmenter(Segmenter):
def logit_segment(self, logits, padding_mask):
preds = logits.argmax(dim=-1)
if padding_mask.any():
preds[padding_mask] = -1 # mark pad
uniques = []
bsz, tsz, csz = logits.shape
for p in preds:
uniques.append(
p.cpu().unique_consecutive(return_inverse=True, return_counts=True)
)
new_tsz = max(u[0].numel() for u in uniques)
new_logits = logits.new_zeros(bsz, new_tsz, csz)
new_pad = padding_mask.new_zeros(bsz, new_tsz)
for b in range(bsz):
u, idx, c = uniques[b]
keep = u != -1
if self.cfg.remove_zeros:
keep.logical_and_(u != 0)
if self.training and not self.cfg.mean_pool_join:
u[0] = 0
u[1:] = c.cumsum(0)[:-1]
m = c > 1
r = torch.rand(m.sum())
o = (c[m] * r).long()
u[m] += o
new_logits[b, : u.numel()] = logits[b, u]
else:
new_logits[b].index_add_(
dim=0, index=idx.to(new_logits.device), source=logits[b]
)
new_logits[b, : c.numel()] /= c.unsqueeze(-1).to(new_logits.device)
new_sz = keep.sum()
if not keep.all():
kept_logits = new_logits[b, : c.numel()][keep]
new_logits[b, :new_sz] = kept_logits
if new_sz < new_tsz:
pad = new_tsz - new_sz
new_logits[b, -pad:] = 0
new_pad[b, -pad:] = True
return new_logits, new_pad
class UniformRandomJoinSegmenter(UniformRandomSegmenter, JoinSegmenter):
pass
SEGMENT_FACTORY = {
SegmentationType.NONE: Segmenter,
SegmentationType.RANDOM: RandomSegmenter,
SegmentationType.UNIFORM_RANDOM: UniformRandomSegmenter,
SegmentationType.UNIFORM_RANDOM_JOIN: UniformRandomJoinSegmenter,
SegmentationType.JOIN: JoinSegmenter,
}
class Discriminator(nn.Module):
def __init__(self, dim, cfg: Wav2vec_UConfig):
super().__init__()
inner_dim = cfg.discriminator_dim
kernel = cfg.discriminator_kernel
dilation = cfg.discriminator_dilation
self.max_pool = cfg.discriminator_max_pool
if cfg.discriminator_causal:
padding = kernel - 1
else:
padding = kernel // 2
def make_conv(in_d, out_d, k, p=0, has_dilation=True):
conv = nn.Conv1d(
in_d,
out_d,
kernel_size=k,
padding=p,
dilation=dilation if has_dilation else 1,
)
if cfg.discriminator_spectral_norm:
conv = nn.utils.spectral_norm(conv)
elif cfg.discriminator_weight_norm:
conv = nn.utils.weight_norm(conv)
return conv
inner_net = [
nn.Sequential(
make_conv(inner_dim, inner_dim, kernel, padding),
SamePad(kernel_size=kernel, causal=cfg.discriminator_causal),
nn.Dropout(cfg.discriminator_dropout),
nn.GELU(),
)
for _ in range(cfg.discriminator_depth - 1)
] + [
make_conv(inner_dim, 1, kernel, padding, has_dilation=False),
SamePad(kernel_size=kernel, causal=cfg.discriminator_causal),
]
if cfg.discriminator_linear_emb:
emb_net = [make_conv(dim, inner_dim, 1)]
else:
emb_net = [
make_conv(dim, inner_dim, kernel, padding),
SamePad(kernel_size=kernel, causal=cfg.discriminator_causal),
]
if cfg.discriminator_act_after_linear:
emb_net.append(nn.GELU())
self.net = nn.Sequential(
*emb_net,
nn.Dropout(cfg.discriminator_dropout),
*inner_net,
)
def forward(self, x, padding_mask):
x = x.transpose(1, 2) # BTC -> BCT
x = self.net(x)
x = x.transpose(1, 2)
x_sz = x.size(1)
if padding_mask is not None and padding_mask.any() and padding_mask.dim() > 1:
padding_mask = padding_mask[:, : x.size(1)]
x[padding_mask] = float("-inf") if self.max_pool else 0
x_sz = x_sz - padding_mask.sum(dim=-1)
x = x.squeeze(-1)
if self.max_pool:
x, _ = x.max(dim=-1)
else:
x = x.sum(dim=-1)
x = x / x_sz
return x
class Generator(nn.Module):
def __init__(self, input_dim, output_dim, cfg: Wav2vec_UConfig):
super().__init__()
self.cfg = cfg
self.output_dim = output_dim
self.stride = cfg.generator_stride
self.dropout = nn.Dropout(cfg.generator_dropout)
self.batch_norm = cfg.generator_batch_norm != 0
self.residual = cfg.generator_residual
padding = (
cfg.generator_kernel // 2 if cfg.generator_pad < 0 else cfg.generator_pad
)
self.proj = nn.Sequential(
TransposeLast(),
nn.Conv1d(
input_dim,
output_dim,
kernel_size=cfg.generator_kernel,
stride=cfg.generator_stride,
dilation=cfg.generator_dilation,
padding=padding,
bias=cfg.generator_bias,
),
TransposeLast(),
)
if self.batch_norm:
self.bn = nn.BatchNorm1d(input_dim)
self.bn.weight.data.fill_(cfg.generator_batch_norm)
if self.residual:
self.in_proj = nn.Linear(input_dim, input_dim)
def forward(self, dense_x, tokens, dense_padding_mask):
result = {}
if self.batch_norm:
dense_x = self.bn_padded_data(dense_x, dense_padding_mask)
if self.residual:
inter_x = self.in_proj(self.dropout(dense_x))
dense_x = dense_x + inter_x
result["inter_x"] = inter_x
dense_x = self.dropout(dense_x)
dense_x = self.proj(dense_x)
if self.stride > 1:
dense_padding_mask = dense_padding_mask[:, :: self.stride]
if dense_padding_mask.size(1) != dense_x.size(1):
new_padding = dense_padding_mask.new_zeros(dense_x.shape[:-1])
diff = new_padding.size(1) - dense_padding_mask.size(1)
if diff > 0:
new_padding[:, diff:] = dense_padding_mask
else:
assert diff < 0
new_padding = dense_padding_mask[:, :diff]
dense_padding_mask = new_padding
token_x = None
if tokens is not None:
token_x = dense_x.new_zeros(tokens.numel(), self.output_dim)
token_x.scatter_(1, tokens.view(-1, 1).long(), 1)
token_x = token_x.view(tokens.shape + (self.output_dim,))
result["dense_x"] = dense_x
result["token_x"] = token_x
result["dense_padding_mask"] = dense_padding_mask
return result
def bn_padded_data(self, feature, padding_mask):
normed_feature = feature.clone()
normed_feature[~padding_mask] = self.bn(
feature[~padding_mask].unsqueeze(-1)
).squeeze(-1)
return normed_feature
@register_model("wav2vec_u", dataclass=Wav2vec_UConfig)
class Wav2vec_U(BaseFairseqModel):
def calc_gradient_penalty(self, real_data, fake_data):
b_size = min(real_data.size(0), fake_data.size(0))
t_size = min(real_data.size(1), fake_data.size(1))
if self.cfg.probabilistic_grad_penalty_slicing:
def get_slice(data, dim, target_size):
size = data.size(dim)
diff = size - target_size
if diff <= 0:
return data
start = np.random.randint(0, diff + 1)
return data.narrow(dim=dim, start=start, length=target_size)
real_data = get_slice(real_data, 0, b_size)
real_data = get_slice(real_data, 1, t_size)
fake_data = get_slice(fake_data, 0, b_size)
fake_data = get_slice(fake_data, 1, t_size)
else:
real_data = real_data[:b_size, :t_size]
fake_data = fake_data[:b_size, :t_size]
alpha = torch.rand(real_data.size(0), 1, 1)
alpha = alpha.expand(real_data.size())
alpha = alpha.to(real_data.device)
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
disc_interpolates = self.discriminator(interpolates, None)
gradients = autograd.grad(
outputs=disc_interpolates,
inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size(), device=real_data.device),
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
gradient_penalty = (gradients.norm(2, dim=1) - 1) ** 2
return gradient_penalty
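    # Illustrative note: the method above follows the WGAN-GP recipe -- interpolate the
    # real (token) and fake (generator) inputs as x_hat = a * x_real + (1 - a) * x_fake,
    # run the discriminator on x_hat, and penalise (||grad_{x_hat} D(x_hat)||_2 - 1) ** 2
    # so that the discriminator stays approximately 1-Lipschitz.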
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
self.update_num = num_updates
self.curr_temp = max(
self.max_temp * self.temp_decay ** num_updates, self.min_temp
)
def discrim_step(self, num_updates):
return num_updates % 2 == 1
def get_groups_for_update(self, num_updates):
return "discriminator" if self.discrim_step(num_updates) else "generator"
def __init__(self, cfg: Wav2vec_UConfig, target_dict):
super().__init__()
self.cfg = cfg
self.zero_index = target_dict.index("<SIL>") if "<SIL>" in target_dict else 0
self.smoothness_weight = cfg.smoothness_weight
output_size = len(target_dict)
self.pad = target_dict.pad()
self.eos = target_dict.eos()
self.smoothing = cfg.smoothing
self.smoothing_one_sided = cfg.smoothing_one_sided
self.no_softmax = cfg.no_softmax
self.gumbel = cfg.gumbel
self.hard_gumbel = cfg.hard_gumbel
self.last_acc = None
self.gradient_penalty = cfg.gradient_penalty
self.code_penalty = cfg.code_penalty
self.mmi_weight = cfg.mmi_weight
self.blank_weight = cfg.blank_weight
self.blank_mode = cfg.blank_mode
self.blank_index = target_dict.index("<SIL>") if cfg.blank_is_sil else 0
assert self.blank_index != target_dict.unk()
self.discriminator = Discriminator(output_size, cfg)
for p in self.discriminator.parameters():
p.param_group = "discriminator"
self.pca_A = self.pca_b = None
d = cfg.input_dim
self.segmenter = SEGMENT_FACTORY[cfg.segmentation.type](cfg.segmentation)
self.generator = Generator(d, output_size, cfg)
for p in self.generator.parameters():
p.param_group = "generator"
for p in self.segmenter.parameters():
p.param_group = "generator"
self.max_temp, self.min_temp, self.temp_decay = cfg.temp
self.curr_temp = self.max_temp
self.update_num = 0
if self.mmi_weight > 0:
self.target_downsample_rate = cfg.target_downsample_rate
self.decoder = nn.Linear(d, cfg.target_dim)
for p in self.decoder.parameters():
p.param_group = "generator"
@classmethod
def build_model(cls, cfg, task):
return cls(cfg, task.target_dictionary)
def get_logits(
self,
net_output: Optional[Dict[str, List[Optional[torch.Tensor]]]],
normalize: bool = False,
):
logits = net_output["logits"]
if self.blank_weight != 0:
if self.blank_mode == "add":
logits[..., self.blank_index] += self.blank_weight
elif self.blank_mode == "set":
logits[..., self.blank_index] = self.blank_weight
else:
raise Exception(f"invalid blank mode {self.blank_mode}")
padding = net_output["padding_mask"]
if padding.any():
logits[padding] = float("-inf")
logits[padding][..., self.blank_index] = float("inf")
if normalize:
logits = utils.log_softmax(logits.float(), dim=-1)
return logits.transpose(0, 1)
def get_normalized_probs(
self,
net_output: Tuple[
torch.Tensor, Optional[Dict[str, List[Optional[torch.Tensor]]]]
],
log_probs: bool,
sample: Optional[Dict[str, torch.Tensor]] = None,
):
logits = self.get_logits(net_output)
probs = super().get_normalized_probs(logits, log_probs, sample)
# BTC -> TBC for ctc
probs = probs.transpose(0, 1)
return probs
def normalize(self, dense_x):
bsz, tsz, csz = dense_x.shape
if dense_x.numel() == 0:
raise Exception(dense_x.shape)
_, k = dense_x.max(-1)
hard_x = (
dense_x.new_zeros(bsz * tsz, csz)
.scatter_(-1, k.view(-1, 1), 1.0)
.view(-1, csz)
)
hard_probs = torch.mean(hard_x.float(), dim=0)
code_perplexity = torch.exp(
-torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1)
)
avg_probs = torch.softmax(dense_x.reshape(-1, csz).float(), dim=-1).mean(dim=0)
prob_perplexity = torch.exp(
-torch.sum(avg_probs * torch.log(avg_probs + 1e-7), dim=-1)
)
if not self.no_softmax:
if self.training and self.gumbel:
dense_x = F.gumbel_softmax(
dense_x.float(), tau=self.curr_temp, hard=self.hard_gumbel
).type_as(dense_x)
else:
dense_x = dense_x.softmax(-1)
return dense_x, code_perplexity, prob_perplexity
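    # Illustrative note: code_perplexity / prob_perplexity above are exp(entropy) of the
    # hard argmax distribution and of the batch-averaged softmax distribution, i.e. roughly
    # how many of the output units the batch actually uses; they feed the code penalty
    # computed in forward().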
def forward(
self,
features,
padding_mask,
random_label=None,
dense_x_only=False,
segment=True,
aux_target=None,
):
if segment:
features, padding_mask = self.segmenter.pre_segment(features, padding_mask)
orig_size = features.size(0) * features.size(1) - padding_mask.sum()
gen_result = self.generator(features, random_label, padding_mask)
orig_dense_x, token_x = gen_result["dense_x"], gen_result["token_x"]
orig_dense_padding_mask = gen_result["dense_padding_mask"]
if segment:
dense_x, dense_padding_mask = self.segmenter.logit_segment(
orig_dense_x, orig_dense_padding_mask
)
else:
dense_x = orig_dense_x
dense_padding_mask = orig_dense_padding_mask
dense_logits = dense_x
prob_perplexity = None
code_perplexity = None
if not (self.no_softmax and dense_x_only):
dense_x, code_perplexity, prob_perplexity = self.normalize(dense_logits)
if dense_x_only or self.discriminator is None:
return {
"logits": dense_x,
"padding_mask": dense_padding_mask,
}
token_padding_mask = random_label == self.pad
dense_y = self.discriminator(dense_x, dense_padding_mask)
token_y = self.discriminator(token_x, token_padding_mask)
sample_size = features.size(0)
d_step = self.discrim_step(self.update_num)
fake_smooth = self.smoothing
real_smooth = self.smoothing
if self.smoothing_one_sided:
fake_smooth = 0
zero_loss = None
smoothness_loss = None
code_pen = None
mmi_loss = None
if d_step:
loss_dense = F.binary_cross_entropy_with_logits(
dense_y,
dense_y.new_ones(dense_y.shape) - fake_smooth,
reduction="sum",
)
loss_token = F.binary_cross_entropy_with_logits(
token_y,
token_y.new_zeros(token_y.shape) + real_smooth,
reduction="sum",
)
if self.training and self.gradient_penalty > 0:
grad_pen = self.calc_gradient_penalty(token_x, dense_x)
grad_pen = grad_pen.sum() * self.gradient_penalty
else:
grad_pen = None
else:
grad_pen = None
loss_token = None
loss_dense = F.binary_cross_entropy_with_logits(
dense_y,
dense_y.new_zeros(dense_y.shape) + fake_smooth,
reduction="sum",
)
num_vars = dense_x.size(-1)
if prob_perplexity is not None:
code_pen = (num_vars - prob_perplexity) / num_vars
code_pen = code_pen * sample_size * self.code_penalty
if self.smoothness_weight > 0:
smoothness_loss = F.mse_loss(
dense_logits[:, :-1], dense_logits[:, 1:], reduction="none"
)
smoothness_loss[dense_padding_mask[:, 1:]] = 0
smoothness_loss = (
smoothness_loss.mean() * sample_size * self.smoothness_weight
)
if (self.mmi_weight > 0) and (aux_target is not None):
inter_x = self.decoder(gen_result["inter_x"])
if self.target_downsample_rate > 1:
aux_target = aux_target[:, :: self.target_downsample_rate]
max_t_len = min(aux_target.shape[1], inter_x.shape[1])
mmi_loss = F.cross_entropy(
inter_x[:, :max_t_len].transpose(1, 2),
aux_target[:, :max_t_len],
ignore_index=-1,
reduction="none",
)
mmi_loss = mmi_loss.mean() * mmi_loss.shape[0] * self.mmi_weight
result = {
"losses": {
"grad_pen": grad_pen,
"code_pen": code_pen,
"smoothness": smoothness_loss,
"mmi": mmi_loss,
},
"temp": self.curr_temp,
"code_ppl": code_perplexity,
"prob_ppl": prob_perplexity,
"d_steps": int(d_step),
"sample_size": sample_size,
}
suff = "_d" if d_step else "_g"
result["losses"]["dense" + suff] = loss_dense
result["losses"]["token" + suff] = loss_token
return result
| 22,945 | 32.351744 | 87 | py |
rej-summ | rej-summ-main/examples/wav2vec/unsupervised/scripts/wav2vec_apply_cluster_faiss.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import numpy as np
import tqdm
import torch
import sys
import faiss
import torch.nn.functional as F
from wav2vec_cluster_faiss import parse_faiss_specs, Wav2VecFeatureReader
def get_parser():
parser = argparse.ArgumentParser(description="apply clusters")
# fmt: off
parser.add_argument('data', help='location of tsv files')
parser.add_argument('--split', help='split to process', required=True)
    parser.add_argument('--labels', help='extension of the label files to load (e.g. phn)', default="phn")
parser.add_argument('--path', help='path to pca and centroids', required=True)
parser.add_argument('--checkpoint', type=str, help='checkpoint for wav2vec model (if using wav2vec features)', required=True)
parser.add_argument('--layer', '-l', type=int, help='which layer to read', default=14)
parser.add_argument('--max-tsz', type=int, help='batch kmeans up to this much', default=14)
# fmt: on
return parser
def get_iterator(args):
label_path = osp.join(args.data, f"{args.split}.{args.labels}")
if osp.exists(label_path):
lp = open(label_path, "r")
else:
lp = None
with open(osp.join(args.data, f"{args.split}.tsv"), "r") as fp:
lines = fp.read().split("\n")
root = lines.pop(0).strip()
files = [line.rstrip() for line in lines if len(line) > 0]
if lp is not None:
lbls = [line.rstrip() for line in lp]
else:
lbls = [None] * len(files)
num = len(files)
reader = Wav2VecFeatureReader(args.checkpoint, args.layer)
def iterate():
for fname, lbl in zip(files, lbls):
file = osp.join(root, fname.split("\t")[0])
feats = reader.get_feats(file)
yield feats.data, fname, lbl
return iterate, num, root
def main():
parser = get_parser()
args = parser.parse_args()
spec = osp.basename(args.path)
try:
faiss_spec = parse_faiss_specs(spec.rstrip("/"))[0]
except:
print(spec)
raise
print("Faiss Spec:", faiss_spec, file=sys.stderr)
if faiss_spec.pca:
A = torch.from_numpy(np.load(osp.join(args.path, "pca_A.npy"))).cuda()
b = torch.from_numpy(np.load(osp.join(args.path, "pca_b.npy"))).cuda()
print("Loaded PCA", file=sys.stderr)
centroids = np.load(osp.join(args.path, "centroids.npy"))
print("Loaded centroids", centroids.shape, file=sys.stderr)
res = faiss.StandardGpuResources()
index_flat = (
faiss.IndexFlatL2(centroids.shape[1])
if not faiss_spec.sphere
else faiss.IndexFlatIP(centroids.shape[1])
)
faiss_index = faiss.index_cpu_to_gpu(res, 0, index_flat)
faiss_index.add(centroids)
generator, num, root = get_iterator(args)
iterator = generator()
had_labels = False
label_path = osp.join(args.path, f"{args.split}.{args.labels}")
with torch.no_grad():
with open(osp.join(args.path, f"{args.split}.src"), "w") as fp, open(
osp.join(args.path, f"{args.split}.tsv"), "w"
) as pp, open(label_path, "w") as lp:
print(root, file=pp)
for f, fname, lbl in tqdm.tqdm(iterator, total=num):
if faiss_spec.pca:
f = torch.mm(f, A) + b
if faiss_spec.norm:
f = F.normalize(f, p=2, dim=-1)
f = f.cpu().numpy()
_, z = faiss_index.search(f, 1)
print(" ".join(str(x.item()) for x in z), file=fp)
print(fname, file=pp)
if lbl is not None:
print(lbl, file=lp)
had_labels = True
if not had_labels:
os.remove(label_path)
if __name__ == "__main__":
main()
| 4,015 | 30.131783 | 129 | py |
rej-summ | rej-summ-main/examples/wav2vec/unsupervised/scripts/merge_clusters.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import numpy as np
import tqdm
import torch
import random
from shutil import copyfile
from npy_append_array import NpyAppendArray
def get_parser():
parser = argparse.ArgumentParser(
description="transforms features via a given pca and stored them in target dir"
)
# fmt: off
parser.add_argument('source', help='directory with features')
parser.add_argument('--split', help='which split to read', required=True)
parser.add_argument('--save-dir', help='where to save the output', required=True)
parser.add_argument('--cluster-dir', help='where the clusters are')
parser.add_argument('--pooling', type=str, default='mean', choices=['mean', 'sample'], help='how to pool')
# fmt: on
return parser
def main():
parser = get_parser()
args = parser.parse_args()
source_path = osp.join(args.source, args.split)
cluster_path = osp.join(args.cluster_dir, args.split + ".src")
print(f"data path: {source_path}")
features = np.load(source_path + ".npy", mmap_mode="r")
sizes = []
offsets = []
offset = 0
with open(source_path + ".lengths", "r") as len_f:
for line in len_f:
length = int(line.rstrip())
sizes.append(length)
offsets.append(offset)
offset += length
clusters = []
with open(cluster_path, "r") as cf:
for line in cf:
line = line.rstrip()
items = line.split()
items = list(map(int, items))
clusters.append(items)
os.makedirs(args.save_dir, exist_ok=True)
save_path = osp.join(args.save_dir, args.split)
copyfile(source_path + ".tsv", save_path + ".tsv")
if os.path.exists(source_path + ".phn"):
copyfile(source_path + ".phn", save_path + ".phn")
if os.path.exists(osp.join(args.source, "dict.phn.txt")):
copyfile(
osp.join(args.source, "dict.phn.txt"),
osp.join(args.save_dir, "dict.phn.txt"),
)
if os.path.exists(source_path + ".wrd"):
copyfile(source_path + ".wrd", save_path + ".wrd")
if osp.exists(save_path + ".npy"):
os.remove(save_path + ".npy")
npaa = NpyAppendArray(save_path + ".npy")
def merge(feats, clust):
feats = torch.from_numpy(feats.copy())
clust = torch.LongTensor(clust)
_, counts = clust.unique_consecutive(return_counts=True)
curr = 0
merged = []
for c in counts:
c = c.item()
start = curr
end = curr + c
curr += c
if args.pooling == "mean":
new_x = feats[start:end].mean(dim=0)
elif args.pooling == "sample":
new_x = feats[start + int(random.random() * c)]
else:
raise NotImplementedError()
merged.append(new_x)
return torch.stack(merged, dim=0).numpy()
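    # Illustrative example: for clust = [5, 5, 5, 2, 2, 9] the consecutive counts are
    # [3, 2, 1], so with --pooling mean the output rows are the means of frames 0-2,
    # 3-4 and 5, while --pooling sample keeps one randomly chosen frame per run.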
with open(save_path + ".lengths", "w") as l_f:
for size, offset, clust in tqdm.tqdm(
zip(sizes, offsets, clusters), total=len(sizes)
):
end = size + offset
feats = features[offset:end]
feats = merge(feats, clust)
print(len(feats), file=l_f)
npaa.append(feats)
if __name__ == "__main__":
main()
| 3,543 | 29.817391 | 110 | py |
rej-summ | rej-summ-main/examples/wav2vec/unsupervised/scripts/remove_silence.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
get intervals from .vads file, specify output data, and this script removes silences and saves the audio data in out path folder
paths=shards/train.tsv
vads=shards/train.vads
python remove_silence.py --paths $paths --vads $vads
"""
import os
import argparse
import torch
import torchaudio
import tqdm
parser = argparse.ArgumentParser()
parser.add_argument("--tsv", default="", type=str)
parser.add_argument("--vads", default="", type=str)
parser.add_argument("--out", type=str)
params = parser.parse_args()
# load paths
paths = []
with open(params.tsv) as f:
root = next(f).rstrip()
for line in f:
paths.append(os.path.join(root, line.rstrip().split("\t")[0]))
# load vads
list_intervals = []
with open(params.vads) as f:
for line in f:
interval = [
[int(w.split(":")[0]), int(w.split(":")[1])] for w in line.rstrip().split()
]
list_intervals.append(interval)
# load audio and keep only intervals (i.e. remove silences)
for i in tqdm.trange(len(paths)):
data, _ = torchaudio.load(paths[i])
if len(list_intervals[i]) > 0:
data_filtered = torch.cat(
[data[0][int(it[0]) : int(it[1])] for it in list_intervals[i]]
).unsqueeze(0)
else:
data_filtered = data
# YOU MAY NEED TO MODIFY THIS TO GET THE RIGHT SUBPATH
# outpath = params.out + '/'.join(paths[i].split('/')[-1])
outpath = params.out + "/" + "/".join(paths[i].split("/")[-2:])
if not os.path.isdir("/".join(outpath.split("/")[:-1])):
os.makedirs("/".join(outpath.split("/")[:-1]))
if not os.path.exists(outpath):
torchaudio.save(outpath, data_filtered, sample_rate=16000)
else:
print(outpath, "exists!")
| 1,927 | 29.125 | 128 | py |
rej-summ | rej-summ-main/examples/wav2vec/unsupervised/scripts/apply_pca.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import math
import numpy as np
import tqdm
import torch
from shutil import copyfile
from npy_append_array import NpyAppendArray
def get_parser():
parser = argparse.ArgumentParser(
description="transforms features via a given pca and stored them in target dir"
)
# fmt: off
parser.add_argument('source', help='directory with features')
parser.add_argument('--split', help='which split to read', required=True)
parser.add_argument('--save-dir', help='where to save the output', required=True)
parser.add_argument('--pca-path', type=str, help='pca location. will append _A.npy and _b.npy', required=True)
parser.add_argument('--batch-size', type=int, default=2048000, help='batch size')
parser.add_argument('--unfiltered', action='store_true', help='process the unfiltered version')
# fmt: on
return parser
def main():
parser = get_parser()
args = parser.parse_args()
source_path = osp.join(args.source, args.split)
    data_path = source_path + "_unfiltered" if args.unfiltered else source_path
    print(f"data path: {data_path}")
    features = np.load(data_path + ".npy", mmap_mode="r")
pca_A = torch.from_numpy(np.load(args.pca_path + "_A.npy")).cuda()
pca_b = torch.from_numpy(np.load(args.pca_path + "_b.npy")).cuda()
os.makedirs(args.save_dir, exist_ok=True)
save_path = osp.join(args.save_dir, args.split)
copyfile(source_path + ".tsv", save_path + ".tsv")
copyfile(data_poth + ".lengths", save_path + ".lengths")
if osp.exists(source_path + ".phn"):
copyfile(source_path + ".phn", save_path + ".phn")
if osp.exists(source_path + ".wrd"):
copyfile(source_path + ".wrd", save_path + ".wrd")
if osp.exists(save_path + ".npy"):
os.remove(save_path + ".npy")
npaa = NpyAppendArray(save_path + ".npy")
batches = math.ceil(features.shape[0] / args.batch_size)
with torch.no_grad():
for b in tqdm.trange(batches):
start = b * args.batch_size
end = start + args.batch_size
x = torch.from_numpy(features[start:end]).cuda()
x = torch.matmul(x, pca_A) + pca_b
npaa.append(x.cpu().numpy())
if __name__ == "__main__":
main()
| 2,496 | 31.428571 | 114 | py |
rej-summ | rej-summ-main/examples/wav2vec/unsupervised/scripts/wav2vec_cluster_faiss.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import gc
import os
import os.path as osp
import random
import numpy as np
import tqdm
import torch
from collections import namedtuple
import faiss
import fairseq
import soundfile as sf
def get_parser():
parser = argparse.ArgumentParser(
description="compute kmeans codebook from kaldi-computed feats"
)
# fmt: off
parser.add_argument('data', help='location of tsv files')
parser.add_argument('--save-dir', help='where to save the output', required=True)
parser.add_argument('--checkpoint', type=str, help='checkpoint for wav2vec model (if using wav2vec features)', required=True)
parser.add_argument('--sample-pct', '-r', type=float, help='percentage of timesteps to sample', default=0)
parser.add_argument('--layer', '-l', type=int, help='which layer to read', default=14)
parser.add_argument('--faiss-specs', '-f', type=str,
help='faiss index specs; separated by space '
'format is: PCAx_NORM_CLUSx_SPHERICAL -> '
'PCAx if exists first apply PCA '
'NORM if exists, normalize the vector by L2 norm '
'CLUSx must exist, cluster to x clusters '
                             'SPHERICAL if exists, apply spherical kmeans',
default='l2')
# fmt: on
return parser
faiss_spec = namedtuple("faiss_spec", ["pca", "norm", "n_clus", "sphere", "spec_str"])
def parse_faiss_specs(specs_str):
specs = []
for ss in specs_str.split():
comps = ss.split("_")
pca = 0
norm = False
n_clus = 0
sphere = False
for c in comps:
if c.startswith("PCA"):
pca = int(c[3:])
elif c == "NORM":
norm = True
elif c.startswith("CLUS"):
n_clus = int(c[4:])
elif c == "SPHERICAL":
sphere = True
assert n_clus > 0
specs.append(
faiss_spec(pca=pca, norm=norm, n_clus=n_clus, sphere=sphere, spec_str=ss)
)
return specs
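# Illustrative sketch (the spec strings below are hypothetical examples, not defaults):
# parse_faiss_specs turns space-separated spec names into faiss_spec tuples.
def _example_parse_faiss_specs():
    specs = parse_faiss_specs("PCA512_NORM_CLUS128_SPHERICAL CLUS64")
    # specs[0]: pca=512, norm=True, n_clus=128, sphere=True
    # specs[1]: pca=0, norm=False, n_clus=64, sphere=False
    return specs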
class Wav2VecFeatureReader(object):
def __init__(self, cp_file, layer):
state = fairseq.checkpoint_utils.load_checkpoint_to_cpu(cp_file)
self.layer = layer
if "cfg" in state:
w2v_args = state["cfg"]
task = fairseq.tasks.setup_task(w2v_args.task)
model = task.build_model(w2v_args.model)
else:
w2v_args = state["args"]
task = fairseq.tasks.setup_task(w2v_args)
model = task.build_model(w2v_args)
model.load_state_dict(state["model"], strict=True)
model.eval()
model.cuda()
self.model = model
def read_audio(self, fname):
"""Load an audio file and return PCM along with the sample rate"""
wav, sr = sf.read(fname)
assert sr == 16e3
return wav
def get_feats(self, loc):
x = self.read_audio(loc)
with torch.no_grad():
source = torch.from_numpy(x).view(1, -1).float().cuda()
res = self.model(
source=source, mask=False, features_only=True, layer=self.layer
)
return res["layer_results"][self.layer][0].squeeze(1)
def get_iterator(args):
with open(args.data, "r") as fp:
lines = fp.read().split("\n")
root = lines.pop(0).strip()
files = [osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0]
if getattr(args, "sample_pct", 0) > 0:
files = random.sample(files, int(args.sample_pct * len(files)))
num = len(files)
reader = Wav2VecFeatureReader(args.checkpoint, args.layer)
def iterate():
for fname in files:
feats = reader.get_feats(fname)
yield feats.cpu().numpy()
return iterate, num
def main():
parser = get_parser()
args = parser.parse_args()
faiss_specs = parse_faiss_specs(args.faiss_specs)
print("Faiss Specs:", faiss_specs)
feat_path = osp.join(args.save_dir, "features")
if osp.exists(feat_path + ".npy"):
feats = np.load(feat_path + ".npy")
else:
generator, num = get_iterator(args)
iterator = generator()
feats = []
for f in tqdm.tqdm(iterator, total=num):
feats.append(f)
del iterator
del generator
feats = np.concatenate(feats)
print(feats.shape)
os.makedirs(args.save_dir, exist_ok=True)
# np.save(feat_path, feats)
gc.collect()
torch.cuda.empty_cache()
reload = False
for spec in faiss_specs:
print("Processing spec", spec)
if reload:
print("Reloading...")
del feats
gc.collect()
feats = np.load(feat_path + ".npy")
save_path = osp.join(args.save_dir, spec.spec_str)
os.makedirs(save_path, exist_ok=True)
d = feats.shape[-1]
x = feats
if spec.pca > 0:
print("Computing PCA")
pca = faiss.PCAMatrix(d, spec.pca)
pca.train(x)
d = spec.pca
b = faiss.vector_to_array(pca.b)
A = faiss.vector_to_array(pca.A).reshape(pca.d_out, pca.d_in)
np.save(osp.join(save_path, "pca_A"), A.T)
np.save(osp.join(save_path, "pca_b"), b)
print("Applying PCA")
x = pca.apply_py(x)
if spec.norm:
reload = spec.pca <= 0
print("Normalizing")
faiss.normalize_L2(x)
print("Computing kmeans")
kmeans = faiss.Kmeans(
d,
spec.n_clus,
niter=50,
verbose=True,
spherical=spec.sphere,
max_points_per_centroid=feats.shape[0],
gpu=True,
nredo=3,
)
kmeans.train(x)
np.save(osp.join(save_path, "centroids"), kmeans.centroids)
del kmeans
del x
gc.collect()
if __name__ == "__main__":
main()
| 6,315 | 28.933649 | 129 | py |
rej-summ | rej-summ-main/examples/wav2vec/unsupervised/scripts/mean_pool.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import math
import numpy as np
import tqdm
import torch
import torch.nn.functional as F
from shutil import copyfile
from npy_append_array import NpyAppendArray
def get_parser():
parser = argparse.ArgumentParser(
description="mean pools representations by compressing uniform splits of the data"
)
# fmt: off
parser.add_argument('source', help='directory with features')
parser.add_argument('--split', help='which split to read', required=True)
parser.add_argument('--save-dir', help='where to save the output', required=True)
parser.add_argument('--subsample-rate', type=float, default=0.5, help='size to subsample data to')
parser.add_argument('--remove-extra', action='store_true', help='if true, removes extra states that cant be pooled, otherwise pads with 0s')
# fmt: on
return parser
def main():
parser = get_parser()
args = parser.parse_args()
source_path = osp.join(args.source, args.split)
print(f"data path: {source_path}")
features = np.load(source_path + ".npy", mmap_mode="r")
os.makedirs(args.save_dir, exist_ok=True)
save_path = osp.join(args.save_dir, args.split)
copyfile(source_path + ".tsv", save_path + ".tsv")
if os.path.exists(source_path + ".phn"):
copyfile(source_path + ".phn", save_path + ".phn")
if os.path.exists(source_path + ".wrd"):
copyfile(source_path + ".wrd", save_path + ".wrd")
if os.path.exists(osp.join(args.source, "dict.phn.txt")):
copyfile(
osp.join(args.source, "dict.phn.txt"),
osp.join(args.save_dir, "dict.phn.txt"),
)
if osp.exists(save_path + ".npy"):
os.remove(save_path + ".npy")
npaa = NpyAppendArray(save_path + ".npy")
with open(source_path + ".lengths", "r") as lf:
lengths = lf.readlines()
fsz = features.shape[-1]
start = 0
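    # Illustrative example: with --subsample-rate 0.5 a 7-frame utterance gets
    # target_num = ceil(7 * 0.5) = 4; since 7 % 4 = 3, either one frame is dropped
    # (--remove-extra, giving 3 pooled frames) or a padded copy of the last frame is
    # appended (default, giving 4 pooled frames), and each group of 2 frames is averaged.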
with torch.no_grad():
with open(save_path + ".lengths", "w") as lengths_out:
for length in tqdm.tqdm(lengths):
length = int(length)
end = start + length
feats = features[start:end]
start += length
x = torch.from_numpy(feats).cuda()
target_num = math.ceil(length * args.subsample_rate)
rem = length % target_num
if rem > 0:
if args.remove_extra:
to_rem = target_num - rem
target_num -= 1
x = x[:-to_rem]
else:
to_add = target_num - rem
x = F.pad(x, [0, 0, 0, to_add])
x[-to_add:] = x[-to_add - 1]
x = x.view(target_num, -1, fsz)
x = x.mean(dim=-2)
print(target_num, file=lengths_out)
npaa.append(x.cpu().numpy())
if __name__ == "__main__":
main()
| 3,187 | 30.88 | 144 | py |
rej-summ | rej-summ-main/examples/wav2vec/unsupervised/scripts/wav2vec_extract_features.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as osp
import tqdm
import torch
import torch.nn.functional as F
from shutil import copyfile
from npy_append_array import NpyAppendArray
import fairseq
import soundfile as sf
def get_parser():
parser = argparse.ArgumentParser(
description="compute kmeans codebook from kaldi-computed feats"
)
# fmt: off
parser.add_argument('data', help='location of tsv files')
parser.add_argument('--split', help='which split to read', required=True)
parser.add_argument('--save-dir', help='where to save the output', required=True)
parser.add_argument('--checkpoint', type=str, help='checkpoint for wav2vec ctc model', required=True)
parser.add_argument('--layer', type=int, default=14, help='which layer to use')
# fmt: on
return parser
class Wav2VecFeatureReader(object):
def __init__(self, cp_file, layer):
model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[cp_file]
)
model = model[0]
model.eval()
model.cuda()
self.model = model
self.task = task
self.layer = layer
def read_audio(self, fname):
"""Load an audio file and return PCM along with the sample rate"""
wav, sr = sf.read(fname)
assert sr == 16e3
return wav
def get_feats(self, loc):
x = self.read_audio(loc)
with torch.no_grad():
source = torch.from_numpy(x).float().cuda()
if self.task.cfg.normalize:
assert source.dim() == 1, source.dim()
with torch.no_grad():
source = F.layer_norm(source, source.shape)
source = source.view(1, -1)
m_res = self.model(source=source, mask=False, features_only=True, layer=self.layer)
return m_res["x"].squeeze(0).cpu()
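# Illustrative usage sketch; the checkpoint and audio paths are placeholders, and a CUDA
# device is assumed since the reader moves the model to GPU.
def _example_feature_reader_usage():
    reader = Wav2VecFeatureReader("/path/to/wav2vec_small.pt", layer=14)
    feats = reader.get_feats("/path/to/utterance.wav")
    return feats.shape  # (num_frames, hidden_dim), already moved to CPU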
def get_iterator(args):
with open(osp.join(args.data, args.split) + ".tsv", "r") as fp:
lines = fp.read().split("\n")
root = lines.pop(0).strip()
files = [osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0]
num = len(files)
reader = Wav2VecFeatureReader(args.checkpoint, args.layer)
def iterate():
for fname in files:
w2v_feats = reader.get_feats(fname)
yield w2v_feats
return iterate, num
def main():
parser = get_parser()
args = parser.parse_args()
os.makedirs(args.save_dir, exist_ok=True)
def create_files(dest):
copyfile(osp.join(args.data, args.split) + ".tsv", dest + ".tsv")
if osp.exists(osp.join(args.data, args.split) + ".wrd"):
copyfile(osp.join(args.data, args.split) + ".wrd", dest + ".wrd")
if osp.exists(osp.join(args.data, args.split) + ".phn"):
copyfile(osp.join(args.data, args.split) + ".phn", dest + ".phn")
if osp.exists(dest + ".npy"):
os.remove(dest + ".npy")
npaa = NpyAppendArray(dest + ".npy")
return npaa
save_path = osp.join(args.save_dir, args.split)
npaa = create_files(save_path)
generator, num = get_iterator(args)
iterator = generator()
with open(save_path + ".lengths", "w") as l_f:
for w2v_feats in tqdm.tqdm(iterator, total=num):
print(len(w2v_feats), file=l_f)
if len(w2v_feats) > 0:
npaa.append(w2v_feats.numpy())
if __name__ == "__main__":
main()
| 3,673 | 29.616667 | 105 | py |
rej-summ | rej-summ-main/examples/wav2vec/unsupervised/data/extracted_features_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import contextlib
import numpy as np
import torch
from fairseq.data import FairseqDataset, data_utils
logger = logging.getLogger(__name__)
class ExtractedFeaturesDataset(FairseqDataset):
def __init__(
self,
path,
split,
min_length=3,
max_length=None,
labels=None,
label_dict=None,
shuffle=True,
sort_by_length=True,
aux_target_postfix=None,
):
super().__init__()
self.min_length = min_length
self.max_length = max_length
self.shuffle = shuffle
self.sort_by_length = sort_by_length
self.label_dict = label_dict
if labels is not None:
assert label_dict is not None
self.sizes = []
self.offsets = []
self.labels = []
self.aux_tgt = None
path = os.path.join(path, split)
data_path = path
self.data = np.load(data_path + ".npy", mmap_mode="r")
offset = 0
skipped = 0
if not os.path.exists(path + f".{labels}"):
labels = None
with open(data_path + ".lengths", "r") as len_f, open(
path + f".{labels}", "r"
) if labels is not None else contextlib.ExitStack() as lbl_f:
for line in len_f:
length = int(line.rstrip())
lbl = None if labels is None else next(lbl_f).rstrip().split()
if length >= min_length and (
max_length is None or length <= max_length
):
self.sizes.append(length)
self.offsets.append(offset)
if lbl is not None:
self.labels.append(lbl)
offset += length
self.sizes = np.asarray(self.sizes)
self.offsets = np.asarray(self.offsets)
        if aux_target_postfix is not None:
            if not os.path.exists(path + f".{aux_target_postfix}"):
                logger.info(f"auxiliary target for {split} missing")
            else:
                with open(path + f".{aux_target_postfix}", "r") as t_f:
                    self.aux_tgt = [
                        torch.LongTensor(list(map(int, seg.strip().split())))
                        for seg in t_f
                    ]
logger.info(f"loaded {len(self.offsets)}, skipped {skipped} samples")
def __getitem__(self, index):
offset = self.offsets[index]
end = self.sizes[index] + offset
feats = torch.from_numpy(self.data[offset:end].copy()).float()
res = {"id": index, "features": feats}
if len(self.labels) > 0:
res["target"] = self.label_dict.encode_line(
self.labels[index],
line_tokenizer=lambda x: x,
append_eos=False,
)
if self.aux_tgt:
res["aux_target"] = self.aux_tgt[index]
return res
def __len__(self):
return len(self.sizes)
def collater(self, samples):
if len(samples) == 0:
return {}
features = [s["features"] for s in samples]
sizes = [len(s) for s in features]
target_size = max(sizes)
collated_features = features[0].new_zeros(
len(features), target_size, features[0].size(-1)
)
padding_mask = torch.BoolTensor(collated_features.shape[:-1]).fill_(False)
for i, (f, size) in enumerate(zip(features, sizes)):
collated_features[i, :size] = f
padding_mask[i, size:] = True
res = {
"id": torch.LongTensor([s["id"] for s in samples]),
"net_input": {"features": collated_features, "padding_mask": padding_mask},
}
if len(self.labels) > 0:
target = data_utils.collate_tokens(
[s["target"] for s in samples],
pad_idx=self.label_dict.pad(),
left_pad=False,
)
res["target"] = target
if self.aux_tgt:
idxs = torch.nn.utils.rnn.pad_sequence(
[s["aux_target"] for s in samples],
batch_first=True,
padding_value=-1,
)
res["net_input"]["aux_target"] = idxs
return res
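    # Illustrative example: collating two items with 7 and 4 frames yields "features"
    # of shape (2, 7, dim) and a "padding_mask" that is True at positions 4-6 of the
    # second row; labels, when present, are padded with the dictionary pad index.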
def num_tokens(self, index):
return self.size(index)
def size(self, index):
return self.sizes[index]
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
if self.sort_by_length:
order.append(self.sizes)
return np.lexsort(order)[::-1]
else:
return order[0]
| 5,038 | 28.994048 | 87 | py |
rej-summ | rej-summ-main/examples/wav2vec/unsupervised/tasks/unpaired_audio_text.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from dataclasses import dataclass, field
import logging
import math
import os
from typing import Optional
import torch
from fairseq.logging import metrics
from fairseq.tasks import FairseqTask, register_task
from ..data import ExtractedFeaturesDataset, RandomInputDataset
from fairseq.data import (
Dictionary,
data_utils,
StripTokenDataset,
)
from fairseq.dataclass import FairseqDataclass
from fairseq.distributed.utils import get_data_parallel_world_size
from omegaconf import MISSING
from examples.speech_recognition.kaldi.kaldi_decoder import (
KaldiDecoder,
KaldiDecoderConfig,
)
logger = logging.getLogger(__name__)
@dataclass
class DecodingConfig(FairseqDataclass):
kenlm_path: Optional[str] = None
lm_weight: float = 0
blank_weight: float = 0
@dataclass
class UnpairedAudioTextConfig(FairseqDataclass):
data: str = field(
default=MISSING, metadata={"help": "path to data directory containing audio"}
)
text_data: str = field(
default=MISSING, metadata={"help": "path to data directory containing text"}
)
max_length: Optional[int] = None
labels: Optional[str] = field(
default=None,
metadata={"help": "extension of the label file to load, used for fine-tuning"},
)
aux_target_postfix: Optional[str] = field(
default=None,
metadata={"help": "auxaliry target filename extension"},
)
unfiltered: bool = field(
default=False, metadata={"help": "load data with _unfiltered suffix"}
)
ctc_eval: bool = field(
default=False, metadata={"help": "eval UER as if computed by CTC"}
)
sort_by_length: bool = field(
default=True, metadata={"help": "sort examples by length of audio timesteps"}
)
shuffle: bool = field(default=True, metadata={"help": "shuffle examples"})
append_eos: bool = field(default=False, metadata={"help": "append eos"})
uppercase: Optional[bool] = field(
default=False, metadata={"help": "uppercase for LM score computation"}
)
skipwords: Optional[str] = field(
default="",
metadata={
"help": "comma-separated words to be removed for LM score computation"
},
)
kenlm_path: Optional[str] = None
vocab_usage_power: float = 2
word_decoder_config: Optional[KaldiDecoderConfig] = None
word_kenlm_path: Optional[str] = None
decoding_config: DecodingConfig = DecodingConfig()
@register_task("unpaired_audio_text", dataclass=UnpairedAudioTextConfig)
class UnpairedAudioText(FairseqTask):
""" """
cfg: UnpairedAudioTextConfig
def __init__(
self,
cfg: UnpairedAudioTextConfig,
source_dictionary=None,
target_dictionary=None,
):
super().__init__(cfg)
self._target_dictionary = target_dictionary
self._source_dictionary = source_dictionary
self.num_symbols = (
len([s for s in target_dictionary.symbols if not s.startswith("madeup")])
- target_dictionary.nspecial
)
self.sil_id = (
target_dictionary.index("<SIL>") if "<SIL>" in target_dictionary else -1
)
self.kenlm = None
if cfg.kenlm_path is not None:
import kenlm
self.kenlm = kenlm.Model(cfg.kenlm_path)
self.word_kenlm = None
if cfg.word_kenlm_path is not None:
import kenlm
self.word_kenlm = kenlm.Model(cfg.word_kenlm_path)
self.uppercase = cfg.uppercase
self.skipwords = set(cfg.skipwords.split(","))
def str_postprocess(s):
s = " ".join(w for w in s.split() if w not in self.skipwords)
s = s.upper() if self.uppercase else s
return s
self.str_postprocess = str_postprocess
self.compute_lm_score = lambda s: self.kenlm.score(self.str_postprocess(s))
self.compute_word_score = None
if cfg.word_decoder_config is not None:
self.kaldi_decoder = KaldiDecoder(cfg.word_decoder_config, beam=10)
def compute_word_score(logits, padding):
res = self.kaldi_decoder.decode(logits, padding)
for r in res:
r = r.result()
assert len(r) == 1
r = r[0]
yield r["score"], r["words"]
self.compute_word_score = compute_word_score
@classmethod
def setup_task(cls, cfg: UnpairedAudioTextConfig, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
cfg (AudioPretrainingConfig): configuration of this task
"""
dict_path = os.path.join(cfg.text_data, "dict.txt")
if os.path.exists(dict_path):
target_dictionary = Dictionary.load(dict_path)
else:
dict_path = os.path.join(cfg.data, f"dict.{cfg.labels}.txt")
target_dictionary = Dictionary.load(dict_path)
return cls(cfg, target_dictionary=target_dictionary)
def optimizer_step(self, optimizer, model, update_num):
if hasattr(model, "get_groups_for_update"):
groups = model.get_groups_for_update(update_num)
optimizer.step(groups={groups})
else:
optimizer.step()
def valid_step(self, sample, model, criterion):
res = model(
**sample["net_input"],
dense_x_only=True,
)
dense_x = res["logits"]
padding_mask = res["padding_mask"]
word_scores = None
if self.compute_word_score is not None:
word_scores = self.compute_word_score(dense_x.cpu(), padding_mask.cpu())
z = dense_x.argmax(-1)
z[padding_mask] = self.target_dictionary.pad()
vocab_seen = torch.zeros(self.num_symbols, dtype=torch.bool)
import editdistance
c_err = 0
c_len = 0
pred_c_len = 0
lm_score_sum = 0
for i, (x, t, id) in enumerate(
zip(
z,
sample["target"] if "target" in sample else [None] * len(z),
sample["id"],
)
):
if t is not None:
t = t[(t >= self.target_dictionary.nspecial)]
x = x[
(x >= self.target_dictionary.nspecial)
& (x < (self.num_symbols + self.target_dictionary.nspecial))
]
if self.sil_id >= 0:
x = x[x != self.sil_id]
vocab_seen[x - self.target_dictionary.nspecial] = True
pred_units_arr = x
if self.cfg.ctc_eval:
pred_units_arr = pred_units_arr.unique_consecutive()
pred_units_arr = pred_units_arr[pred_units_arr != 0]
if id == 0:
if t is not None:
logger.info(f"REF: {self.target_dictionary.string(t)}")
logger.info(f"HYP: {self.target_dictionary.string(pred_units_arr)}")
if self.kenlm is not None:
if t is not None:
ref_lm_s = self.compute_lm_score(
self.target_dictionary.string(t)
)
logger.info(
f"LM [REF]: {ref_lm_s}, {math.pow(10, -ref_lm_s / (len(t) + 1))}"
)
hyp_lm_s = self.compute_lm_score(
self.target_dictionary.string(pred_units_arr)
)
logger.info(
f"LM [HYP]: {hyp_lm_s}, {math.pow(10, -hyp_lm_s / (len(pred_units_arr) + 1))}"
)
pred_units_arr = pred_units_arr.tolist()
pred_c_len += len(pred_units_arr)
if t is not None:
t = t.tolist()
c_err += editdistance.eval(pred_units_arr, t)
c_len += len(t)
else:
c_len = pred_c_len
if self.kenlm is not None:
pred_str = self.target_dictionary.string(pred_units_arr)
lm_score = self.compute_lm_score(pred_str)
lm_score_sum += lm_score
kaldi_score_sum = 0
word_lm_sum = 0
num_words = 0
if word_scores is not None:
for score, words in word_scores:
kaldi_score_sum += score
num_words += len(words)
if self.word_kenlm is not None:
                    word_lm_sum += self.word_kenlm.score(" ".join(words))
try:
world_size = get_data_parallel_world_size()
except:
world_size = 1
logging_output = {
"loss": c_err,
"_num_char_errors": c_err,
"_num_chars": c_len,
"_num_pred_chars": pred_c_len,
"ntokens": c_len,
"nsentences": z.size(0),
"sample_size": c_len,
"_world_size": world_size,
"_lm_score_sum": lm_score_sum,
"_kaldi_score_sum": kaldi_score_sum,
"_word_lm_sum": word_lm_sum,
"_num_words": num_words,
"_vocab_seen": vocab_seen,
}
return c_err, c_len, logging_output
def load_dataset(self, split: str, task_cfg: FairseqDataclass = None, **kwargs):
data_path = self.cfg.data
task_cfg = task_cfg or self.cfg
has_unpaired_text = os.path.exists(
os.path.join(self.cfg.text_data, f"{split}.idx")
)
self.datasets[split] = ExtractedFeaturesDataset(
path=data_path,
split=split,
min_length=3,
max_length=task_cfg.max_length,
labels=None if has_unpaired_text else task_cfg.labels,
label_dict=self.target_dictionary,
shuffle=getattr(task_cfg, "shuffle", True),
sort_by_length=task_cfg.sort_by_length,
aux_target_postfix=task_cfg.aux_target_postfix,
)
logger.info(f"split {split} has unpaired text? {has_unpaired_text}")
if has_unpaired_text:
text_dataset = data_utils.load_indexed_dataset(
os.path.join(self.cfg.text_data, split), self.target_dictionary
)
text_dataset = StripTokenDataset(text_dataset, self.target_dictionary.eos())
self.datasets[split] = RandomInputDataset(
self.datasets[split],
text_dataset,
["random_label"],
add_to_input=True,
pad_idx=self.target_dictionary.pad(),
)
@property
def source_dictionary(self):
return self._source_dictionary
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self._target_dictionary
def max_positions(self):
"""Maximum input length supported by the encoder."""
return None
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
zero = torch.scalar_tensor(0.0)
num_char_errors = sum(
log.get("_num_char_errors", zero) for log in logging_outputs
)
num_chars = sum(log.get("_num_chars", zero) for log in logging_outputs)
num_word_errors = sum(
log.get("_num_word_errors", zero) for log in logging_outputs
)
num_words = sum(log.get("_num_words", zero) for log in logging_outputs)
num_pred_chars = sum(
log.get("_num_pred_chars", zero) for log in logging_outputs
)
lm_score_sum = sum(log.get("_lm_score_sum", zero) for log in logging_outputs)
vocab_seen = (
sum(log.get("_vocab_seen", zero) for log in logging_outputs)
.bool()
.sum()
.item()
)
kaldi_score_sum = sum(
log.get("_kaldi_score_sum", zero) for log in logging_outputs
)
word_lm_sum = sum(log.get("_word_lm_sum", zero) for log in logging_outputs)
metrics.log_scalar_sum("_num_char_errors", num_char_errors)
metrics.log_scalar_sum("_num_chars", num_chars)
metrics.log_scalar_sum("_num_word_errors", num_word_errors)
metrics.log_scalar_sum("_num_words", num_words)
metrics.log_scalar_sum("lm_score_sum", lm_score_sum)
metrics.log_scalar_sum("num_pred_chars", num_pred_chars)
if self.cfg.word_kenlm_path is not None:
metrics.log_scalar_sum("kaldi_score_sum", kaldi_score_sum)
metrics.log_scalar_sum("word_lm_sum", word_lm_sum)
if num_chars > 0:
metrics.log_derived(
"uer",
lambda meters: meters["_num_char_errors"].sum
* 100.0
/ meters["_num_chars"].sum
if meters["_num_chars"].sum > 0
else float("nan"),
)
if lm_score_sum < 0 and vocab_seen > 0:
metrics.log_scalar("vocab_seen_pct", vocab_seen / self.num_symbols)
metrics.log_derived(
"weighted_lm_ppl",
lambda meters: math.pow(
10,
-meters["lm_score_sum"].sum
/ (
meters["num_pred_chars"].sum + meters["nsentences"].sum
), # account for </s>
)
/ meters["vocab_seen_pct"].avg ** self.cfg.vocab_usage_power,
)
metrics.log_derived(
"lm_ppl",
lambda meters: math.pow(
10,
-meters["lm_score_sum"].sum
/ (
meters["num_pred_chars"].sum + meters["nsentences"].sum
), # account for </s>
),
)
else:
metrics.log_derived("weighted_lm_ppl", lambda meters: float("inf"))
if num_words > 0:
if word_lm_sum != 0:
metrics.log_derived(
"word_lm_ppl",
lambda meters: math.pow(
10,
-meters["word_lm_sum"].sum
/ (
meters["_num_words"].sum + meters["nsentences"].sum
), # account for </s>
),
)
metrics.log_derived(
"weighted_word_lm_ppl",
lambda meters: math.pow(
10,
-meters["word_lm_sum"].sum
/ (
meters["_num_words"].sum + meters["nsentences"].sum
), # account for </s>
)
/ meters["vocab_seen_pct"].avg ** self.cfg.vocab_usage_power,
)
if self.cfg.word_kenlm_path is not None:
metrics.log_derived(
"kaldi_score",
lambda meters: meters["kaldi_score_sum"].sum
/ meters["nsentences"].sum,
)
def build_model(self, cfg: FairseqDataclass, from_checkpoint=False):
model = super().build_model(cfg)
return model
| 15,658 | 33.567329 | 102 | py |
rej-summ | rej-summ-main/examples/criss/save_encoder.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate pre-processed data with a trained model.
"""
import numpy as np
import torch
from fairseq import checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.sequence_generator import EnsembleModel
from fairseq.utils import safe_hasattr
def get_avg_pool(
models, sample, prefix_tokens, src_dict, remove_bpe, has_langtok=False
):
model = EnsembleModel(models)
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
}
# compute the encoder output for each beam
encoder_outs = model.forward_encoder(encoder_input)
np_encoder_outs = encoder_outs[0].encoder_out.cpu().numpy().astype(np.float32)
encoder_mask = 1 - encoder_outs[0].encoder_padding_mask.cpu().numpy().astype(
np.float32
)
encoder_mask = np.expand_dims(encoder_mask.T, axis=2)
if has_langtok:
encoder_mask = encoder_mask[1:, :, :]
        np_encoder_outs = np_encoder_outs[1:, :, :]
masked_encoder_outs = encoder_mask * np_encoder_outs
avg_pool = (masked_encoder_outs / encoder_mask.sum(axis=0)).sum(axis=0)
return avg_pool
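# Minimal illustrative sketch of the masked average pooling done in get_avg_pool() above,
# using small dummy arrays. Shapes follow the T x B x C layout of encoder_out; the sizes
# and masking pattern below are made up for illustration.
def _masked_avg_pool_demo():
    T, B, C = 5, 2, 4
    outs = np.random.randn(T, B, C).astype(np.float32)  # fake encoder states
    mask = np.ones((T, B, 1), dtype=np.float32)  # 1 = real frame, 0 = padding
    mask[3:, 0, 0] = 0.0  # pretend the first sentence only has 3 real frames
    masked = mask * outs
    return (masked / mask.sum(axis=0)).sum(axis=0)  # B x C sentence embeddings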
def main(args):
assert args.path is not None, "--path required for generation!"
assert (
not args.sampling or args.nbest == args.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
args.replace_unk is None or args.raw_text
), "--replace-unk requires a raw text dataset (--raw-text)"
args.beam = 1
utils.import_user_module(args)
if args.max_tokens is None:
args.max_tokens = 12000
print(args)
use_cuda = torch.cuda.is_available() and not args.cpu
# Load dataset splits
task = tasks.setup_task(args)
task.load_dataset(args.gen_subset)
# Set dictionaries
try:
src_dict = getattr(task, "source_dictionary", None)
except NotImplementedError:
src_dict = None
tgt_dict = task.target_dictionary
# Load ensemble
print("| loading model(s) from {}".format(args.path))
models, _model_args = checkpoint_utils.load_model_ensemble(
args.path.split(":"),
arg_overrides=eval(args.model_overrides),
task=task,
)
# Optimize ensemble for generation
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(args.replace_unk)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_positions=utils.resolve_max_positions(
task.max_positions(),
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
num_sentences = 0
source_sentences = []
shard_id = 0
all_avg_pool = None
encoder_has_langtok = (
safe_hasattr(task.args, "encoder_langtok")
and task.args.encoder_langtok is not None
and safe_hasattr(task.args, "lang_tok_replacing_bos_eos")
and not task.args.lang_tok_replacing_bos_eos
)
with progress_bar.build_progress_bar(args, itr) as t:
for sample in t:
if sample is None:
print("Skipping None")
continue
sample = utils.move_to_cuda(sample) if use_cuda else sample
if "net_input" not in sample:
continue
prefix_tokens = None
if args.prefix_size > 0:
prefix_tokens = sample["target"][:, : args.prefix_size]
with torch.no_grad():
avg_pool = get_avg_pool(
models,
sample,
prefix_tokens,
src_dict,
args.post_process,
has_langtok=encoder_has_langtok,
)
if all_avg_pool is not None:
all_avg_pool = np.concatenate((all_avg_pool, avg_pool))
else:
all_avg_pool = avg_pool
if not isinstance(sample["id"], list):
sample_ids = sample["id"].tolist()
else:
sample_ids = sample["id"]
for i, sample_id in enumerate(sample_ids):
# Remove padding
src_tokens = utils.strip_pad(
sample["net_input"]["src_tokens"][i, :], tgt_dict.pad()
)
# Either retrieve the original sentences or regenerate them from tokens.
if align_dict is not None:
src_str = task.dataset(args.gen_subset).src.get_original_text(
sample_id
)
else:
if src_dict is not None:
src_str = src_dict.string(src_tokens, args.post_process)
else:
src_str = ""
if not args.quiet:
if src_dict is not None:
print("S-{}\t{}".format(sample_id, src_str))
source_sentences.append(f"{sample_id}\t{src_str}")
num_sentences += sample["nsentences"]
if all_avg_pool.shape[0] >= 1000000:
with open(
f"{args.encoder_save_dir}/all_avg_pool.{args.source_lang}.{shard_id}",
"w",
) as avg_pool_file:
all_avg_pool.tofile(avg_pool_file)
with open(
f"{args.encoder_save_dir}/sentences.{args.source_lang}.{shard_id}",
"w",
) as sentence_file:
sentence_file.writelines(f"{line}\n" for line in source_sentences)
all_avg_pool = None
source_sentences = []
shard_id += 1
if all_avg_pool is not None:
with open(
f"{args.encoder_save_dir}/all_avg_pool.{args.source_lang}.{shard_id}", "w"
) as avg_pool_file:
all_avg_pool.tofile(avg_pool_file)
with open(
f"{args.encoder_save_dir}/sentences.{args.source_lang}.{shard_id}", "w"
) as sentence_file:
sentence_file.writelines(f"{line}\n" for line in source_sentences)
return None
def cli_main():
parser = options.get_generation_parser()
parser.add_argument(
"--encoder-save-dir",
default="",
type=str,
metavar="N",
help="directory to save encoder outputs",
)
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
| 7,473 | 33.762791 | 90 | py |
rej-summ | rej-summ-main/examples/speech_to_speech/generate_waveform_from_code.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import logging
from pathlib import Path
import random
import soundfile as sf
import torch
from tqdm import tqdm
from fairseq import utils
from fairseq.models.text_to_speech.vocoder import CodeHiFiGANVocoder
logging.basicConfig()
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def dump_result(args, sample_id, pred_wav, suffix=""):
sf.write(
f"{args.results_path}/{sample_id}{suffix}_pred.wav",
pred_wav.detach().cpu().numpy(),
16000,
)
def load_code(in_file):
with open(in_file) as f:
out = [list(map(int, line.strip().split())) for line in f]
return out
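# Minimal illustrative sketch of the file layout load_code() expects: one space-separated
# unit sequence per line (see the --in-code-file help below). The path and unit values
# here are made-up placeholders.
def _load_code_demo(tmp_path="/tmp/units_demo.txt"):
    with open(tmp_path, "w") as f:
        f.write("12 103 103 47\n5 5 9\n")
    return load_code(tmp_path)  # -> [[12, 103, 103, 47], [5, 5, 9]]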
def main(args):
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
with open(args.vocoder_cfg) as f:
vocoder_cfg = json.load(f)
vocoder = CodeHiFiGANVocoder(args.vocoder, vocoder_cfg)
if use_cuda:
vocoder = vocoder.cuda()
multispkr = vocoder.model.multispkr
if multispkr:
logger.info("multi-speaker vocoder")
num_speakers = vocoder_cfg.get(
"num_speakers", 200
) # following the default in codehifigan to set to 200
assert (
args.speaker_id < num_speakers
), f"invalid --speaker-id ({args.speaker_id}) with total #speakers = {num_speakers}"
data = load_code(args.in_code_file)
Path(args.results_path).mkdir(exist_ok=True, parents=True)
for i, d in tqdm(enumerate(data), total=len(data)):
x = {
"code": torch.LongTensor(d).view(1, -1),
}
suffix = ""
if multispkr:
spk = (
random.randint(0, num_speakers - 1)
if args.speaker_id == -1
else args.speaker_id
)
suffix = f"_spk{spk}"
x["spkr"] = torch.LongTensor([spk]).view(1, 1)
x = utils.move_to_cuda(x) if use_cuda else x
wav = vocoder(x, args.dur_prediction)
dump_result(args, i, wav, suffix=suffix)
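# Minimal illustrative sketch of the per-sample dict the vocoder consumes in the loop
# above: a 1 x T LongTensor of discrete units plus, for multi-speaker vocoders, a 1 x 1
# speaker id. The unit values and speaker id here are made up.
def _example_vocoder_input(units=(12, 103, 47), speaker_id=0, multispkr=False):
    x = {"code": torch.LongTensor(list(units)).view(1, -1)}
    if multispkr:
        x["spkr"] = torch.LongTensor([speaker_id]).view(1, 1)
    return x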
def cli_main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--in-code-file", type=str, required=True, help="one unit sequence per line"
)
parser.add_argument(
"--vocoder", type=str, required=True, help="path to the CodeHiFiGAN vocoder"
)
parser.add_argument(
"--vocoder-cfg",
type=str,
required=True,
help="path to the CodeHiFiGAN vocoder config",
)
parser.add_argument("--results-path", type=str, required=True)
parser.add_argument(
"--dur-prediction",
action="store_true",
help="enable duration prediction (for reduced/unique code sequences)",
)
parser.add_argument(
"--speaker-id",
type=int,
default=-1,
help="Speaker id (for vocoder that supports multispeaker). Set to -1 to randomly sample speakers.",
)
parser.add_argument("--cpu", action="store_true", help="run on CPU")
args = parser.parse_args()
main(args)
if __name__ == "__main__":
cli_main()
| 3,285 | 27.08547 | 107 | py |
rej-summ | rej-summ-main/examples/speech_to_speech/benchmarking/core.py | import timeit
import logging
import torch
from pypapi import events, papi_high as high
from memory_profiler import memory_usage
from torch import nn
from argparse import Namespace
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.data import data_utils as fairseq_data_utils
from fairseq import checkpoint_utils, tasks, utils
from fairseq.models.text_to_speech.vocoder import CodeHiFiGANVocoder
from examples.hubert.simple_kmeans.dump_hubert_feature import HubertFeatureReader
from examples.hubert.simple_kmeans.dump_km_label import ApplyKmeans
from fairseq_cli.generate import get_symbols_to_strip_from_output
import soundfile as sf
import ast
import json
logging.basicConfig()
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
torch.manual_seed(1)
torch.set_deterministic(True)
class BenchmarkingBase(nn.Module):
def __init__(self):
nn.Module.__init__(self)
self.s2x_task = None
def warm_up(self, sample, repeat):
"""Warm up the model"""
for _i in range(repeat):
self.forward(sample)
logger.info(f"Model warmed up by running inference {repeat} times")
def benchmark_run_time(self, dataset, repeat):
"""Benchmark average runtime for the model by calling benchmark_run_time_single_sample function"""
logger.info("Starting run time benchmarking")
time_elapsed = 0
for i, sample in enumerate(dataset):
time_elapsed += self.benchmark_run_time_single_sample(sample, repeat=repeat)
if i % 100 == 0:
logger.info(f"Benchmarked run time for {i}/{len(dataset)} samples")
total_time_elapsed = time_elapsed / len(dataset)
return total_time_elapsed
def benchmark_run_time_single_sample(self, sample, repeat):
"""Benchmark average runtime for a single sample using timeit library. Units are seconds"""
timer = timeit.Timer(lambda: self.forward(sample))
time_elapsed = timer.timeit(repeat)
return time_elapsed / repeat
def count_flops(
self,
dataset,
repeat,
):
"""Use PYPAPI library to count average flops for model inference.
Note: It only works if the model is being run on cpu"""
logger.info("Starting flop counter")
high.start_counters([events.PAPI_DP_OPS])
for i, sample in enumerate(dataset):
for _r in range(repeat):
self.forward(sample)
if i % 100 == 0:
logger.info(f"Counted flops for {i}/{len(dataset)} samples")
flops = high.stop_counters()
flops = round(flops[0] / (repeat * len(dataset)))
return flops
def max_memory(self, dataset, repeat):
"""Compute average max memory consumed by model inference. Units are MiB"""
logger.info("Starting memory benchmarking")
total_memory = 0
for i, sample in enumerate(dataset):
for _r in range(repeat):
total_memory += max(memory_usage((self.forward, (sample,), {})))
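                # memory_usage((f, args, kwargs)) from memory_profiler runs
                # f(*args, **kwargs) while sampling resident memory and returns a list
                # of readings in MiB, so max(...) is the peak for one forward pass.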
if i % 100 == 0:
logger.info(f"Benchmarked memory for {i}/{len(dataset)} samples")
total_memory = total_memory / (repeat * len(dataset))
return total_memory
def gather_all_metrics(self, dataset, repeat):
run_time = self.benchmark_run_time(dataset, repeat)
max_memory = self.max_memory(dataset, repeat)
flops = self.count_flops(dataset, repeat)
return run_time, max_memory, flops
def dump_final_speech_output(
self, dataset, output_dir, resample_fn, sample_rate, prefix=None
):
for i, sample in enumerate(dataset):
hypo = self.forward(sample)[0]
def to_np(x):
return x.detach().cpu().numpy()
try:
wave_preds = to_np(resample_fn(hypo["waveform"]))
sf.write(
f"{output_dir}/{prefix}_{i}_pred.wav",
wave_preds,
sample_rate,
)
except Exception as e:
raise Exception(
f" Encountered {e} - Invalid waveform. Make sure the model outputs a waveform"
)
class Processing(BenchmarkingBase):
"""Class similar to fairseq_cli/generate.py. Supports ASR, MT and ST model inference"""
def __init__(self, args):
super().__init__()
self.use_cuda = not getattr(args, "cpu", False)
self.setUp(args)
self.training = False
self.s2x_task = self.task
def setUp(self, cfg):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
self.task = tasks.setup_task(cfg.task)
self.tgt_dict = self.task.target_dictionary
# Load ensemble
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
models, _ = checkpoint_utils.load_model_ensemble(
utils.split_paths(cfg.common_eval.path),
arg_overrides={},
task=self.task,
suffix=cfg.checkpoint.checkpoint_suffix,
strict=False,
num_shards=cfg.checkpoint.checkpoint_shard_count,
)
if len(models) > 1:
raise Exception("Currently loading multiple models is not supported")
self.model = models[0]
# Optimize model for generation
if cfg.common.fp16:
self.model.half()
if self.use_cuda:
self.model.cuda()
self.model.prepare_for_inference_(cfg)
self.generator = self.task.build_generator(
[self.model],
cfg.generation,
extra_gen_cls_kwargs={},
)
# Handle tokenization and BPE
self.tokenizer = self.task.build_tokenizer(cfg.tokenizer)
self.bpe = self.task.build_bpe(cfg.bpe)
self.remove_bpe = cfg.common_eval.post_process
def encode_source(self, src):
"""Method to generate source tokens from a string"""
if self.tokenizer is not None:
src = self.tokenizer.encode(src)
if self.bpe is not None:
src = self.bpe.encode(src)
src_tokens = self.task.source_dictionary.encode_line(src).long()
src_lens = src_tokens.size(0)
return {
"net_input": {
"src_tokens": src_tokens.view(1, src_lens),
"src_lengths": torch.tensor([src_lens]),
}
}
def decode_target(self, hypos):
"""Method to decode target string from tokens"""
hypo_str = self.tgt_dict.string(
hypos[0][0]["tokens"].int().cpu(),
self.remove_bpe,
get_symbols_to_strip_from_output(self.generator),
)
if self.bpe is not None:
hypo_str = self.bpe.decode(hypo_str)
if self.tokenizer is not None:
hypo_str = self.tokenizer.decode(hypo_str)
return hypo_str
def forward(self, sample):
hypos = self.task.inference_step(
self.generator,
[self.model],
sample,
prefix_tokens=None,
constraints=None,
)
return hypos
class GenerateWaveformFromCode(BenchmarkingBase):
    """Class to support waveform generation from code. Currently, the vocoder only supports a single speaker"""
def __init__(self, args):
super().__init__()
with open(args.vocoder_cfg) as f:
vocoder_cfg = json.load(f)
self.dur_prediction = args.dur_prediction
self.vocoder = CodeHiFiGANVocoder(args.vocoder, vocoder_cfg)
def format_units(self, input):
code = torch.LongTensor(list(map(int, input.strip().split()))).view(1, -1)
return {"code": code}
def generate_vocoder_input(self, dataset):
return [self.format_units(sample) for sample in dataset]
def forward(self, sample):
return [{"waveform": self.vocoder(sample, self.dur_prediction)}]
class HubertUnitExtractor(BenchmarkingBase):
    def __init__(self, args):
        super().__init__()
        self.feature_reader = HubertFeatureReader(
            args.hubert_ckpt_path, args.hubert_layer
        )
        self.kmeans = ApplyKmeans(args.hubert_km_path)
def forward(self, sample):
with torch.no_grad():
feat = []
for start in range(0, sample.size(1), self.feature_reader.max_chunk):
                x_chunk = sample[:, start : start + self.feature_reader.max_chunk]
feat_chunk, _ = self.feature_reader.model.extract_features(
source=x_chunk,
padding_mask=None,
mask=False,
                    output_layer=self.feature_reader.layer,
)
feat.append(feat_chunk)
            feat = torch.cat(feat, 1).squeeze(0)
return self.kmeans(feat).tolist()
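# Minimal illustrative sketch of the chunking pattern used in HubertUnitExtractor.forward
# above, shown on a plain tensor so the slicing is easy to follow. The chunk size and
# tensor length here are made up.
def _chunked_iteration_demo(total_len=10, max_chunk=4):
    x = torch.arange(total_len).view(1, -1)
    chunks = [x[:, start : start + max_chunk] for start in range(0, x.size(1), max_chunk)]
    return torch.cat(chunks, dim=1)  # identical to x, but processed piece by piece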
class SpeechGeneration(BenchmarkingBase):
"""Class similar to examples/text_to_speech/generate_waveform.py.
Supports models with speech generation as end goal (TTS, Direct S2ST models etc)"""
def __init__(self, args):
super().__init__()
self.use_cuda = not getattr(args, "cpu", False)
self.setUp(args)
self.s2x_task = self.task
def setUp(self, args):
if args.task == "speech_to_speech":
args.normalize_waveform = False
self.task = tasks.setup_task(args)
self.pre_tokenizer = self.task.build_tokenizer(args)
self.bpe_tokenizer = self.task.build_bpe(args)
try:
self.src_dict = self.task.src_dict
except Exception:
self.src_dict = None
ensemble, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
[args.path],
arg_overrides=ast.literal_eval(args.model_overrides),
task=self.task,
strict=False,
)
self.model = ensemble[0]
if self.use_cuda:
self.model.cuda()
# criterion.cuda()
self.model.eval()
self.generator = self.task.build_generator(
[self.model],
args,
)
def processTextInput(self, text):
"""Generate source tokens from text input"""
if self.pre_tokenizer is not None:
text = self.pre_tokenizer.encode(text)
if self.bpe_tokenizer is not None:
text = self.bpe_tokenizer.encode(text)
target = self.src_dict.encode_line(
text, add_if_not_exist=False, append_eos=True
).long()
target = fairseq_data_utils.collate_tokens(
[target],
self.src_dict.pad(),
self.src_dict.eos(),
left_pad=False,
move_eos_to_beginning=False,
)
src_lengths = torch.tensor([target.size(1)], dtype=torch.long)
prev_output_tokens = None
sample = {
"net_input": {
"src_tokens": target,
"src_lengths": src_lengths,
"prev_output_tokens": prev_output_tokens,
}
}
sample = utils.move_to_cuda(sample) if self.use_cuda else sample
return sample
def forward(self, sample):
sample["speaker"] = None
output = self.generator.generate(self.model, sample) # , has_targ=False
return output
class S2UT(BenchmarkingBase):
"""Class to support S2UT models. Also supports generating waveforms from the units predicted"""
def __init__(self, s2u_args, vocoder_args=None):
super().__init__()
self.s2u = Processing(s2u_args)
self.vocoder = None
if vocoder_args:
self.vocoder = GenerateWaveformFromCode(vocoder_args)
self.vocoder_input = None
def forward(self, sample):
s2u_hypos = self.s2u(sample)
s2u_output = self.s2u.decode_target(s2u_hypos)
if not self.vocoder:
return s2u_output
units = self.vocoder.format_units(s2u_output)
vocoder_output = self.vocoder(units)
return vocoder_output
def generate_s2u_outputs(self, dataset):
return [self.s2u.decode_target(self.s2u(sample)) for sample in dataset]
def compute_metrics(self, metric_type, dataset, repeat=None):
"""Generic function to compute metrics ignoring the io processing time"""
if self.vocoder and not self.vocoder_input:
self.s2u_output = self.generate_s2u_outputs(dataset)
self.vocoder_input = self.vocoder.generate_vocoder_input(self.s2u_output)
s2u_metrics = getattr(self.s2u, metric_type)(
dataset,
repeat,
)
vocoder_metrics = 0
if self.vocoder:
vocoder_metrics = getattr(self.vocoder, metric_type)(
self.vocoder_input,
repeat,
)
print(
f"metric_type = {metric_type} s2u_metrics = {s2u_metrics} \t vocoder_metrics = {vocoder_metrics}"
)
if metric_type == "max_memory":
return max(s2u_metrics, vocoder_metrics)
else:
return s2u_metrics + vocoder_metrics
def benchmark_run_time(self, dataset, repeat):
return self.compute_metrics("benchmark_run_time", dataset, repeat)
def count_flops(self, dataset, repeat):
return self.compute_metrics("count_flops", dataset, repeat)
def max_memory(self, dataset, repeat):
return self.compute_metrics("max_memory", dataset, repeat)
class Cascaded2StageS2ST(BenchmarkingBase):
"""ST + TTS"""
def __init__(self, s2t_args, tts_args):
super().__init__()
self.s2t = Processing(s2t_args)
self.s2x_task = self.s2t.task
self.tts = SpeechGeneration(tts_args) if tts_args else None
self.training = False
self.tts_inputs = None
def forward(self, sample):
if not self.tts:
raise Exception(
"Forward function is not callable without tts. Reinitialize the class with tts_args"
)
s2t_hypos = self.s2t(sample)
s2t_output = self.s2t.decode_target(s2t_hypos)
tts_input = self.tts.processTextInput(s2t_output)
tts_output = self.tts(tts_input)
return tts_output
def generate_s2t_outputs(self, dataset):
"""Process dataset and generate s2t outputs"""
return [self.s2t.decode_target(self.s2t(sample)) for sample in dataset]
def generate_tts_inputs(self, dataset):
"""Process dataset and generate tts inputs"""
return [self.tts.processTextInput(sample) for sample in dataset]
def compute_metrics(self, metric_type, dataset, repeat=None):
"""Generic function to compute metrics ignoring the io processing time"""
if not self.tts_inputs:
s2t_outputs = self.generate_s2t_outputs(dataset)
self.tts_inputs = self.generate_tts_inputs(s2t_outputs)
s2t_metrics = getattr(self.s2t, metric_type)(
dataset,
repeat,
)
tts_metrics = getattr(self.tts, metric_type)(
self.tts_inputs,
repeat,
)
print(
f"metric_type = {metric_type} s2t_metrics = {s2t_metrics} \t tts_metrics = {tts_metrics}"
)
if metric_type == "max_memory":
return max(s2t_metrics, tts_metrics)
else:
return s2t_metrics + tts_metrics
def benchmark_run_time(self, dataset, repeat):
return self.compute_metrics("benchmark_run_time", dataset, repeat)
def count_flops(self, dataset, repeat):
return self.compute_metrics("count_flops", dataset, repeat)
def max_memory(self, dataset, repeat):
return self.compute_metrics("max_memory", dataset, repeat)
class Cascaded3StageS2ST(Cascaded2StageS2ST):
"""ASR + MT + TTS"""
def __init__(self, s2t_args, tts_args, mt_args):
super().__init__(s2t_args, tts_args)
self.mt = Processing(mt_args)
self.mt_inputs = []
def forward(self, sample):
s2t_hypos = self.s2t(sample)
s2t_output = self.s2t.decode_target(s2t_hypos)
mt_input = self.mt.encode_source(s2t_output)
mt_hypos = self.mt(mt_input)
mt_output = self.mt.decode_target(mt_hypos)
tts_input = self.tts.processTextInput(mt_output)
tts_output = self.tts(tts_input)
return tts_output
def generate_mt_inputs(self, dataset):
"""Process dataset to generate mt model inputs"""
return [self.mt.encode_source(sample) for sample in dataset]
def generate_mt_outputs(self, dataset):
"""Process dataset to generate mt model outputs"""
return [self.mt.decode_target(self.mt(sample)) for sample in dataset]
def compute_metrics(self, metric_type, dataset, repeat=None):
"""Generic function to compute metrics ignoring the io processing time"""
if not self.tts_inputs:
s2t_outputs = self.generate_s2t_outputs(dataset)
self.mt_inputs = self.generate_mt_inputs(s2t_outputs)
mt_outputs = self.generate_mt_outputs(self.mt_inputs)
self.tts_inputs = self.generate_tts_inputs(mt_outputs)
s2t_metrics = getattr(self.s2t, metric_type)(
dataset,
repeat,
)
mt_metrics = getattr(self.mt, metric_type)(self.mt_inputs, repeat)
tts_metrics = getattr(self.tts, metric_type)(
self.tts_inputs,
repeat,
)
print(
f"metric_type = {metric_type} s2t_metrics = {s2t_metrics} \t mt_metrics = {mt_metrics} \t tts_metrics = {tts_metrics}"
)
if metric_type == "max_memory":
return max(s2t_metrics, mt_metrics, tts_metrics)
else:
return s2t_metrics + mt_metrics + tts_metrics
| 17,782 | 35.440574 | 131 | py |
rej-summ | rej-summ-main/examples/speech_to_speech/benchmarking/data_utils.py | from fairseq import tasks
import numpy as np
import logging
import random
from fairseq import options
import torch
import os
import soundfile as sf
from fairseq.data.audio.audio_utils import (
get_waveform,
parse_path,
)
logging.basicConfig()
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
random.seed(1)
np.random.seed(1)
random_number_generator = np.random.RandomState(30)
def generate_random_data_sample(T, B=1, D=80):
"""Generate random data sample given the T, B, D values"""
net_input = {
"src_tokens": torch.tensor(random_number_generator.randn(B, T, D)).float(),
"src_lengths": torch.tensor([T]),
}
return {"net_input": net_input}
def generate_random_dataset(T_range_min, T_range_max, B=1, D=80, dataset_size=100):
"""Generate random dataset with T values within a given range, B, D"""
T_values = [random.randint(T_range_min, T_range_max) for i in range(dataset_size)]
dataset = []
for t in T_values:
dataset.append(generate_random_data_sample(t, B, D))
return dataset, sum(T_values) / dataset_size
def load_dataset_npy(file_name, dataset_size=None):
"""Load dataset from a .npy file."""
data = np.load(file_name, allow_pickle=True)
if dataset_size:
data = data[:dataset_size]
return data
def load_dataset_raw_to_waveforms(
file_name,
dataset_size=None,
need_waveform=True,
sample_rate=16000,
read_using_soundfile=False,
):
"""Load raw dataset from w2v tsv file. Optionally get waveforms"""
data = []
with open(file_name, "r") as fp:
lines = fp.readlines()
data = [
os.path.join(lines[0].strip(), line.strip().split("\t")[0])
for line in lines[1:]
]
if dataset_size:
data = data[:dataset_size]
if not need_waveform:
return data
features = []
if read_using_soundfile:
for _i, d in enumerate(data):
wav = sf.read(d)[0]
if wav.ndim == 2:
wav = wav.mean(-1)
features.append(torch.from_numpy(wav).float().view(1, -1))
else:
for i, d in enumerate(data):
_path, slice_ptr = parse_path(d)
if len(slice_ptr) == 0:
feat = get_waveform(
_path, always_2d=True, output_sample_rate=sample_rate
)[0]
features.append(
{
"id": i,
"net_input": {
"src_tokens": torch.tensor(feat),
"src_lengths": torch.tensor([feat.shape[1]]),
},
}
)
else:
raise Exception("Currently unsupported data format")
return features
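# Minimal illustrative sketch of the manifest layout implied by the parsing above (an
# assumption, not an official spec): a wav2vec-style TSV whose first line is the audio
# root directory and whose remaining lines start with a relative path (here followed by a
# frame count). All paths and numbers are made-up placeholders.
def _write_demo_manifest(path="/tmp/demo_manifest.tsv"):
    lines = ["/data/audio_root", "clip_0001.wav\t48000", "clip_0002.wav\t32000"]
    with open(path, "w") as f:
        f.write("\n".join(lines) + "\n")
    return path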
def load_dataset_task(
args,
batch_size=1,
limit_size=None,
ref_dataset=None,
):
"""Loads dataset based on args by creating a task"""
if not args.data or not args.subset or not args.task:
raise Exception(
"Please provide necessary arguments to load the dataset - data, subset and task"
)
task = tasks.setup_task(args)
task.load_dataset(args.subset)
if not limit_size:
limit_size = len(task.dataset(args.subset))
iter = task.get_batch_iterator(
dataset=task.dataset(args.subset), max_sentences=batch_size
).next_epoch_itr(shuffle=False)
dataset = []
for i, sample in enumerate(iter):
sample = {
"id": task.datasets[args.subset].ids[sample["id"].item()],
"net_input": {
"src_tokens": sample["net_input"]["src_tokens"],
"src_lengths": sample["net_input"]["src_lengths"],
},
}
dataset.append(sample)
if i == limit_size - 1:
break
if ref_dataset:
try:
ids = get_ids_from_dataset(ref_dataset)
except Exception as e:
raise Exception(f"{e} - Cannot extract ids from reference dataset")
filtered_dataset = []
for sample in dataset:
if (
sample["id"] in ids
or sample["id"][5:] in ids
or f"dev_{sample['id']}" in ids
):
filtered_dataset.append(sample)
dataset = filtered_dataset
max_len, min_len, avg_len = get_dataset_stats(dataset)
    print(
        f"{args.subset} dataset stats : num_samples={len(dataset)} max_len = {max_len} min_len = {min_len} avg_len = {avg_len}"
    )
return dataset
def randomly_sample_subset(dataset, size=500):
    """Randomly sample a subset from a dataset (sampled with replacement, so duplicates are possible)"""
random_indices = [random.randint(0, len(dataset) - 1) for i in range(size)]
return [dataset[i] for i in random_indices]
def get_short_data_subset(dataset, size=500):
"""Get a subset of desired size by sorting based on src_lengths"""
return sort_dataset(dataset)[:size]
def get_long_data_subset(dataset, size=500):
"""Get a subset of desired size by sorting based on src_lengths descending"""
return sort_dataset(dataset, reverse=True)[:size]
def sort_dataset(dataset, reverse=False):
return sorted(
dataset, key=lambda x: x["net_input"]["src_lengths"].item(), reverse=reverse
)
def save_dataset_npy(dataset, file_name):
"""Save a dataset as .npy file"""
np.save(file_name, dataset)
def get_dataset_stats(dataset):
"""Get stats about dataset based on src_lengths of samples"""
max_len = 0
min_len = 100000
avg_len = 0
for d in dataset:
max_len = max(max_len, d["net_input"]["src_lengths"].item())
min_len = min(min_len, d["net_input"]["src_lengths"].item())
avg_len += d["net_input"]["src_lengths"].item()
return max_len, min_len, avg_len / len(dataset)
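# Minimal illustrative sketch of how the subset helpers above bucket a dataset by length.
# The fake samples only carry the fields those helpers inspect, and the lengths are made
# up.
def _length_bucketing_demo():
    fake = [
        {"id": i, "net_input": {"src_lengths": torch.tensor([n])}}
        for i, n in enumerate([30, 5, 120, 64])
    ]
    shortest = get_short_data_subset(fake, size=2)  # lengths 5 and 30
    longest = get_long_data_subset(fake, size=2)  # lengths 120 and 64
    return get_dataset_stats(shortest + longest)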
def make_parser():
"""
Additional args:
1. Provide the dataset dir path using --data.
    2. Loading the dataset doesn't require a config; provide --config-yaml to apply additional feature transforms
"""
parser = options.get_speech_generation_parser()
parser.add_argument(
"--subset",
default=None,
type=str,
required=True,
help="Subset to use for dataset generation",
)
parser.add_argument(
"--dataset-save-dir",
default=None,
type=str,
required=False,
help="Dir path in which the datasets are to be saved",
)
parser.add_argument(
"--ref-dataset",
default=None,
type=str,
required=False,
help="If provided, the ids in the reference dataset will be used to filter the new dataset generated.",
)
parser.add_argument("--dataset-save-token", default="", type=str, required=False)
options.add_generation_args(parser)
return parser
def get_ids_from_dataset(dataset):
return {sample["id"]: 1 for sample in dataset}
def cli_main():
parser = make_parser()
args = options.parse_args_and_arch(parser)
dataset = load_dataset_task(args)
random_dataset = randomly_sample_subset(dataset)
short_dataset = get_short_data_subset(dataset)
long_dataset = get_long_data_subset(dataset)
if args.dataset_save_token:
args.dataset_save_token = f"_{args.dataset_save_token}_"
if args.dataset_save_dir:
save_dataset_npy(
random_dataset,
f"{args.dataset_save_dir}/random_dataset{args.dataset_save_token}w_ids.npy",
)
save_dataset_npy(
short_dataset,
f"{args.dataset_save_dir}/short_dataset{args.dataset_save_token}w_ids.npy",
)
save_dataset_npy(
long_dataset,
f"{args.dataset_save_dir}/long_dataset{args.dataset_save_token}w_ids.npy",
)
if __name__ == "__main__":
cli_main()
| 7,893 | 28.788679 | 127 | py |
rej-summ | rej-summ-main/examples/speech_to_speech/benchmarking/get_metrics.py | import copy
import torch
import logging
from argparse import Namespace
import yaml
from fairseq import options
from examples.speech_to_speech.benchmarking.core import (
Processing,
SpeechGeneration,
Cascaded2StageS2ST,
Cascaded3StageS2ST,
S2UT,
)
from examples.speech_to_speech.benchmarking.data_utils import (
load_dataset_npy,
load_dataset_raw_to_waveforms,
)
logging.basicConfig()
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
torch.manual_seed(1)
torch.set_deterministic(True)
def make_parser():
    """Note: As the names indicate, use s2x_args (e.g. ST, ASR) for models with speech
    input, x2s_args for models with speech output (e.g. TTS), and mt_args for translation
    models (e.g. MT, T2U). For direct S2ST models, use x2s_args to provide model details.
    """
parser = options.get_speech_generation_parser()
parser.add_argument("--target-is-code", action="store_true", default=False)
parser.add_argument("--config", type=str)
    parser.add_argument(
        "--model-type",
        default="S2UT",
choices=["S2S", "TTS", "S2UT", "MT", "S2T", "2StageS2ST", "3StageS2ST"],
help="Choose one of the models. For model inference implementation, refer to core.py",
)
parser.add_argument(
"--dataset-path",
type=str,
help="""File to load dataset from. Assumes dataset is a list of samples.
Each sample is a dict of format {'net_input':{'src_tokens':torch.tenor(),'src_lengths':torch.tensor()}}""",
)
parser.add_argument(
"--dataset-type",
type=str,
default="npy",
choices=["npy", "raw"],
help="""Type of input dataset file""",
)
parser.add_argument(
"--read-using-sf",
type=str,
default=False,
help="""If sound file should be used to read the raw dataset""",
)
parser.add_argument(
"--dataset-size",
default=None,
type=int,
help="Dataset size to use for benchmarking",
)
parser.add_argument(
"--dump-speech-waveforms-dir",
default=None,
type=str,
help="Directory to dump the speech waveforms computed on the dataset.",
)
parser.add_argument(
"--dump-waveform-file-prefix",
default="",
type=str,
help="File name prefix for the saved speech waveforms",
)
parser.add_argument(
"--feat-dim", default=80, type=int, help="Input feature dimension"
)
parser.add_argument(
"--target-sr",
default=16000,
type=int,
help="Target sample rate for dumping waveforms",
)
options.add_generation_args(parser)
options.get_interactive_generation_parser(parser)
return parser
def cli_main():
parser = make_parser()
args = options.parse_args_and_arch(parser)
with open(
args.config,
"r",
) as f:
config = yaml.load(f, Loader=yaml.FullLoader)
dict_args = vars(args)
dict_args.update(config["general"])
args = Namespace(**dict_args)
i = 1
stage_args = []
while i <= 3:
var = f"stage{i}"
tmp_args = copy.deepcopy(dict_args)
if var in config:
tmp_args.update(config[var])
stage_args.append(Namespace(**tmp_args))
i += 1
else:
break
if args.model_type == "S2S" or args.model_type == "TTS":
model = SpeechGeneration(stage_args[0])
elif args.model_type == "S2UT":
model = S2UT(stage_args[0], stage_args[1] if len(stage_args) > 1 else None)
elif args.model_type == "MT" or args.model_type == "S2T":
model = Processing(stage_args[0])
elif args.model_type == "2StageS2ST":
model = Cascaded2StageS2ST(stage_args[0], stage_args[1])
elif args.model_type == "3StageS2ST":
model = Cascaded3StageS2ST(stage_args[0], stage_args[2], stage_args[1])
else:
raise Exception(f"Currently unsupported model type {args.model_type}")
print(f"Evaluating on dataset - {args.dataset_path}\n")
if args.dataset_type == "npy":
dataset = load_dataset_npy(args.dataset_path, dataset_size=args.dataset_size)
elif args.dataset_type == "raw":
dataset = load_dataset_raw_to_waveforms(
args.dataset_path,
dataset_size=args.dataset_size,
read_using_soundfile=args.read_using_sf,
)
else:
raise Exception(f"Invalid dataset type {args.dataset_type}")
model.warm_up(sample=dataset[0], repeat=2)
run_time, memory, flops = model.gather_all_metrics(dataset, repeat=1)
print(f"run_time = {run_time}sec \tmemory = {memory}MiB \tflops = {flops}")
if args.dump_speech_waveforms_dir:
model.dump_final_speech_output(
dataset,
args.dump_speech_waveforms_dir,
lambda x: x,
args.target_sr,
prefix=args.dump_waveform_file_prefix,
)
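# Minimal illustrative sketch of the --config layout cli_main() above assumes (an
# assumption inferred from the parsing code, not an official schema): a "general" section
# merged into every stage, plus optional "stage1"/"stage2"/"stage3" sections with
# per-stage overrides. All keys, paths and values here are made-up placeholders.
def _example_benchmark_config():
    return {
        "general": {"beam": 10},
        "stage1": {"path": "/checkpoints/s2u_model.pt"},
        "stage2": {
            "vocoder": "/checkpoints/vocoder.pt",
            "vocoder_cfg": "/checkpoints/vocoder_config.json",
        },
    }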
if __name__ == "__main__":
cli_main()
| 5,053 | 30.006135 | 115 | py |
rej-summ | rej-summ-main/examples/speech_to_speech/unity/sequence_generator.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import sys
from typing import Dict, List, Optional
import torch
from torch import Tensor
from fairseq.sequence_generator import EnsembleModel as EnsembleModelBase
from fairseq.sequence_generator import SequenceGenerator as SequenceGeneratorBase
class SequenceGenerator(SequenceGeneratorBase):
def __init__(
self,
models,
tgt_dict,
beam_size=1,
max_len_a=0,
max_len_b=200,
max_len=0,
min_len=1,
normalize_scores=True,
len_penalty=1.0,
unk_penalty=0.0,
temperature=1.0,
match_source_len=False,
no_repeat_ngram_size=0,
search_strategy=None,
eos=None,
symbols_to_strip_from_output=None,
lm_model=None,
lm_weight=1.0,
tokens_to_suppress=(),
):
"""Generates translations of a given source sentence.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models,
currently support fairseq.models.TransformerModel for scripting
beam_size (int, optional): beam width (default: 1)
max_len_a/b (int, optional): generate sequences of maximum length
ax + b, where x is the source length
max_len (int, optional): the maximum length of the generated output
(not including end-of-sentence)
min_len (int, optional): the minimum length of the generated output
(not including end-of-sentence)
normalize_scores (bool, optional): normalize scores by the length
of the output (default: True)
len_penalty (float, optional): length penalty, where <1.0 favors
shorter, >1.0 favors longer sentences (default: 1.0)
unk_penalty (float, optional): unknown word penalty, where <0
produces more unks, >0 produces fewer (default: 0.0)
temperature (float, optional): temperature, where values
>1.0 produce more uniform samples and values <1.0 produce
sharper samples (default: 1.0)
match_source_len (bool, optional): outputs should match the source
length (default: False)
"""
super().__init__(
models=models,
tgt_dict=tgt_dict,
beam_size=beam_size,
max_len_a=max_len_a,
max_len_b=max_len_b,
max_len=max_len,
min_len=min_len,
normalize_scores=normalize_scores,
len_penalty=len_penalty,
unk_penalty=unk_penalty,
temperature=temperature,
match_source_len=match_source_len,
no_repeat_ngram_size=no_repeat_ngram_size,
search_strategy=search_strategy,
eos=eos,
symbols_to_strip_from_output=symbols_to_strip_from_output,
lm_model=lm_model,
lm_weight=lm_weight,
tokens_to_suppress=tokens_to_suppress,
)
if isinstance(models, EnsembleModel):
self.model = models
else:
self.model = EnsembleModel(models)
self.model.set_decoder_beam_size(self.beam_size)
self.model.eval()
def _generate(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
constraints: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
net_input = sample["net_input"]
if "src_tokens" in net_input:
src_tokens = net_input["src_tokens"]
# length of the source text being the character length except EndOfSentence and pad
# if src_lengths exists in net_input (speech_to_text dataset case), then use it
if "src_lengths" in net_input:
src_lengths = net_input["src_lengths"]
else:
src_lengths = (
(src_tokens.ne(self.eos) & src_tokens.ne(self.pad))
.long()
.sum(dim=1)
)
elif "source" in net_input:
src_tokens = net_input["source"]
src_lengths = (
net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
if net_input["padding_mask"] is not None
else torch.tensor(src_tokens.size(-1)).to(src_tokens)
)
elif "features" in net_input:
src_tokens = net_input["features"]
src_lengths = (
net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
if net_input["padding_mask"] is not None
else torch.tensor(src_tokens.size(-1)).to(src_tokens)
)
else:
raise Exception(
"expected src_tokens or source in net input. input keys: "
+ str(net_input.keys())
)
if constraints is not None and not self.search.supports_constraints:
raise NotImplementedError(
"Target-side constraints were provided, but search method doesn't support them"
)
# Initialize constraints, when active
self.search.init_constraints(constraints, self.beam_size)
# compute the encoder output for each beam
with torch.autograd.profiler.record_function("EnsembleModel: forward_encoder"):
encoder_outs = self.model.forward_encoder(net_input)
finalized = self.generate_decoder(
encoder_outs,
src_tokens,
src_lengths,
sample,
prefix_tokens,
constraints,
bos_token,
)
return finalized
def generate_decoder(
self,
encoder_outs,
src_tokens,
src_lengths,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
constraints: Optional[Tensor] = None,
bos_token: Optional[int] = None,
aux_task_name="",
encoder_outs_aug: Optional[
Tensor
] = None, # an additional/augmented encoder_outs
):
incremental_states = torch.jit.annotate(
List[Dict[str, Dict[str, Optional[Tensor]]]],
[
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
for i in range(self.model.models_size)
],
)
# bsz: total number of sentences in beam
# Note that src_tokens may have more than 2 dimensions (i.e. audio features)
bsz, src_len = src_tokens.size()[:2]
beam_size = self.beam_size
decoder_name = f"{aux_task_name}_decoder" if aux_task_name else "decoder"
max_len: int = -1
if self.match_source_len:
max_len = src_lengths.max().item()
else:
max_len = min(
int(self.max_len_a * src_len + self.max_len_b),
self.max_len - 1,
)
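            # e.g. with the defaults max_len_a=0 and max_len_b=200, hypotheses are capped
            # at 200 tokens (or at self.max_len - 1 if that is smaller).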
assert (
self.min_len <= max_len
), "min_len cannot be larger than max_len, please adjust these!"
# placeholder of indices for bsz * beam_size to hold tokens and accumulative scores
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
encoder_outs = self.model.reorder_encoder_out(encoder_outs, new_order)
# ensure encoder_outs is a List.
assert encoder_outs is not None
if encoder_outs_aug is not None:
encoder_outs_aug = self.model.reorder_encoder_out(
encoder_outs_aug, new_order
)
# initialize buffers
scores = (
torch.zeros(bsz * beam_size, max_len + 1).to(src_tokens).float()
) # +1 for eos; pad is never chosen for scoring
tokens = (
torch.zeros(bsz * beam_size, max_len + 2)
.to(src_tokens)
.long()
.fill_(self.pad)
) # +2 for eos and pad
tokens[:, 0] = self.eos if bos_token is None else bos_token
attn: Optional[Tensor] = None
# A list that indicates candidates that should be ignored.
# For example, suppose we're sampling and have already finalized 2/5
# samples. Then cands_to_ignore would mark 2 positions as being ignored,
# so that we only finalize the remaining 3 samples.
cands_to_ignore = (
torch.zeros(bsz, beam_size).to(src_tokens).eq(-1)
) # forward and backward-compatible False mask
# list of completed sentences
finalized = torch.jit.annotate(
List[List[Dict[str, Tensor]]],
[torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)],
        ) # contains lists of dictionaries of information about the hypotheses being finalized at each step
# a boolean array indicating if the sentence at the index is finished or not
finished = [False for i in range(bsz)]
num_remaining_sent = bsz # number of sentences remaining
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (
(torch.arange(0, bsz) * beam_size)
.unsqueeze(1)
.type_as(tokens)
.to(src_tokens.device)
)
cand_offsets = torch.arange(0, cand_size).type_as(tokens).to(src_tokens.device)
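        # e.g. with bsz=2 and beam_size=3: bbsz_offsets == [[0], [3]], so beam b of
        # sentence s lives at flat row s * beam_size + b of tokens/scores, while
        # cand_offsets == [0, 1, ..., 2 * beam_size - 1] indexes the candidate dimension.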
reorder_state: Optional[Tensor] = None
batch_idxs: Optional[Tensor] = None
original_batch_idxs: Optional[Tensor] = None
if "id" in sample and isinstance(sample["id"], Tensor):
original_batch_idxs = sample["id"]
else:
original_batch_idxs = torch.arange(0, bsz).type_as(tokens)
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(
batch_idxs
)
reorder_state.view(-1, beam_size).add_(
corr.unsqueeze(-1) * beam_size
)
original_batch_idxs = original_batch_idxs[batch_idxs]
self.model.reorder_incremental_state(
incremental_states, reorder_state, decoder_name
)
encoder_outs = self.model.reorder_encoder_out(
encoder_outs, reorder_state
)
if encoder_outs_aug is not None:
encoder_outs_aug = self.model.reorder_encoder_out(
encoder_outs_aug, reorder_state
)
with torch.autograd.profiler.record_function(
"EnsembleModel: forward_decoder"
):
lprobs, avg_attn_scores = self.model.forward_decoder(
tokens[:, : step + 1],
encoder_outs,
incremental_states,
self.temperature,
decoder_name=decoder_name,
encoder_outs_aug=encoder_outs_aug,
)
if self.lm_model is not None and not aux_task_name:
lm_out = self.lm_model(tokens[:, : step + 1])
probs = self.lm_model.get_normalized_probs(
lm_out, log_probs=True, sample=None
)
probs = probs[:, -1, :] * self.lm_weight
lprobs += probs
lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs)
lprobs[:, self.pad] = -math.inf # never select pad
lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
# handle max length constraint
if step >= max_len:
lprobs[:, : self.eos] = -math.inf
lprobs[:, self.eos + 1 :] = -math.inf
# handle prefix tokens (possibly with different lengths)
if (
prefix_tokens is not None
and step < prefix_tokens.size(1)
and step < max_len
):
lprobs, tokens, scores = self._prefix_tokens(
step, lprobs, scores, tokens, prefix_tokens, beam_size
)
else:
if step < self.min_len:
# minimum length constraint (does not apply if using prefix_tokens)
lprobs[:, self.eos] = -math.inf
if self.token_indices_to_suppress is not None:
lprobs[:, self.token_indices_to_suppress] = -math.inf
# Record attention scores, only support avg_attn_scores is a Tensor
if avg_attn_scores is not None:
if attn is None:
attn = torch.empty(
bsz * beam_size, avg_attn_scores.size(1), max_len + 2
).to(scores)
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
eos_bbsz_idx = torch.empty(0).to(
tokens
) # indices of hypothesis ending with eos (finished sentences)
eos_scores = torch.empty(0).to(
scores
) # scores of hypothesis ending with eos (finished sentences)
if self.should_set_src_lengths:
self.search.set_src_lengths(src_lengths)
if self.repeat_ngram_blocker is not None:
lprobs = self.repeat_ngram_blocker(tokens, lprobs, bsz, beam_size, step)
# Shape: (batch, cand_size)
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
tokens[:, : step + 1],
original_batch_idxs,
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos
# Shape of eos_mask: (batch size, beam size)
eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf)
eos_mask[:, :beam_size][cands_to_ignore] = torch.tensor(0).to(eos_mask)
# only consider eos when it's among the top beam_size indices
# Now we know what beam item(s) to finish
# Shape: 1d list of absolute-numbered
eos_bbsz_idx = torch.masked_select(
cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents: List[int] = []
if eos_bbsz_idx.numel() > 0:
eos_scores = torch.masked_select(
cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents = self.finalize_hypos(
step,
eos_bbsz_idx,
eos_scores,
tokens,
scores,
finalized,
finished,
beam_size,
attn,
src_lengths,
max_len,
)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
if self.search.stop_on_max_len and step >= max_len:
break
            assert step < max_len, f"{step} < {max_len}"
# Remove finalized sentences (ones for which {beam_size}
# finished hypotheses have been generated) from the batch.
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = torch.ones(
bsz, dtype=torch.bool, device=cand_indices.device
)
batch_mask[finalized_sents] = False
# TODO replace `nonzero(as_tuple=False)` after TorchScript supports it
batch_idxs = torch.arange(
bsz, device=cand_indices.device
).masked_select(batch_mask)
# Choose the subset of the hypothesized constraints that will continue
self.search.prune_sentences(batch_idxs)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
cands_to_ignore = cands_to_ignore[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(
new_bsz * beam_size, attn.size(1), -1
)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos hypos
# and values < cand_size indicate candidate active hypos.
# After, the min values per row are the top candidate active hypos
# Rewrite the operator since the element wise or is not supported in torchscript.
eos_mask[:, :beam_size] = ~((~cands_to_ignore) & (~eos_mask[:, :beam_size]))
active_mask = torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[: eos_mask.size(1)],
)
# get the top beam_size active hypotheses, which are just
# the hypos with the smallest values in active_mask.
# {active_hypos} indicates which {beam_size} hypotheses
# from the list of {2 * beam_size} candidates were
# selected. Shapes: (batch size, beam size)
new_cands_to_ignore, active_hypos = torch.topk(
active_mask, k=beam_size, dim=1, largest=False
)
# update cands_to_ignore to ignore any finalized hypos.
cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size]
# Make sure there is at least one active item for each sentence in the batch.
assert (~cands_to_ignore).any(dim=1).all()
# update cands_to_ignore to ignore any finalized hypos
# {active_bbsz_idx} denotes which beam number is continued for each new hypothesis (a beam
# can be selected more than once).
active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos)
active_scores = torch.gather(cand_scores, dim=1, index=active_hypos)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
# Set the tokens for each beam (can select the same row more than once)
tokens[:, : step + 1] = torch.index_select(
tokens[:, : step + 1], dim=0, index=active_bbsz_idx
)
# Select the next token for each of them
tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather(
cand_indices, dim=1, index=active_hypos
)
if step > 0:
scores[:, :step] = torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx
)
scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather(
cand_scores, dim=1, index=active_hypos
)
# Update constraints based on which candidates were selected for the next beam
self.search.update_constraints(active_hypos)
# copy attention for active hypotheses
if attn is not None:
attn[:, :, : step + 2] = torch.index_select(
attn[:, :, : step + 2], dim=0, index=active_bbsz_idx
)
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
scores = torch.tensor(
[float(elem["score"].item()) for elem in finalized[sent]]
)
_, sorted_scores_indices = torch.sort(scores, descending=True)
finalized[sent] = [finalized[sent][ssi] for ssi in sorted_scores_indices]
finalized[sent] = torch.jit.annotate(
List[Dict[str, Tensor]], finalized[sent]
)
return finalized
class EnsembleModel(EnsembleModelBase):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__(models)
@torch.jit.export
def forward_decoder(
self,
tokens,
encoder_outs: List[Dict[str, List[Tensor]]],
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
temperature: float = 1.0,
decoder_name="decoder",
encoder_outs_aug: List[Dict[str, List[Tensor]]] = None,
):
log_probs = []
avg_attn: Optional[Tensor] = None
encoder_out: Optional[Dict[str, List[Tensor]]] = None
encoder_out_aug: Optional[Dict[str, List[Tensor]]] = None
for i, model in enumerate(self.models):
if self.has_encoder():
encoder_out = encoder_outs[i]
if encoder_outs_aug is not None:
encoder_out_aug = encoder_outs_aug[i]
# decode each model
if self.has_incremental_states():
if encoder_out_aug is not None:
decoder_out = getattr(model, decoder_name).forward(
tokens,
encoder_out=encoder_out,
encoder_out_aug=encoder_out_aug,
incremental_state=incremental_states[i],
)
else:
decoder_out = getattr(model, decoder_name).forward(
tokens,
encoder_out=encoder_out,
incremental_state=incremental_states[i],
)
else:
if hasattr(model, decoder_name):
decoder_out = getattr(model, decoder_name).forward(
tokens, encoder_out=encoder_out
)
else:
decoder_out = model.forward(tokens)
attn: Optional[Tensor] = None
decoder_len = len(decoder_out)
if decoder_len > 1 and decoder_out[1] is not None:
if isinstance(decoder_out[1], Tensor):
attn = decoder_out[1]
else:
attn_holder = decoder_out[1]["attn"]
if isinstance(attn_holder, Tensor):
attn = attn_holder
elif attn_holder is not None:
attn = attn_holder[0]
if attn is not None:
attn = attn[:, -1, :]
decoder_out_tuple = (
decoder_out[0][:, -1:, :].div_(temperature),
None if decoder_len <= 1 else decoder_out[1],
)
probs = getattr(model, decoder_name).get_normalized_probs(
decoder_out_tuple, log_probs=True, sample=None
)
probs = probs[:, -1, :]
if self.models_size == 1:
return probs, attn
log_probs.append(probs)
if attn is not None:
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(
self.models_size
)
if avg_attn is not None:
avg_attn.div_(self.models_size)
return avg_probs, avg_attn
@torch.jit.export
def reorder_incremental_state(
self,
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
new_order,
decoder_name="decoder",
):
if not self.has_incremental_states():
return
for i, model in enumerate(self.models):
getattr(model, decoder_name).reorder_incremental_state_scripting(
incremental_states[i], new_order
)
| 25,480 | 39.639553 | 107 | py |
rej-summ | rej-summ-main/examples/speech_to_speech/unity/sequence_generator_multi_decoder.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from fairseq import search
class MultiDecoderSequenceGenerator(nn.Module):
def __init__(
self,
models,
tgt_dict,
tgt_dict_mt,
beam_size=1,
beam_size_mt=1,
max_len_a=0,
max_len_b=200,
max_len_a_mt=0,
max_len_b_mt=200,
max_len=0,
min_len=1,
normalize_scores=True,
len_penalty=1.0,
len_penalty_mt=1.0,
unk_penalty=0.0,
temperature=1.0,
match_source_len=False,
no_repeat_ngram_size=0,
eos=None,
eos_mt=None,
symbols_to_strip_from_output=None,
lm_model=None,
lm_weight=1.0,
):
"""Generates translations of a given source sentence.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models,
currently support fairseq.models.TransformerModel for scripting
beam_size (int, optional): beam width (default: 1)
max_len_a/b (int, optional): generate sequences of maximum length
ax + b, where x is the source length for the second pass
max_len_a_mt/b_mt (int, optional): generate sequences of maximum length
ax + b, where x is the source length for the first pass
max_len (int, optional): the maximum length of the generated output
(not including end-of-sentence)
min_len (int, optional): the minimum length of the generated output
(not including end-of-sentence)
normalize_scores (bool, optional): normalize scores by the length
of the output (default: True)
len_penalty (float, optional): length penalty in the second pass, where <1.0 favors
shorter, >1.0 favors longer sentences (default: 1.0)
            len_penalty_mt (float, optional): length penalty in the first pass, where <1.0 favors
shorter, >1.0 favors longer sentences (default: 1.0)
unk_penalty (float, optional): unknown word penalty, where <0
produces more unks, >0 produces fewer (default: 0.0)
temperature (float, optional): temperature, where values
>1.0 produce more uniform samples and values <1.0 produce
sharper samples (default: 1.0)
match_source_len (bool, optional): outputs should match the source
length (default: False)
"""
super().__init__()
from examples.speech_to_speech.unity.sequence_generator import SequenceGenerator
self.generator = SequenceGenerator(
models,
tgt_dict,
beam_size=beam_size,
max_len_a=max_len_a,
max_len_b=max_len_b,
max_len=max_len,
min_len=min_len,
normalize_scores=normalize_scores,
len_penalty=len_penalty,
unk_penalty=unk_penalty,
temperature=temperature,
match_source_len=match_source_len,
no_repeat_ngram_size=no_repeat_ngram_size,
search_strategy=search.BeamSearch(tgt_dict),
eos=eos,
symbols_to_strip_from_output=symbols_to_strip_from_output,
lm_model=lm_model,
lm_weight=lm_weight,
)
self.eos = self.generator.eos
self.generator_mt = SequenceGenerator(
models,
tgt_dict_mt,
beam_size=beam_size_mt,
max_len_a=max_len_a_mt,
max_len_b=max_len_b_mt,
max_len=max_len,
min_len=min_len,
normalize_scores=normalize_scores,
len_penalty=len_penalty_mt,
unk_penalty=unk_penalty,
temperature=temperature,
match_source_len=match_source_len,
no_repeat_ngram_size=no_repeat_ngram_size,
search_strategy=search.BeamSearch(tgt_dict_mt),
eos=eos_mt,
symbols_to_strip_from_output=symbols_to_strip_from_output,
)
@torch.no_grad()
def generate(
self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs
) -> List[List[Dict[str, Tensor]]]:
"""Generate translations. Match the api of other fairseq generators.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
constraints (torch.LongTensor, optional): force decoder to include
the list of constraints
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, **kwargs)
def _generate(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
constraints: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
net_input = sample["net_input"]
if "src_tokens" in net_input:
src_tokens = net_input["src_tokens"]
            # the source length is the number of tokens, excluding end-of-sentence and padding
            # if src_lengths exists in net_input (speech_to_text dataset case), use it directly
if "src_lengths" in net_input:
src_lengths = net_input["src_lengths"]
else:
src_lengths = (
(
src_tokens.ne(self.generator.eos)
& src_tokens.ne(self.generator.pad)
)
.long()
.sum(dim=1)
)
else:
raise Exception(
"expected src_tokens or source in net input. input keys: "
+ str(net_input.keys())
)
if constraints is not None and not self.generator.search.supports_constraints:
raise NotImplementedError(
"Target-side constraints were provided, but search method doesn't support them"
)
# Initialize constraints, when active
self.generator.search.init_constraints(constraints, self.generator.beam_size)
self.generator_mt.search.init_constraints(
constraints, self.generator_mt.beam_size
)
# compute the encoder output for each beam
with torch.autograd.profiler.record_function("EnsembleModel: forward_encoder"):
encoder_outs = self.generator.model.forward_encoder(net_input)
single_model = self.generator.model.single_model
mt_decoder = getattr(single_model, f"{single_model.mt_task_name}_decoder")
# 1. MT decoder
finalized_mt = self.generator_mt.generate_decoder(
encoder_outs,
src_tokens,
src_lengths,
sample,
prefix_tokens,
constraints,
bos_token,
aux_task_name=single_model.mt_task_name,
)
# extract decoder output corresponding to the best hypothesis
max_tgt_len = max([len(hypo[0]["tokens"]) for hypo in finalized_mt])
prev_output_tokens_mt = (
src_tokens.new_zeros(src_tokens.shape[0], max_tgt_len)
.fill_(mt_decoder.padding_idx)
.int()
) # B x T
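        # Each first-pass (text) hypothesis is converted into teacher-forcing input for the
        # MT decoder below: EOS is written at position 0, the hypothesis tokens (minus the
        # trailing EOS) are shifted right by one, and the remaining positions stay padded.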
for i, hypo in enumerate(finalized_mt):
i_beam = 0
tmp = hypo[i_beam]["tokens"].int() # hyp + eos
prev_output_tokens_mt[i, 0] = self.generator_mt.eos
if tmp[-1] == self.generator_mt.eos:
tmp = tmp[:-1]
prev_output_tokens_mt[i, 1 : len(tmp) + 1] = tmp
text = "".join([self.generator_mt.tgt_dict[c] for c in tmp])
text = text.replace("_", " ")
text = text.replace("▁", " ")
text = text.replace("<unk>", " ")
text = text.replace("<s>", "")
text = text.replace("</s>", "")
if len(text) > 0 and text[0] == " ":
text = text[1:]
sample_id = sample["id"].tolist()[i]
print("{} (None-{})".format(text, sample_id))
x = mt_decoder(
prev_output_tokens_mt,
encoder_out=encoder_outs[0],
features_only=True,
)[0].transpose(0, 1)
if getattr(single_model, "proj", None) is not None:
x = single_model.proj(x)
mt_decoder_padding_mask = None
if prev_output_tokens_mt.eq(mt_decoder.padding_idx).any():
mt_decoder_padding_mask = prev_output_tokens_mt.eq(mt_decoder.padding_idx)
# 2. T2U encoder
if getattr(single_model, "synthesizer_encoder", None) is not None:
t2u_encoder_out = single_model.synthesizer_encoder(
x,
mt_decoder_padding_mask,
)
else:
t2u_encoder_out = {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [mt_decoder_padding_mask]
if mt_decoder_padding_mask is not None
else [], # B x T
"encoder_embedding": [],
"encoder_states": [],
"src_tokens": [],
"src_lengths": [],
}
if getattr(single_model, "t2u_augmented_cross_attn", False):
encoder_outs_aug = [t2u_encoder_out]
else:
encoder_outs = [t2u_encoder_out]
encoder_outs_aug = None
# 3. T2U decoder
finalized = self.generator.generate_decoder(
encoder_outs,
src_tokens,
src_lengths,
sample,
prefix_tokens,
constraints,
bos_token,
encoder_outs_aug=encoder_outs_aug,
)
return finalized
| 10,095 | 36.671642 | 95 | py |
rej-summ | rej-summ-main/examples/speech_to_speech/preprocessing/prep_s2spect_data.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
from pathlib import Path
import shutil
import torchaudio
import soundfile as sf
from tqdm import tqdm
import pandas as pd
from examples.speech_synthesis.data_utils import extract_logmel_spectrogram
from examples.speech_to_speech.preprocessing.data_utils import gen_config_yaml
from examples.speech_to_text.data_utils import create_zip, get_zip_manifest, save_df_to_tsv
from fairseq.data.audio.audio_utils import convert_waveform
logger = logging.getLogger(__name__)
MANIFEST_COLUMNS = ["id", "src_audio", "src_n_frames", "tgt_audio", "tgt_n_frames"]
def prepare_target_data(args, tgt_audios):
feature_name = "logmelspec80"
zip_path = args.output_root / f"{feature_name}.zip"
if zip_path.exists():
print(f"{zip_path} exists.")
return zip_path
feature_root = args.output_root / feature_name
feature_root.mkdir(exist_ok=True)
print("Extracting Mel spectrogram features...")
for tgt_audio in tqdm(tgt_audios):
sample_id = tgt_audio.stem
waveform, sample_rate = torchaudio.load(tgt_audio.as_posix())
waveform, sample_rate = convert_waveform(
waveform, sample_rate, normalize_volume=args.normalize_volume,
to_sample_rate=args.sample_rate
)
extract_logmel_spectrogram(
waveform, sample_rate, feature_root / f"{sample_id}.npy",
win_length=args.win_length, hop_length=args.hop_length,
n_fft=args.n_fft, n_mels=args.n_mels, f_min=args.f_min,
f_max=args.f_max
)
print("ZIPing features...")
create_zip(feature_root, zip_path)
shutil.rmtree(feature_root)
return zip_path
def process(args):
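    # Overall flow (descriptive note): pair each source wav with its target wav per split,
    # extract log-Mel spectrogram features for the targets into a single ZIP, then write one
    # TSV manifest per split plus a config YAML pointing at the packed features.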
os.makedirs(args.output_root, exist_ok=True)
manifest = {}
tgt_audios = []
for split in args.data_split:
print(f"Processing {split}...")
manifest[split] = {c: [] for c in MANIFEST_COLUMNS}
missing_tgt_audios = []
src_audios = list(args.source_dir.glob(f"{split}/*.wav"))
for src_audio in tqdm(src_audios):
sample_id = src_audio.stem
tgt_audio = args.target_dir / split / f"{sample_id}.wav"
if not tgt_audio.is_file():
missing_tgt_audios.append(sample_id)
continue
tgt_audios.append(tgt_audio)
src_n_frames = sf.info(src_audio.as_posix()).frames
manifest[split]["id"].append(sample_id)
manifest[split]["src_audio"].append(src_audio.as_posix())
manifest[split]["src_n_frames"].append(
src_n_frames // 160
            ) # estimate of the number of 10-ms frames for 16 kHz audio (160 samples per frame)
print(f"Processed {len(manifest[split]['id'])} samples")
if len(missing_tgt_audios) > 0:
print(
f"{len(missing_tgt_audios)} with missing target data (first 3 examples: {', '.join(missing_tgt_audios[:3])})"
)
# Extract features and pack features into ZIP
zip_path = prepare_target_data(args, tgt_audios)
print("Fetching ZIP manifest...")
tgt_audio_paths, tgt_audio_lengths = get_zip_manifest(zip_path)
print("Generating manifest...")
for split in args.data_split:
print(f"Processing {split}...")
for sample_id in tqdm(manifest[split]["id"]):
manifest[split]["tgt_audio"].append(tgt_audio_paths[sample_id])
manifest[split]["tgt_n_frames"].append(tgt_audio_lengths[sample_id])
out_manifest = args.output_root / f"{split}.tsv"
print(f"Writing manifest to {out_manifest}...")
save_df_to_tsv(pd.DataFrame.from_dict(manifest[split]), out_manifest)
# Generate config YAML
win_len_t = args.win_length / args.sample_rate
hop_len_t = args.hop_length / args.sample_rate
extra = {
"features": {
"type": "spectrogram+melscale+log",
"sample_rate": args.sample_rate,
"eps": 1e-5, "n_mels": args.n_mels, "n_fft": args.n_fft,
"window_fn": "hann", "win_length": args.win_length,
"hop_length": args.hop_length,
"win_len_t": win_len_t, "hop_len_t": hop_len_t,
"f_min": args.f_min, "f_max": args.f_max,
"n_stft": args.n_fft // 2 + 1
}
}
gen_config_yaml(
args.output_root,
audio_root=args.output_root.as_posix(),
specaugment_policy="lb",
feature_transform=["utterance_cmvn", "delta_deltas"],
extra=extra,
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--source-dir", required=True, type=Path, help="source audio directory"
)
parser.add_argument(
"--target-dir", required=True, type=Path, help="target audio directory"
)
parser.add_argument(
"--data-split",
default=["train", "valid", "test"],
nargs="+",
help="data split names",
)
parser.add_argument(
"--output-root", required=True, type=Path, help="output directory"
)
# target feature related
parser.add_argument("--win-length", type=int, default=1024)
parser.add_argument("--hop-length", type=int, default=256)
parser.add_argument("--n-fft", type=int, default=1024)
parser.add_argument("--n-mels", type=int, default=80)
parser.add_argument("--f-min", type=int, default=20)
parser.add_argument("--f-max", type=int, default=8000)
parser.add_argument("--sample-rate", type=int, default=22050)
parser.add_argument("--normalize-volume", "-n", action="store_true")
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| 5,844 | 33.382353 | 125 | py |
rej-summ | rej-summ-main/examples/bart/summarize.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.models.bart import BARTModel
import argparse
from tqdm import tqdm
XSUM_KWARGS = dict(max_len_b=60, min_len=10, no_repeat_ngram_size=3)
CNN_KWARGS = dict(max_len_b=140, min_len=55, no_repeat_ngram_size=3)
@torch.no_grad()
def generate(bart, infile, outfile="bart_hypo.txt", bsz=32, n_obs=None, **eval_kwargs):
count = 1
# if n_obs is not None: bsz = min(bsz, n_obs)
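    # Stream the input file and decode it in batches of `bsz` lines: hypotheses are written
    # out (and flushed) as soon as each batch is summarized, and any partial batch left over
    # at the end of the file is handled after the loop.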
with open(infile) as source, open(outfile, "w") as fout:
sline = source.readline().strip()
slines = [sline]
for sline in tqdm(source):
if n_obs is not None and count > n_obs:
break
if count % bsz == 0:
hypotheses_batch = bart.sample(slines, **eval_kwargs)
for hypothesis in hypotheses_batch:
fout.write(hypothesis + "\n")
fout.flush()
slines = []
slines.append(sline.strip())
count += 1
if slines != []:
hypotheses_batch = bart.sample(slines, **eval_kwargs)
for hypothesis in hypotheses_batch:
fout.write(hypothesis + "\n")
fout.flush()
def main():
"""
Usage::
python examples/bart/summarize.py \
--model-dir $HOME/bart.large.cnn \
--model-file model.pt \
--src $HOME/data-bin/cnn_dm/test.source
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--model-dir",
required=True,
type=str,
default="bart.large.cnn/",
help="path containing model file and src_dict.txt",
)
parser.add_argument(
"--model-file",
default="checkpoint_best.pt",
help="where in model_dir are weights saved",
)
parser.add_argument(
"--dict-dir",
required=True,
type=str,
default="xsum-bin/",
help="path containing dict.source.txt and dict.target.txt",
)
parser.add_argument(
"--src", default="test.source", help="text to summarize", type=str
)
parser.add_argument(
"--out", default="test.hypo", help="where to save summaries", type=str
)
parser.add_argument("--beam_size", default=6, help="Number of beams for decoding", type=int)
parser.add_argument("--bsz", default=32, help="Batch size", type=int)
parser.add_argument(
"--n", default=None, help="how many examples to summarize", type=int
)
parser.add_argument("--lenpen", default=1.0, help="Length penalty", type=float)
parser.add_argument("--rejpen", default=0.0, help="Rejction penalty", type=float)
parser.add_argument(
"--unnormalized", action="store_true", default=False,
)
parser.add_argument(
"--xsum-kwargs",
action="store_true",
default=False,
help="if true use XSUM_KWARGS else CNN_KWARGS",
)
args = parser.parse_args()
eval_kwargs = XSUM_KWARGS if args.xsum_kwargs else CNN_KWARGS
if args.model_dir == "pytorch/fairseq":
bart = torch.hub.load("pytorch/fairseq", args.model_file)
else:
bart = BARTModel.from_pretrained(
args.model_dir,
checkpoint_file=args.model_file,
data_name_or_path=args.dict_dir,
)
bart = bart.eval()
if torch.cuda.is_available():
bart = bart.cuda().half()
generate(
bart,
args.src,
bsz=args.bsz,
n_obs=args.n,
outfile=args.out,
beam=args.beam_size,
lenpen=args.lenpen,
rejpen=args.rejpen,
unnormalized=args.unnormalized,
**eval_kwargs
)
if __name__ == "__main__":
main()
| 3,864 | 30.169355 | 96 | py |
rej-summ | rej-summ-main/examples/data2vec/models/data2vec_audio.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from dataclasses import dataclass, field
from typing import Optional
from omegaconf import II
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from fairseq.modules import EMAModule, EMAModuleConfig
from fairseq.data.data_utils import compute_mask_indices
from fairseq.models import BaseFairseqModel, register_model
from fairseq.models.wav2vec import (
ConvFeatureExtractionModel,
Wav2Vec2Config,
TransformerEncoder,
)
from fairseq.modules import (
GradMultiply,
LayerNorm,
)
from fairseq.utils import index_put
logger = logging.getLogger(__name__)
@dataclass
class Data2VecAudioConfig(Wav2Vec2Config):
loss_beta: float = field(
default=0, metadata={"help": "beta for smooth l1 loss. 0 means use l2 loss"}
)
loss_scale: Optional[float] = field(
default=None,
metadata={
"help": "scale the reconstruction loss by this constant. if None then scales by 1/sqrt(dim)"
},
)
average_top_k_layers: int = field(
default=8, metadata={"help": "how many layers to average"}
)
layer_norm_target_layer: bool = False
instance_norm_target_layer: bool = False
instance_norm_targets: bool = False
layer_norm_targets: bool = False
batch_norm_target_layer: bool = False
group_norm_target_layer: bool = False
ema_decay: float = field(default=0.999, metadata={"help": "initial ema decay rate"})
ema_end_decay: float = field(
default=0.9999, metadata={"help": "final ema decay rate"}
)
# when to finish annealing ema decay rate
ema_anneal_end_step: int = II("optimization.max_update")
ema_transformer_only: bool = field(
default=True,
metadata={"help": "whether to momentum update only the transformer"},
)
ema_layers_only: bool = field(
default=True,
metadata={"help": "whether to momentum update only the transformer layers"},
)
max_update: int = II("optimization.max_update")
min_target_var: float = field(
default=0.1, metadata={"help": "stop training if target var falls below this"}
)
min_pred_var: float = field(
default=0.01,
metadata={"help": "stop training if prediction var falls below this"},
)
def get_annealed_rate(start, end, curr_step, total_steps):
r = end - start
pct_remaining = 1 - curr_step / total_steps
return end - r * pct_remaining
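# Worked example (illustrative numbers): with start=0.999, end=0.9999 and total_steps=100_000,
# get_annealed_rate returns 0.999 at step 0, ~0.99945 at step 50_000 and 0.9999 at step
# 100_000, i.e. the EMA decay is linearly interpolated from `start` to `end`.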
@register_model("data2vec_audio", dataclass=Data2VecAudioConfig)
class Data2VecAudioModel(BaseFairseqModel):
def __init__(self, cfg: Data2VecAudioConfig):
super().__init__()
self.cfg = cfg
feature_enc_layers = eval(cfg.conv_feature_layers)
self.extractor_embed = feature_enc_layers[-1][0]
self.ema = None
self.embed = cfg.encoder_embed_dim
self.average_top_k_layers = cfg.average_top_k_layers
self.loss_beta = cfg.loss_beta
self.loss_scale = cfg.loss_scale
self.feature_extractor = ConvFeatureExtractionModel(
conv_layers=feature_enc_layers,
dropout=0.0,
mode=cfg.extractor_mode,
conv_bias=cfg.conv_bias,
)
self.post_extract_proj = nn.Linear(self.extractor_embed, cfg.encoder_embed_dim)
self.mask_prob = cfg.mask_prob
self.mask_selection = cfg.mask_selection
self.mask_other = cfg.mask_other
self.mask_length = cfg.mask_length
self.no_mask_overlap = cfg.no_mask_overlap
self.mask_min_space = cfg.mask_min_space
self.mask_channel_prob = cfg.mask_channel_prob
self.mask_channel_before = cfg.mask_channel_before
self.mask_channel_selection = cfg.mask_channel_selection
self.mask_channel_other = cfg.mask_channel_other
self.mask_channel_length = cfg.mask_channel_length
self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
self.mask_channel_min_space = cfg.mask_channel_min_space
self.dropout_input = nn.Dropout(cfg.dropout_input)
self.dropout_features = nn.Dropout(cfg.dropout_features)
self.feature_grad_mult = cfg.feature_grad_mult
self.mask_emb = nn.Parameter(
torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
)
self.encoder = TransformerEncoder(cfg)
self.layer_norm = LayerNorm(self.extractor_embed)
self.final_proj = nn.Linear(self.embed, self.embed)
self.num_updates = 0
def make_ema_teacher(self):
ema_config = EMAModuleConfig(
ema_decay=self.cfg.ema_decay,
ema_fp32=True,
)
skip_keys = set()
if self.cfg.ema_layers_only:
self.cfg.ema_transformer_only = True
for k, _ in self.encoder.pos_conv.named_parameters():
skip_keys.add(f"pos_conv.{k}")
self.ema = EMAModule(
self.encoder if self.cfg.ema_transformer_only else self,
ema_config,
skip_keys=skip_keys,
)
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
if self.ema is None and self.final_proj is not None:
logger.info(f"making ema teacher")
self.make_ema_teacher()
elif self.training and self.ema is not None:
if self.cfg.ema_decay != self.cfg.ema_end_decay:
if num_updates >= self.cfg.ema_anneal_end_step:
decay = self.cfg.ema_end_decay
else:
decay = get_annealed_rate(
self.cfg.ema_decay,
self.cfg.ema_end_decay,
num_updates,
self.cfg.ema_anneal_end_step,
)
self.ema.set_decay(decay)
if self.ema.get_decay() < 1:
self.ema.step(self.encoder if self.cfg.ema_transformer_only else self)
self.num_updates = num_updates
def state_dict(self, destination=None, prefix="", keep_vars=False):
state = super().state_dict(destination, prefix, keep_vars)
if self.ema is not None:
state[prefix + "_ema"] = self.ema.fp32_params
return state
def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs):
if self.ema is not None:
k = prefix + "_ema"
assert k in state_dict
self.ema.restore(state_dict[k], True)
del state_dict[k]
return super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)
@classmethod
def build_model(cls, cfg: Data2VecAudioConfig, task=None):
"""Build a new model instance."""
return cls(cfg)
def apply_mask(
self,
x,
padding_mask,
mask_indices=None,
mask_channel_indices=None,
):
B, T, C = x.shape
if self.mask_channel_prob > 0 and self.mask_channel_before:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x[mask_channel_indices] = 0
if self.mask_prob > 0:
if mask_indices is None:
mask_indices = compute_mask_indices(
(B, T),
padding_mask,
self.mask_prob,
self.mask_length,
self.mask_selection,
self.mask_other,
min_masks=1,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
require_same_masks=self.cfg.require_same_masks,
mask_dropout=self.cfg.mask_dropout,
)
mask_indices = torch.from_numpy(mask_indices).to(x.device)
x = index_put(x, mask_indices, self.mask_emb)
else:
mask_indices = None
if self.mask_channel_prob > 0 and not self.mask_channel_before:
if mask_channel_indices is None:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x = index_put(x, mask_channel_indices, 0)
return x, mask_indices
def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
"""
Computes the output length of the convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride):
return torch.floor((input_length - kernel_size) / stride + 1)
conv_cfg_list = eval(self.cfg.conv_feature_layers)
for i in range(len(conv_cfg_list)):
input_lengths = _conv_out_length(
input_lengths, conv_cfg_list[i][1], conv_cfg_list[i][2]
)
return input_lengths.to(torch.long)
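    # Example (illustrative, assuming the default wav2vec 2.0 feature extractor
    # [(512,10,5)] + [(512,3,2)]*4 + [(512,2,2)]*2): an input of 16_000 samples (1 s at
    # 16 kHz) yields lengths 3199 -> 1599 -> 799 -> 399 -> 199 -> 99 -> 49 after the seven
    # convolutions, i.e. roughly one frame per 20 ms of audio.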
def forward(
self,
source,
padding_mask=None,
mask=True,
features_only=False,
layer=None,
mask_indices=None,
mask_channel_indices=None,
padding_count=None,
):
features = source
if self.feature_grad_mult > 0:
features = self.feature_extractor(features)
if self.feature_grad_mult != 1.0:
features = GradMultiply.apply(features, self.feature_grad_mult)
else:
with torch.no_grad():
features = self.feature_extractor(features)
features = features.transpose(1, 2)
features = self.layer_norm(features)
orig_padding_mask = padding_mask
if padding_mask is not None and padding_mask.any():
input_lengths = (1 - padding_mask.long()).sum(-1)
# apply conv formula to get real output_lengths
output_lengths = self._get_feat_extract_output_lengths(input_lengths)
padding_mask = torch.zeros(
features.shape[:2], dtype=features.dtype, device=features.device
)
            # these two operations make sure that all values
            # before the output-length indices are attended to
padding_mask[
(
torch.arange(padding_mask.shape[0], device=padding_mask.device),
output_lengths - 1,
)
] = 1
padding_mask = (1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])).bool()
else:
padding_mask = None
if self.post_extract_proj is not None:
features = self.post_extract_proj(features)
pre_encoder_features = None
if self.cfg.ema_transformer_only:
pre_encoder_features = features.clone()
features = self.dropout_input(features)
if mask:
x, mask_indices = self.apply_mask(
features,
padding_mask,
mask_indices=mask_indices,
mask_channel_indices=mask_channel_indices,
)
else:
x = features
mask_indices = None
x, layer_results = self.encoder(
x,
padding_mask=padding_mask,
layer=layer,
)
if features_only:
return {
"x": x,
"padding_mask": padding_mask,
"layer_results": layer_results,
}
result = {
"losses": {},
}
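        # Training target (descriptive note): the EMA teacher re-encodes the *unmasked*
        # features, its top-K (`average_top_k_layers`) transformer layer outputs are
        # normalized and averaged, and the student regresses those targets at the masked
        # time steps only (see the `mask_indices` indexing below).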
with torch.no_grad():
self.ema.model.eval()
if self.cfg.ema_transformer_only:
y, layer_results = self.ema.model.extract_features(
pre_encoder_features,
padding_mask=padding_mask,
min_layer=self.cfg.encoder_layers - self.average_top_k_layers,
)
y = {
"x": y,
"padding_mask": padding_mask,
"layer_results": layer_results,
}
else:
y = self.ema.model.extract_features(
source=source,
padding_mask=orig_padding_mask,
mask=False,
)
target_layer_results = [l[2] for l in y["layer_results"]]
permuted = False
if self.cfg.instance_norm_target_layer or self.cfg.batch_norm_target_layer:
target_layer_results = [
tl.permute(1, 2, 0) for tl in target_layer_results # TBC -> BCT
]
permuted = True
if self.cfg.batch_norm_target_layer:
target_layer_results = [
F.batch_norm(
tl.float(), running_mean=None, running_var=None, training=True
)
for tl in target_layer_results
]
if self.cfg.instance_norm_target_layer:
target_layer_results = [
F.instance_norm(tl.float()) for tl in target_layer_results
]
if permuted:
target_layer_results = [
tl.transpose(1, 2) for tl in target_layer_results # BCT -> BTC
]
if self.cfg.group_norm_target_layer:
target_layer_results = [
F.layer_norm(tl.float(), tl.shape[-2:])
for tl in target_layer_results
]
if self.cfg.layer_norm_target_layer:
target_layer_results = [
F.layer_norm(tl.float(), tl.shape[-1:])
for tl in target_layer_results
]
y = sum(target_layer_results) / len(target_layer_results)
if self.cfg.layer_norm_targets:
y = F.layer_norm(y.float(), y.shape[-1:])
if self.cfg.instance_norm_targets:
y = F.instance_norm(y.float().transpose(1, 2)).transpose(1, 2)
if not permuted:
y = y.transpose(0, 1)
y = y[mask_indices]
x = x[mask_indices]
x = self.final_proj(x)
sz = x.size(-1)
if self.loss_beta == 0:
loss = F.mse_loss(x.float(), y.float(), reduction="none").sum(dim=-1)
else:
loss = F.smooth_l1_loss(
x.float(), y.float(), reduction="none", beta=self.loss_beta
).sum(dim=-1)
if self.loss_scale is not None:
scale = self.loss_scale
else:
scale = 1 / math.sqrt(sz)
result["losses"]["regression"] = loss.sum() * scale
if "sample_size" not in result:
result["sample_size"] = loss.numel()
with torch.no_grad():
result["target_var"] = self.compute_var(y)
result["pred_var"] = self.compute_var(x.float())
if self.num_updates > 5000 and result["target_var"] < self.cfg.min_target_var:
logger.error(
f"target var is {result['target_var'].item()} < {self.cfg.min_target_var}, exiting"
)
raise Exception(
f"target var is {result['target_var'].item()} < {self.cfg.min_target_var}, exiting"
)
if self.num_updates > 5000 and result["pred_var"] < self.cfg.min_pred_var:
logger.error(
f"pred var is {result['pred_var'].item()} < {self.cfg.min_pred_var}, exiting"
)
raise Exception(
f"pred var is {result['pred_var'].item()} < {self.cfg.min_pred_var}, exiting"
)
if self.ema is not None:
result["ema_decay"] = self.ema.get_decay() * 1000
return result
@staticmethod
def compute_var(y):
y = y.view(-1, y.size(-1))
if dist.is_initialized():
zc = torch.tensor(y.size(0)).cuda()
zs = y.sum(dim=0)
zss = (y ** 2).sum(dim=0)
dist.all_reduce(zc)
dist.all_reduce(zs)
dist.all_reduce(zss)
var = zss / (zc - 1) - (zs ** 2) / (zc * (zc - 1))
return torch.sqrt(var + 1e-6).mean()
else:
return torch.sqrt(y.var(dim=0) + 1e-6).mean()
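    # The distributed branch all-reduces the per-worker count, sum and sum of squares and
    # then applies the unbiased-variance identity var = (sum_sq - sum^2 / n) / (n - 1), so
    # every worker sees the variance of the global batch rather than its local shard.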
def extract_features(
self, source, padding_mask, mask=False, layer=None
):
res = self.forward(
source,
padding_mask,
mask=mask,
features_only=True,
layer=layer,
)
return res
def remove_pretraining_modules(self, last_layer=None):
self.final_proj = None
self.ema = None
if last_layer is not None:
self.encoder.layers = nn.ModuleList(
l for i, l in enumerate(self.encoder.layers) if i <= last_layer
)
| 17,916 | 32.302974 | 104 | py |
rej-summ | rej-summ-main/examples/data2vec/models/data2vec_text.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from typing import Optional
import logging
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from omegaconf import II
from fairseq.dataclass import FairseqDataclass
from fairseq.modules import EMAModule, EMAModuleConfig
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
)
from fairseq.models.roberta.model import RobertaLMHead, RobertaClassificationHead
from fairseq.models.transformer import TransformerEncoder, TransformerConfig
from fairseq.modules.transformer_sentence_encoder import init_bert_params
logger = logging.getLogger(__name__)
@dataclass
class Data2VecTextConfig(FairseqDataclass):
max_positions: int = II("task.tokens_per_sample")
head_layers: int = 1
transformer: TransformerConfig = TransformerConfig()
load_checkpoint_heads: bool = field(
default=False,
metadata={"help": "(re-)register and load heads when loading checkpoints"},
)
loss_beta: float = field(
default=0, metadata={"help": "beta for smooth l1 loss. 0 means use l2 loss"}
)
loss_scale: Optional[float] = field(
default=None,
metadata={
"help": "scale the reconstruction loss by this constant. if None then scales by 1/sqrt(dim)"
},
)
average_top_k_layers: int = field(
default=8, metadata={"help": "how many layers to average"}
)
layer_norm_target_layer: bool = False
instance_norm_target_layer: bool = False
batch_norm_target_layer: bool = False
instance_norm_targets: bool = False
layer_norm_targets: bool = False
ema_decay: float = field(default=0.999, metadata={"help": "initial ema decay rate"})
ema_end_decay: float = field(
default=0.9999, metadata={"help": "final ema decay rate"}
)
# when to finish annealing ema decay rate
ema_anneal_end_step: int = II("optimization.max_update")
ema_transformer_layers_only: bool = field(
default=True,
metadata={"help": "whether to momentum update only the transformer layers"},
)
def get_annealed_rate(start, end, curr_step, total_steps):
r = end - start
pct_remaining = 1 - curr_step / total_steps
return end - r * pct_remaining
@register_model("data2vec_text", dataclass=Data2VecTextConfig)
class Data2VecTextModel(FairseqEncoderModel):
def __init__(self, cfg: Data2VecTextConfig, encoder):
super().__init__(encoder)
self.cfg = cfg
# We follow BERT's random weight initialization
self.apply(init_bert_params)
self.classification_heads = nn.ModuleDict()
@classmethod
def build_model(cls, cfg, task):
"""Build a new model instance."""
encoder = Data2VecTextEncoder(cfg, task.source_dictionary, task.cfg.data)
return cls(cfg, encoder)
def forward(
self,
src_tokens,
target_tokens=None,
features_only=False,
return_all_hiddens=False,
classification_head_name=None,
**kwargs,
):
if classification_head_name is not None:
features_only = True
res = self.encoder(
src_tokens, target_tokens, features_only, return_all_hiddens, **kwargs
)
if isinstance(res, tuple):
x, extra = res
else:
return res
if classification_head_name is not None:
x = self.classification_heads[classification_head_name](x)
return x, extra
def get_normalized_probs(self, net_output, log_probs, sample=None):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output[0].float()
if log_probs:
return F.log_softmax(logits, dim=-1)
else:
return F.softmax(logits, dim=-1)
def register_classification_head(
self, name, num_classes=None, inner_dim=None, **kwargs
):
"""Register a classification head."""
if name in self.classification_heads:
prev_num_classes = self.classification_heads[name].out_proj.out_features
prev_inner_dim = self.classification_heads[name].dense.out_features
if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
logger.warning(
're-registering head "{}" with num_classes {} (prev: {}) '
"and inner_dim {} (prev: {})".format(
name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
)
)
self.classification_heads[name] = RobertaClassificationHead(
input_dim=self.cfg.transformer.encoder.embed_dim,
inner_dim=inner_dim or self.cfg.transformer.encoder.embed_dim,
num_classes=num_classes,
activation_fn="tanh",
pooler_dropout=0,
)
@property
def supported_targets(self):
return {"self"}
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
# rename decoder -> encoder before upgrading children modules
for k in list(state_dict.keys()):
if k.startswith(prefix + "decoder"):
new_k = prefix + "encoder" + k[len(prefix + "decoder") :]
state_dict[new_k] = state_dict[k]
del state_dict[k]
# rename emb_layer_norm -> layernorm_embedding
for k in list(state_dict.keys()):
if ".emb_layer_norm." in k:
new_k = k.replace(".emb_layer_norm.", ".layernorm_embedding.")
state_dict[new_k] = state_dict[k]
del state_dict[k]
if self.encoder.regression_head is not None:
if ".lm_head." in k:
new_k = k.replace(".lm_head.", ".regression_head.")
state_dict[new_k] = state_dict[k]
del state_dict[k]
else:
if ".regression_head." in k:
del state_dict[k]
# upgrade children modules
super().upgrade_state_dict_named(state_dict, name)
# Handle new classification heads present in the state dict.
current_head_names = (
[]
if not hasattr(self, "classification_heads")
or self.classification_heads is None
else self.classification_heads.keys()
)
keys_to_delete = []
for k in state_dict.keys():
if not k.startswith(prefix + "classification_heads."):
continue
head_name = k[len(prefix + "classification_heads.") :].split(".")[0]
num_classes = state_dict[
prefix + "classification_heads." + head_name + ".out_proj.weight"
].size(0)
inner_dim = state_dict[
prefix + "classification_heads." + head_name + ".dense.weight"
].size(0)
if self.cfg.load_checkpoint_heads:
if head_name not in current_head_names:
self.register_classification_head(head_name, num_classes, inner_dim)
else:
if head_name not in current_head_names:
logger.warning(
"deleting classification head ({}) from checkpoint "
"not present in current model: {}".format(head_name, k)
)
keys_to_delete.append(k)
elif (
num_classes
!= self.classification_heads[head_name].out_proj.out_features
or inner_dim
!= self.classification_heads[head_name].dense.out_features
):
logger.warning(
"deleting classification head ({}) from checkpoint "
"with different dimensions than current model: {}".format(
head_name, k
)
)
keys_to_delete.append(k)
for k in keys_to_delete:
del state_dict[k]
# Copy any newly-added classification heads into the state dict
# with their current weights.
if (
hasattr(self, "classification_heads")
and self.classification_heads is not None
and len(self.classification_heads) > 0
):
cur_state = self.classification_heads.state_dict()
for k, v in cur_state.items():
if prefix + "classification_heads." + k not in state_dict:
logger.info("Overwriting " + prefix + "classification_heads." + k)
state_dict[prefix + "classification_heads." + k] = v
for k in list(state_dict.keys()):
if k.startswith(prefix + "encoder.lm_head.") or k.startswith(
prefix + "encoder.emb_head."
):
del state_dict[k]
self.encoder.lm_head = None
if self.encoder.target_model is None:
for k in list(state_dict.keys()):
if k.startswith(prefix + "encoder.target_model."):
del state_dict[k]
if (self.encoder.ema is None) and (prefix + "encoder._ema" in state_dict):
del state_dict[prefix + "encoder._ema"]
def remove_pretraining_modules(self, last_layer=None):
self.encoder.lm_head = None
self.encoder.regression_head = None
self.encoder.ema = None
self.classification_heads = None
if last_layer is not None:
self.encoder.sentence_encoder.layers = nn.ModuleList(
l
for i, l in enumerate(self.encoder.sentence_encoder.layers)
if i <= last_layer
)
self.encoder.sentence_encoder.layer_norm = None
class Data2VecTextEncoder(FairseqEncoder):
def __init__(self, cfg: Data2VecTextConfig, dictionary, task_data):
super().__init__(dictionary)
self.cfg = cfg
embed_tokens = self.build_embedding(
len(dictionary), cfg.transformer.encoder.embed_dim, dictionary.pad()
)
self.sentence_encoder = self.build_encoder(cfg, dictionary, embed_tokens)
self.mask_idx = dictionary.index("<mask>")
assert self.mask_idx != dictionary.unk(), dictionary.symbols
self.ema = None
self.average_top_k_layers = cfg.average_top_k_layers
self.loss_scale = cfg.loss_scale
assert self.cfg.head_layers >= 1
embed_dim = cfg.transformer.encoder.embed_dim
curr_dim = embed_dim
projs = []
for i in range(self.cfg.head_layers - 1):
next_dim = embed_dim * 2 if i == 0 else curr_dim
projs.append(nn.Linear(curr_dim, next_dim))
projs.append(nn.GELU())
curr_dim = next_dim
projs.append(nn.Linear(curr_dim, embed_dim))
self.regression_head = nn.Sequential(*projs)
self.num_updates = 0
def build_embedding(self, vocab_size, embedding_dim, padding_idx):
return nn.Embedding(vocab_size, embedding_dim, padding_idx)
def build_encoder(self, cfg, dictionary, embed_tokens):
encoder = TransformerEncoder(cfg.transformer, dictionary, embed_tokens, return_fc=True)
encoder.apply(init_bert_params)
return encoder
def build_lm_head(self, embed_dim, output_dim, activation_fn, weight):
return RobertaLMHead(embed_dim, output_dim, activation_fn, weight)
def make_ema_teacher(self):
ema_config = EMAModuleConfig(
ema_decay=self.cfg.ema_decay,
ema_fp32=True,
)
skip_keys = set()
if self.cfg.ema_transformer_layers_only:
            for k, _ in self.sentence_encoder.embed_tokens.named_parameters():
skip_keys.add(f"embed_tokens.{k}")
for k, _ in self.sentence_encoder.embed_positions.named_parameters():
skip_keys.add(f"embed_positions.{k}")
if self.sentence_encoder.layernorm_embedding is not None:
for (
k,
_,
) in self.sentence_encoder.layernorm_embedding.named_parameters():
skip_keys.add(f"layernorm_embedding.{k}")
if self.sentence_encoder.layer_norm is not None:
for k, _ in self.sentence_encoder.layer_norm.named_parameters():
skip_keys.add(f"layernorm_embedding.{k}")
self.ema = EMAModule(
self.sentence_encoder,
ema_config,
skip_keys=skip_keys,
)
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
if self.ema is None and self.regression_head is not None:
logger.info(f"making ema teacher")
self.make_ema_teacher()
elif self.training and self.ema is not None:
if self.cfg.ema_decay != self.cfg.ema_end_decay:
if num_updates >= self.cfg.ema_anneal_end_step:
decay = self.cfg.ema_end_decay
else:
decay = get_annealed_rate(
self.cfg.ema_decay,
self.cfg.ema_end_decay,
num_updates,
self.cfg.ema_anneal_end_step,
)
self.ema.set_decay(decay)
if self.ema.get_decay() < 1:
self.ema.step(self.sentence_encoder)
def state_dict(self, destination=None, prefix="", keep_vars=False):
state = super().state_dict(destination, prefix, keep_vars)
if self.ema is not None:
state[prefix + "_ema"] = self.ema.fp32_params
return state
def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs):
if self.ema is not None:
k = prefix + "_ema"
assert k in state_dict
self.ema.restore(state_dict[k], True)
del state_dict[k]
return super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)
def forward(
self,
src_tokens,
target_tokens=None,
features_only=False,
return_all_hiddens=False,
masked_tokens=None,
**unused,
):
"""
Args:
src_tokens (LongTensor): input tokens of shape `(batch, src_len)`
features_only (bool, optional): skip LM head and just return
features. If True, the output will be of shape
`(batch, src_len, embed_dim)`.
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
Returns:
tuple:
- the LM output of shape `(batch, src_len, vocab)`
- a dictionary of additional data, where 'inner_states'
is a list of hidden states. Note that the hidden
states have shape `(src_len, batch, vocab)`.
"""
x, extra = self.extract_features(
src_tokens, return_all_hiddens=return_all_hiddens
)
if features_only:
return x, extra
assert target_tokens is not None
with torch.no_grad():
# use EMA parameter as the teacher
self.ema.model.eval()
encoder_out = self.ema.model(
target_tokens,
return_all_hiddens=True,
)
y = encoder_out["fc_results"]
y = y[-self.average_top_k_layers :]
permuted = False
if self.cfg.instance_norm_target_layer or self.cfg.batch_norm_target_layer:
y = [tl.permute(1, 2, 0) for tl in y] # TBC -> BCT
permuted = True
if self.cfg.batch_norm_target_layer:
y = [
F.batch_norm(
tl.float(), running_mean=None, running_var=None, training=True
)
for tl in y
]
if self.cfg.instance_norm_target_layer:
y = [F.instance_norm(tl.float()) for tl in y]
if permuted:
y = [tl.transpose(1, 2) for tl in y] # BCT -> BTC
if self.cfg.layer_norm_target_layer:
y = [F.layer_norm(tl.float(), tl.shape[-1:]) for tl in y]
y = sum(y) / len(y)
if not permuted:
y = y.transpose(0, 1)
if self.cfg.layer_norm_targets:
y = F.layer_norm(y.float(), y.shape[-1:])
if self.cfg.instance_norm_targets:
y = F.instance_norm(y.transpose(1, 2)).transpose(1, 2)
masked_indices = src_tokens.eq(self.mask_idx)
x = x[masked_indices]
y = y[masked_indices]
x = self.regression_head(x)
sz = x.size(-1)
if self.cfg.loss_beta == 0:
loss = F.mse_loss(x.float(), y.float(), reduction="none").sum(dim=-1)
else:
loss = F.smooth_l1_loss(
x.float(), y.float(), reduction="none", beta=self.cfg.loss_beta
).sum(dim=-1)
result = {
"losses": {
"main": loss.sum() / math.sqrt(sz)
                if self.loss_scale is None or self.loss_scale <= 0
else loss.sum() * self.loss_scale,
},
"sample_size": loss.numel(),
}
# logging other values
other_logs = {
"ema_decay": self.ema.get_decay() * 1000
}
result["logs"] = other_logs
return result
def extract_features(self, src_tokens, return_all_hiddens=False, **kwargs):
encoder_out = self.sentence_encoder(
src_tokens,
return_all_hiddens=return_all_hiddens,
token_embeddings=kwargs.get("token_embeddings", None),
)
# T x B x C -> B x T x C
features = encoder_out["encoder_out"][0].transpose(0, 1)
inner_states = encoder_out["encoder_states"] if return_all_hiddens else None
return features, {
"inner_states": inner_states,
"encoder_embedding": encoder_out["encoder_embedding"][0],
}
def output_layer(self, features, masked_tokens=None, **unused):
return self.lm_head(features, masked_tokens)
def max_positions(self):
"""Maximum output length supported by the encoder."""
return self.cfg.max_positions
| 18,697 | 35.096525 | 104 | py |
rej-summ | rej-summ-main/examples/adaptive_span/adaptive_span_attention.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class AdaptiveMask(nn.Module):
"""Soft masking function for adaptive size.
It masks out the last K values of an input. The masking value
goes from 1 to 0 gradually, so K can be learned with
back-propagation.
Args:
max_size: maximum size (i.e. input dimension)
ramp_size: size of the ramp going from 0 to 1
init_val: initial size proportion not to be masked out
shape: learn multiple sizes independent of each other
"""
def __init__(self, max_size, ramp_size, init_val=0, shape=(1,)):
nn.Module.__init__(self)
self._max_size = max_size
self._ramp_size = ramp_size
self.current_val = nn.Parameter(torch.zeros(*shape) + init_val)
mask_template = torch.linspace(1 - max_size, 0, steps=max_size)
self.register_buffer("mask_template", mask_template)
def forward(self, x):
mask = self.mask_template.float() + self.current_val.float() * self._max_size
mask = mask / self._ramp_size + 1
mask = mask.clamp(0, 1)
if x.size(-1) < self._max_size:
# the input could have been trimmed beforehand to save computation
mask = mask.narrow(-1, self._max_size - x.size(-1), x.size(-1))
x = (x * mask).type_as(x)
return x
def get_current_max_size(self, include_ramp=True):
current_size = math.ceil(self.current_val.max().item() * self._max_size)
if include_ramp:
current_size += self._ramp_size
current_size = max(0, min(self._max_size, current_size))
return current_size
def get_current_avg_size(self, include_ramp=True):
current_size = math.ceil(
self.current_val.float().mean().item() * self._max_size
)
if include_ramp:
current_size += self._ramp_size
current_size = max(0, min(self._max_size, current_size))
return current_size
def clamp_param(self):
"""this need to be called after each update"""
self.current_val.data.clamp_(0, 1)
class AdaptiveSpan(nn.Module):
"""Adaptive attention span for Transformerself.
This module learns an attention span length from data for each
self-attention head.
Args:
attn_span: maximum attention span
adapt_span_loss: loss coefficient for the span length
adapt_span_ramp: length of the masking ramp
adapt_span_init: initial size ratio
adapt_span_cache: adapt cache size to reduce memory usage
"""
def __init__(
self,
attn_span,
adapt_span_ramp,
adapt_span_init,
n_head,
adapt_span_layer,
**kargs
):
nn.Module.__init__(self)
self._max_span = attn_span
self._n_head = n_head
self._adapt_span_layer = adapt_span_layer
if self._adapt_span_layer:
self._mask = AdaptiveMask(
max_size=self._max_span,
ramp_size=adapt_span_ramp,
init_val=adapt_span_init,
)
else:
self._mask = AdaptiveMask(
max_size=self._max_span,
ramp_size=adapt_span_ramp,
init_val=adapt_span_init,
shape=(n_head, 1, 1),
)
def forward(self, attn, normalize=True):
"""mask attention with the right span"""
# batch and head dimensions are merged together, so separate them first
self.clamp_param()
if self._adapt_span_layer:
attn = self._mask(attn)
else:
B = attn.size(0) # batch size
M = attn.size(1) # block size
attn = attn.reshape(B // self._n_head, self._n_head, M, -1)
attn = self._mask(attn)
attn = attn.view(B, M, -1)
return attn
def get_trim_len(self):
"""how much of memory can be trimmed to reduce computation"""
L = self._max_span
trim_len = min(L - 1, L - self._mask.get_current_max_size())
# too fine granularity might be bad for the memory management
trim_len = math.floor(trim_len / 64) * 64
return trim_len
def trim_memory(self, query, key, value, key_pe):
"""trim out unnecessary memory beforehand to reduce computation"""
trim_len = self.get_trim_len()
cache_size = key.size(1) - query.size(1)
trim_len_cache = trim_len - (self._max_span - cache_size)
if trim_len_cache > 0:
key = key[:, trim_len_cache:, :]
value = value[:, trim_len_cache:, :]
elif trim_len_cache < 0:
# cache is too short! this happens when validation resumes
# after a lot of updates.
key = F.pad(key, [0, 0, -trim_len_cache, 0])
value = F.pad(value, [0, 0, -trim_len_cache, 0])
if trim_len > 0:
if key_pe is not None:
key_pe = key_pe[:, :, trim_len:]
return key, value, key_pe
def get_cache_size(self):
"""determine how long the cache should be"""
trim_len = self.get_trim_len()
# give a buffer of 64 steps since a span might increase
# in future updates
return min(self._max_span, self._max_span - trim_len + 64)
def get_loss(self):
"""a loss term for regularizing the span length"""
return self._max_span * self._mask.current_val.float().mean()
def get_current_max_span(self):
return self._mask.get_current_max_size()
def get_current_avg_span(self):
return self._mask.get_current_avg_size()
def clamp_param(self):
self._mask.clamp_param()
| 5,881 | 35.534161 | 85 | py |
rej-summ | rej-summ-main/examples/adaptive_span/adagrad_with_grad_clip.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torch.optim import Adagrad
from fairseq.optim import LegacyFairseqOptimizer, register_optimizer
@register_optimizer("adagrad_with_grad_clip")
class FairseqAdagradWithGradClip(LegacyFairseqOptimizer):
def __init__(self, args, params):
super().__init__(args)
self._optimizer = AdagradWithGradClip(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
# fmt: off
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
help='weight decay')
parser.add_argument('--adagrad-clip', default=0.0, type=float, metavar='D',
help='internal grad clip')
# fmt: on
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.args.lr[0],
"weight_decay": self.args.weight_decay,
"grad_clip": self.args.adagrad_clip,
}
@property
def supports_flat_params(self):
return False
def _clip_grad(clr, grad, group_grad_clip):
if group_grad_clip > 0:
norm = grad.norm(2).item()
if norm > group_grad_clip:
clr *= group_grad_clip / (norm + 1e-10)
return clr
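# Note (descriptive): rather than rescaling the gradient tensor itself, _clip_grad shrinks
# the per-group learning rate by group_grad_clip / ||g||_2 whenever the gradient norm exceeds
# the threshold, which bounds the effective update norm the same way conventional
# gradient-norm clipping would.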
class AdagradWithGradClip(Adagrad):
"""Adagrad algorithm with custom gradient clipping"""
def __init__(
self,
params,
lr=1e-2,
lr_decay=0,
weight_decay=0,
initial_accumulator_value=0,
grad_clip=0,
):
Adagrad.__init__(
self,
params,
lr=lr,
lr_decay=lr_decay,
weight_decay=weight_decay,
initial_accumulator_value=initial_accumulator_value,
)
self.defaults["grad_clip"] = grad_clip
self.param_groups[0].setdefault("grad_clip", grad_clip)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
state["step"] += 1
if group["weight_decay"] != 0:
if p.grad.data.is_sparse:
raise RuntimeError(
"weight_decay option is "
"not compatible with sparse "
"gradients"
)
grad = grad.add(group["weight_decay"], p.data)
clr = group["lr"] / (1 + (state["step"] - 1) * group["lr_decay"])
# clip
clr = _clip_grad(clr=clr, grad=grad, group_grad_clip=group["grad_clip"])
if grad.is_sparse:
# the update is non-linear so indices must be unique
grad = grad.coalesce()
grad_indices = grad._indices()
grad_values = grad._values()
size = grad.size()
def make_sparse(values):
constructor = grad.new
if grad_indices.dim() == 0 or values.dim() == 0:
return constructor().resize_as_(grad)
return constructor(grad_indices, values, size)
state["sum"].add_(make_sparse(grad_values.pow(2)))
std = state["sum"]._sparse_mask(grad)
std_values = std._values().sqrt_().add_(1e-10)
p.data.add_(-clr, make_sparse(grad_values / std_values))
else:
state["sum"].addcmul_(1, grad, grad)
std = state["sum"].sqrt().add_(1e-10)
p.data.addcdiv_(-clr, grad, std)
return loss
| 4,374 | 32.914729 | 92 | py |
rej-summ | rej-summ-main/examples/adaptive_span/adaptive_span_model.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.modules.layer_norm import LayerNorm
from .adaptive_span_attention import AdaptiveSpan
# Size notations:
# B = batch_size, H = d_model, M = block_size, L = attn_span
def _skew(X, pad_value):
"""shift every row 1 step to right"""
# X = B x M x L
B, M, L = X.size()
X = F.pad(X, (0, M + 1), value=pad_value) # B x M x (L+M+1)
X = X.view(B, -1) # B x ML+MM+M
X = X[:, :-M] # B x ML+MM
X = X.view(B, M, M + L) # B x M x L+M
return X
def _unskew(X):
"""reverse _skew operation"""
# X = B x M x L+M
B, M, L = X.size()
L -= M
X = X.view(B, -1) # B x ML+MM
X = F.pad(X, (0, M)) # B x ML+MM+M
X = X.view(B, M, M + L + 1) # B x M x L+M+1
X = X[:, :, :L] # B x M x L
return X
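# Illustrative example (not part of the original code): with B=1, M=2, L=3 and rows
# [[a, b, c], [d, e, f]], _skew pads and reshapes so that row i is shifted i steps to the
# right inside a width-(L+M) buffer:
#     [[a, b, c, 0, 0],
#      [0, d, e, f, 0]]
# which lets relative-position attention be computed with a single matmul; _unskew undoes
# the shift and recovers the original B x M x L layout.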
class SeqAttention(nn.Module):
"""Sequential self-attention layer.
Each token will attend to its previous fixed number of steps.
Note that attention doesn't include the current step itself.
"""
def __init__(self, d_model, n_head, attn_span, dropout, adapt_span_layer, **kargs):
nn.Module.__init__(self)
self.dropout = nn.Dropout(dropout)
self.d_model = d_model # size of a single head
self.attn_span = attn_span
self.adaptive_span = AdaptiveSpan(
attn_span=attn_span,
n_head=n_head,
adapt_span_layer=adapt_span_layer,
**kargs
)
def forward(self, query, key, value, key_pe):
# query size = B x M x H
# key, value sizes = B x (M+L) x H
key, value, key_pe = self.adaptive_span.trim_memory(query, key, value, key_pe)
# compute attention from context
# B x M (dest) x (M+L) (src)
attn_cont = torch.matmul(query, key.transpose(-1, -2))
attn_cont = _unskew(attn_cont) # B x M x L
# compute the effect of position embedding
attn_pos = torch.matmul(query, key_pe) # B x M x L_pos
attn = attn_cont + attn_pos
attn = attn / math.sqrt(self.d_model) # B x M X L_pos
attn = F.softmax(attn.float(), dim=-1).type_as(attn)
# trim attention lengths according to the learned span
attn = self.adaptive_span(attn)
attn = self.dropout(attn) # B x M X L_pos
attn_cont = _skew(attn, 0) # B x M X (L+M)
out = torch.matmul(attn_cont, value) # B x M x H
return out
def get_cache_size(self):
return self.adaptive_span.get_cache_size()
class MultiHeadSeqAttention(nn.Module):
def __init__(self, d_model, n_head, **kargs):
nn.Module.__init__(self)
assert d_model % n_head == 0
self.n_head = n_head
self.head_dim = d_model // n_head
self.attn = SeqAttention(d_model=self.head_dim, n_head=n_head, **kargs)
self.proj_query = nn.Linear(d_model, d_model, bias=False)
nn.init.xavier_normal_(self.proj_query.weight)
self.proj_out = nn.Linear(d_model, d_model, bias=False)
nn.init.xavier_normal_(self.proj_out.weight)
self.proj_val = nn.Linear(d_model, d_model, bias=False)
nn.init.xavier_normal_(self.proj_val.weight)
self.proj_key = nn.Linear(d_model, d_model, bias=False)
nn.init.xavier_normal_(self.proj_key.weight)
def head_reshape(self, x):
K = self.n_head
D = self.head_dim
x = x.view(x.size()[:-1] + (K, D)) # B x (M+L) x K x D
x = x.transpose(1, 2).contiguous() # B x K x (M+L) x D
x = x.view(-1, x.size(-2), x.size(-1)) # B_K x (M+L) x D
return x
def forward(self, query, key, value, key_pe):
B = query.size(0)
K = self.n_head
D = self.head_dim
M = query.size(1)
query = self.proj_query(query)
query = self.head_reshape(query)
value = self.proj_val(value)
value = self.head_reshape(value)
key = self.proj_key(key)
key = self.head_reshape(key)
out = self.attn(query, key, value, key_pe) # B_K x M x D
out = out.view(B, K, M, D) # B x K x M x D
out = out.transpose(1, 2).contiguous() # B x M x K x D
out = out.view(B, M, -1) # B x M x K_D
out = self.proj_out(out)
return out
class FeedForwardLayer(nn.Module):
def __init__(self, d_model, d_inner, dropout, **kargs):
nn.Module.__init__(self)
self.fc1 = nn.Linear(d_model, d_inner)
self.fc2 = nn.Linear(d_inner, d_model)
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.xavier_uniform_(self.fc2.weight)
self.dropout = nn.Dropout(dropout)
def forward(self, h):
h1 = F.relu(self.fc1(h))
h1 = self.dropout(h1)
h2 = self.fc2(h1)
return h2
class TransformerSeqLayer(nn.Module):
def __init__(self, d_model, **kargs):
nn.Module.__init__(self)
self.attn = MultiHeadSeqAttention(d_model=d_model, **kargs)
self.norm1 = LayerNorm(d_model)
self.ff = FeedForwardLayer(d_model=d_model, **kargs)
self.norm2 = LayerNorm(d_model)
def forward(self, h, h_cache, key_pe):
# h = B x M x H
# h_cache = B x L x H
h_all = torch.cat([h_cache, h], dim=1) # B x (M+L) x H
attn_out = self.attn(h, h_all, h_all, key_pe)
h = self.norm1(h + attn_out) # B x M x H
if self.ff is not None:
ff_out = self.ff(h)
out = self.norm2(h + ff_out) # B x M x H
else:
out = h
return out
def get_cache_size(self):
return self.attn.attn.get_cache_size()
class TransformerSeq(nn.Module):
def __init__(
self,
vocab_size,
d_model,
n_head,
n_layer,
attn_span,
emb_dropout,
aux_loss_scaler,
adapt_span_layer,
**kargs
):
nn.Module.__init__(self)
# token embeddings
self.in_emb = nn.Embedding(vocab_size, d_model)
nn.init.normal_(self.in_emb.weight, mean=0, std=d_model ** -0.5)
self.out_emb = nn.Linear(d_model, vocab_size)
self.aux_loss_scaler = aux_loss_scaler
if emb_dropout > 0:
self.emb_dropout = nn.Dropout(emb_dropout)
else:
self.emb_dropout = None
# position embeddings
self.key_pe = nn.Parameter(torch.randn(1, d_model // n_head, attn_span))
self.layers = nn.ModuleList()
self.layers.extend(
TransformerSeqLayer(
d_model=d_model,
n_head=n_head,
attn_span=attn_span,
adapt_span_layer=adapt_span_layer,
**kargs
)
for _ in range(n_layer)
)
def forward(self, x, h_cache, target=None):
# x size = B x M
block_size = x.size(1)
h = self.in_emb(x) # B x M x H
if self.emb_dropout is not None:
h = self.emb_dropout(h)
h_cache_next = []
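        # For every layer, keep the most recent `cache_size` hidden states (detached, so no
        # backprop through the cache) for the next block of tokens; when the adaptive span
        # needs more than one block, old cache entries are carried over, otherwise only the
        # tail of the current block is kept.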
for l, layer in enumerate(self.layers):
cache_size = layer.attn.attn.get_cache_size()
if cache_size > block_size:
h_cache_next_l = torch.cat(
[h_cache[l][:, -cache_size + block_size :, :], h], dim=1
).detach()
else:
h_cache_next_l = h[:, -cache_size:, :].detach()
h_cache_next.append(h_cache_next_l)
h = layer(h, h_cache[l], self.key_pe) # B x M x H
if self.emb_dropout is not None:
h = self.emb_dropout(h)
out = F.log_softmax(self.out_emb(h).float(), dim=-1).type_as(h)
dummy_loss = None
return out, h_cache_next, dummy_loss
def get_aux_loss(self):
loss = 0.0
for layer in self.layers:
loss += layer.attn.attn.adaptive_span.get_loss()
return self.aux_loss_scaler * loss
def get_current_max_span(self):
max_span = 0.0
for layer in self.layers:
max_span = max(
max_span, layer.attn.attn.adaptive_span.get_current_max_span()
)
return max_span
def get_current_avg_span(self):
avg_span = 0.0
for layer in self.layers:
avg_span += layer.attn.attn.adaptive_span.get_current_avg_span()
return avg_span / len(self.layers)
| 8,540 | 31.352273 | 87 | py |
rej-summ | rej-summ-main/examples/adaptive_span/adaptive_span_loss.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import register_criterion
from fairseq.criterions.cross_entropy import CrossEntropyCriterion
from fairseq.dataclass import FairseqDataclass
from omegaconf import II
@dataclass
class AdaptiveSpanCriterionConfig(FairseqDataclass):
sentence_avg: bool = II("optimization.sentence_avg")
@register_criterion("adaptive_span_loss", dataclass=AdaptiveSpanCriterionConfig)
class AdaptiveSpanCriterion(CrossEntropyCriterion):
def __init__(self, task, sentence_avg):
super().__init__(task, sentence_avg)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
        1) the loss (summed here, unlike the original adaptive-span code)
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
loss, aux_loss, avg_span, max_span = self.compute_loss(
model, net_output, sample, reduce=reduce
)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
loss /= sample_size
total_loss = loss + aux_loss
sample_size = 1
logging_output = {
"loss": loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
"total_loss": total_loss.data,
"avg_span": avg_span * sample_size,
"max_span": max_span * sample_size,
}
return total_loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
loss, _ = super().compute_loss(model, net_output, sample, reduce)
aux_loss = model.get_aux_loss()
avg_span = model.get_current_avg_span()
max_span = model.get_current_max_span()
return loss, aux_loss, avg_span, max_span
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
total_loss_sum = sum(log.get("total_loss", 0) for log in logging_outputs)
avg_span_sum = sum(log.get("avg_span", 0) for log in logging_outputs)
max_span_sum = sum(log.get("max_span", 0) for log in logging_outputs)
# we divide by log(2) to convert the loss from base e to base 2
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar("avg_span", avg_span_sum / sample_size, sample_size, round=3)
metrics.log_scalar("max_span", max_span_sum / sample_size, sample_size, round=3)
# total loss contains the L1 norm on adaptive-span
metrics.log_scalar(
"total_loss",
total_loss_sum / sample_size / math.log(2),
sample_size,
round=3,
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
else:
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
| 4,233 | 38.570093 | 88 | py |
rej-summ | rej-summ-main/examples/adaptive_span/adaptive_span_model_wrapper.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass
from typing import Dict, List, Optional
import torch
from fairseq.dataclass import FairseqDataclass
from fairseq.models import (
FairseqIncrementalDecoder,
FairseqLanguageModel,
register_model,
)
from .adaptive_span_model import TransformerSeq as AdaptiveSpanTransformerModel
logger = logging.getLogger(__name__)
@dataclass
class AdaptiveSpanSmallConfig(FairseqDataclass):
# defaults come from https://github.com/facebookresearch/adaptive-span/blob/master/experiments/enwik8_small.sh
vocab_size: int = 50
d_model: int = 256
n_head: int = 4
d_inner: int = 1024
n_layer: int = 8
attn_span: int = 1024
dropout: float = 0.0
emb_dropout: float = 0.0
adapt_span_ramp: int = 32
adapt_span_init: float = 0.0
aux_loss_scaler: float = 0.000002
adapt_span_layer: bool = False
@register_model("adaptive_span", dataclass=AdaptiveSpanSmallConfig)
class AdaptiveSpanTransformer(FairseqLanguageModel):
@classmethod
def build_model(cls, cfg: AdaptiveSpanSmallConfig, task):
return cls(AdaptiveSpanDecoder(cfg, task))
def get_aux_loss(self):
return self.decoder.get_aux_loss()
def get_current_max_span(self):
return self.decoder.get_current_max_span()
def get_current_avg_span(self):
return self.decoder.get_current_avg_span()
class AdaptiveSpanDecoder(FairseqIncrementalDecoder):
def __init__(self, cfg, task):
super().__init__(task.target_dictionary)
self.config = cfg
config = AdaptiveSpanSmallConfig(
vocab_size=len(task.target_dictionary),
d_model=cfg.d_model,
n_head=cfg.n_head,
d_inner=cfg.d_inner,
n_layer=cfg.n_layer,
attn_span=cfg.attn_span,
dropout=cfg.dropout,
emb_dropout=cfg.emb_dropout,
adapt_span_ramp=cfg.adapt_span_ramp,
adapt_span_init=cfg.adapt_span_init,
aux_loss_scaler=cfg.aux_loss_scaler,
adapt_span_layer=cfg.adapt_span_layer,
)
logger.info(config)
self.model = AdaptiveSpanTransformerModel(**config.__dict__)
self._mems = None
def forward(
self,
src_tokens,
incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,
encoder_out=None,
):
bsz = src_tokens.size(0)
if incremental_state is not None: # used during inference
mems = self.get_incremental_state("mems")
src_tokens = src_tokens[:, -1:] # only keep the most recent token
else:
mems = self._mems
if mems is None:
# first time init
mems = self.init_hid_cache(bsz)
output = self.model(x=src_tokens, h_cache=mems,)
if incremental_state is not None:
self.set_incremental_state(incremental_state, "mems", output[1])
else:
self._mems = output[1]
return (output[0],)
def max_positions(self):
return self.config.attn_span
def init_hid_cache(self, batch_sz):
hid = []
for layer in self.model.layers:
param = next(self.model.parameters())
h = torch.zeros(
batch_sz,
layer.get_cache_size(),
self.config.d_model,
dtype=param.dtype,
device=param.device,
)
hid.append(h)
return hid
def get_aux_loss(self):
return self.model.get_aux_loss()
def get_current_max_span(self):
return self.model.get_current_max_span()
def get_current_avg_span(self):
return self.model.get_current_avg_span()
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[torch.Tensor]]],
new_order: torch.Tensor,
):
"""Reorder incremental state.
This will be called when the order of the input has changed from the
previous time step. A typical use case is beam search, where the input
order changes between time steps based on the selection of beams.
"""
raise NotImplementedError("This is required for generation/beam search")
# mems = self.get_incremental_state(incremental_state, "mems")
# if mems is not None:
# new_mems = [mems_i.index_select(1, new_order) for mems_i in mems]
# self.set_incremental_state(incremental_state, "mems", new_mems)
| 4,692 | 31.143836 | 114 | py |
rej-summ | rej-summ-main/examples/MMPT/setup.py | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="mmpt",
version="0.0.1",
author="Hu Xu, Po-yao Huang",
author_email="huxu@fb.com",
description="A package for multimodal pretraining.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/pytorch/fairseq/examples/MMPT",
packages=setuptools.find_packages(),
install_requires=[
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: CC-BY-NC",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 668 | 25.76 | 59 | py |
rej-summ | rej-summ-main/examples/MMPT/mmpt_cli/predict.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import glob
import argparse
import pprint
import omegaconf
from omegaconf import OmegaConf
from torch.utils.data import DataLoader
from mmpt.utils import load_config, set_seed
from mmpt.evaluators import Evaluator
from mmpt.evaluators import predictor as predictor_path
from mmpt.tasks import Task
from mmpt import processors
from mmpt.datasets import MMDataset
def get_dataloader(config):
meta_processor_cls = getattr(processors, config.dataset.meta_processor)
video_processor_cls = getattr(processors, config.dataset.video_processor)
text_processor_cls = getattr(processors, config.dataset.text_processor)
aligner_cls = getattr(processors, config.dataset.aligner)
meta_processor = meta_processor_cls(config.dataset)
video_processor = video_processor_cls(config.dataset)
text_processor = text_processor_cls(config.dataset)
aligner = aligner_cls(config.dataset)
test_data = MMDataset(
meta_processor,
video_processor,
text_processor,
aligner,
)
print("test_len", len(test_data))
output = test_data[0]
test_data.print_example(output)
test_dataloader = DataLoader(
test_data,
batch_size=config.fairseq.dataset.batch_size,
shuffle=False,
num_workers=6,
collate_fn=test_data.collater,
)
return test_dataloader
def main(args):
config = load_config(args)
if isinstance(config, omegaconf.dictconfig.DictConfig):
print(OmegaConf.to_yaml(config))
else:
pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(config)
mmtask = Task.config_task(config)
mmtask.build_model()
test_dataloader = get_dataloader(config)
checkpoint_search_path = os.path.dirname(config.eval.save_path)
results = []
prefix = os.path.basename(args.taskconfig)
if prefix.startswith("test"):
        # loop over all checkpoints for datasets without a validation set.
if "best" not in config.fairseq.common_eval.path:
print("eval each epoch.")
for checkpoint in glob.glob(checkpoint_search_path + "/checkpoint*"):
model = mmtask.load_checkpoint(checkpoint)
ckpt = os.path.basename(checkpoint)
evaluator = Evaluator(config)
output = evaluator.evaluate(
model, test_dataloader, ckpt + "_merged")
results.append((checkpoint, output))
        # finally, evaluate the checkpoint specified by the config.
model = mmtask.load_checkpoint(config.fairseq.common_eval.path)
evaluator = Evaluator(config)
output = evaluator.evaluate(model, test_dataloader)
results.append((config.fairseq.common_eval.path, output))
best_result = None
best_metric = 0.
for checkpoint, result in results:
print(checkpoint)
evaluator.metric.print_computed_metrics(result)
best_score = evaluator.metric.best_metric(result)
if best_score > best_metric:
best_result = (checkpoint, result)
best_metric = best_score
print("best results:")
print(best_result[0])
evaluator.metric.print_computed_metrics(best_result[1])
elif prefix.startswith("vis"):
model = mmtask.load_checkpoint(config.fairseq.common_eval.path)
predictor_cls = getattr(predictor_path, config.predictor)
predictor = predictor_cls(config)
predictor.predict_loop(model, test_dataloader, mmtask, None)
else:
raise ValueError("unknown prefix of the config file", args.taskconfig)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("taskconfig", type=str)
args = parser.parse_args()
main(args)
| 3,937 | 33.54386 | 81 | py |
rej-summ | rej-summ-main/examples/MMPT/mmpt/modules/mm.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. All Rights Reserved
import torch
from torch import nn
try:
from transformers.modeling_bert import (
BertEmbeddings,
ACT2FN,
)
except ImportError:
pass
class VideoTokenMLP(nn.Module):
def __init__(self, config):
super().__init__()
input_dim = config.input_dim if hasattr(config, "input_dim") else 512
self.linear1 = nn.Linear(input_dim, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size)
self.activation = ACT2FN[config.hidden_act]
self.linear2 = nn.Linear(config.hidden_size, config.hidden_size)
def forward(self, hidden_states):
hidden_states = self.linear1(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
hidden_states = self.linear2(hidden_states)
return hidden_states
class MMBertEmbeddings(BertEmbeddings):
def __init__(self, config):
super().__init__(config)
self.max_video_len = config.max_video_len
if hasattr(config, "use_seg_emb") and config.use_seg_emb:
"""the original VLM paper uses seg_embeddings for temporal space.
            although not used, it changes the randomness of initialization,
            so we keep it for reproducibility.
"""
self.seg_embeddings = nn.Embedding(256, config.hidden_size)
def forward(
self,
input_ids,
input_video_embeds,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
):
input_tensor = input_ids if input_ids is not None else inputs_embeds
if input_video_embeds is not None:
input_shape = (
input_tensor.size(0),
input_tensor.size(1) + input_video_embeds.size(1),
)
else:
input_shape = (input_tensor.size(0), input_tensor.size(1))
if position_ids is None:
"""
Auto skip position embeddings for text only case.
use cases:
(1) action localization and segmentation:
                a len-1 dummy video token is fed in, so the text part
                needs to skip input_video_embeds.size(1) to get the right
                position_ids for the video [SEP] and the remaining text tokens.
            (2) MMFusionShare, which does two forward passes:
                in `forward_text`, input_video_embeds is None,
                so the video [SEP] token needs to be skipped.
# video_len + 1: [CLS] + video_embed
# self.max_video_len + 1: [SEP] for video.
            # self.max_video_len + 2: first text token (text-only case).
# self.max_video_len + input_ids.size(1): rest for text.
"""
if input_video_embeds is not None:
video_len = input_video_embeds.size(1)
starting_offset = self.max_video_len + 1 # video [SEP]
ending_offset = self.max_video_len + input_ids.size(1)
else:
video_len = 0
starting_offset = self.max_video_len + 2 # first text token.
ending_offset = self.max_video_len + input_ids.size(1) + 1
position_ids = torch.cat([
self.position_ids[:, :video_len + 1],
self.position_ids[:, starting_offset:ending_offset]
], dim=1)
if token_type_ids is None:
token_type_ids = torch.zeros(
input_shape, dtype=torch.long, device=self.position_ids.device
)
"""
the format of input_ids is [CLS] [SEP] caption [SEP] padding.
the goal is to build [CLS] video tokens [SEP] caption [SEP] .
"""
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if input_video_embeds is not None:
inputs_mm_embeds = torch.cat([
inputs_embeds[:, :1], input_video_embeds, inputs_embeds[:, 1:]
], dim=1)
else:
# text only for `MMFusionShare`.
inputs_mm_embeds = inputs_embeds
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_mm_embeds + position_embeddings
embeddings += token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class AlignHead(nn.Module):
"""this will load pre-trained weights for NSP, which is desirable."""
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, dropout_pooled_output):
logits = self.seq_relationship(dropout_pooled_output)
return logits
| 5,537 | 36.931507 | 83 | py |
rej-summ | rej-summ-main/examples/MMPT/mmpt/modules/vectorpool.py | # Copyright (c) Facebook, Inc. All Rights Reserved
import torch
import os
import numpy as np
import pickle
from . import retri
from ..utils import get_local_rank
class VectorPool(object):
"""
Base class of retrieval space.
"""
def __init__(self, config):
from transformers import AutoConfig
self.hidden_size = AutoConfig.from_pretrained(
config.dataset.bert_name).hidden_size
self.retriever_cls = getattr(retri, config.retriever_cls)
def __call__(self, sample, **kwargs):
raise NotImplementedError
def build_retriver(
self,
retriever_cls=None,
hidden_size=None,
centroids=512,
db_type="flatl2",
examples_per_cent_to_train=48
):
"""merge results from multiple gpus and return a retriver.."""
self.retriver = retriever_cls(
hidden_size, centroids, db_type, examples_per_cent_to_train)
return self.retriver
def __repr__(self):
if hasattr(self, "retriver"):
retriver_name = str(len(self.retriver))
else:
retriver_name = "no retriver field yet"
return self.__class__.__name__ \
+ "(" + retriver_name + ")"
class VideoVectorPool(VectorPool):
"""
average clips of a video as video representation.
"""
def __init__(self, config):
super().__init__(config)
self.build_retriver(self.retriever_cls, self.hidden_size)
def __call__(self, sample, subsampling, **kwargs):
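        # Average the pooled video and text vectors, then mean-pool over the
        # `subsampling` clips of each video to get a single vector per video id.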
hidden_states = (
sample["pooled_video"] + sample["pooled_text"]) / 2.
hidden_states = hidden_states.view(
-1, subsampling,
hidden_states.size(-1))
hidden_states = torch.mean(hidden_states, dim=1)
hidden_states = hidden_states.cpu().detach().numpy()
video_ids = []
for offset_idx, video_id in enumerate(sample["video_id"]):
if isinstance(video_id, tuple) and len(video_id) == 3:
# a sharded video_id.
video_id = video_id[0]
video_ids.append(video_id)
assert len(video_ids) == len(hidden_states)
self.retriver.add(
hidden_states.astype("float32"),
video_ids
)
class DistributedVectorPool(VectorPool):
"""
support sync of multiple gpus/nodes.
"""
def __init__(self, config):
super().__init__(config)
self.out_dir = os.path.join(
config.fairseq.checkpoint.save_dir,
"retri")
os.makedirs(self.out_dir, exist_ok=True)
self.hidden_states = []
self.video_ids = []
def build_retriver(
self,
retriever_cls=None,
hidden_size=None,
centroids=4096,
db_type="flatl2",
examples_per_cent_to_train=48
):
if retriever_cls is None:
retriever_cls = self.retriever_cls
if hidden_size is None:
hidden_size = self.hidden_size
"""merge results from multiple gpus and return a retriver.."""
if torch.distributed.is_initialized():
self.save()
# sync saving.
torch.distributed.barrier()
world_size = torch.distributed.get_world_size()
else:
world_size = 1
self.retriver = retriever_cls(
hidden_size, centroids, db_type, examples_per_cent_to_train)
# each gpu process has its own retriever.
for local_rank in range(world_size):
if get_local_rank() == 0:
print("load local_rank", local_rank)
hidden_states, video_ids = self.load(local_rank)
hidden_states = hidden_states.astype("float32")
self.retriver.add(hidden_states, video_ids)
return self.retriver
def load(self, local_rank):
hidden_states = np.load(
os.path.join(
self.out_dir,
"hidden_state" + str(local_rank) + ".npy"
)
)
with open(
os.path.join(
self.out_dir, "video_id" + str(local_rank) + ".pkl"),
"rb") as fr:
video_ids = pickle.load(fr)
return hidden_states, video_ids
def save(self):
hidden_states = np.vstack(self.hidden_states)
assert len(hidden_states) == len(self.video_ids), "{}, {}".format(
len(hidden_states),
len(self.video_ids)
)
local_rank = torch.distributed.get_rank() \
if torch.distributed.is_initialized() else 0
np.save(
os.path.join(
self.out_dir,
"hidden_state" + str(local_rank) + ".npy"),
hidden_states)
with open(
os.path.join(
self.out_dir,
"video_id" + str(local_rank) + ".pkl"),
"wb") as fw:
pickle.dump(
self.video_ids,
fw,
protocol=pickle.HIGHEST_PROTOCOL
)
class DistributedVideoVectorPool(DistributedVectorPool):
"""
average clips of a video as video representation.
"""
def __call__(self, sample, subsampling, **kwargs):
hidden_states = (
sample["pooled_video"] + sample["pooled_text"]) / 2.
hidden_states = hidden_states.view(
-1, subsampling,
hidden_states.size(-1))
hidden_states = torch.mean(hidden_states, dim=1)
hidden_states = hidden_states.cpu().detach().numpy()
video_ids = []
for offset_idx, video_id in enumerate(sample["video_id"]):
if isinstance(video_id, tuple) and len(video_id) == 3:
# a sharded video_id.
video_id = video_id[0]
video_ids.append(video_id)
assert len(video_ids) == len(hidden_states)
self.hidden_states.append(hidden_states)
self.video_ids.extend(video_ids)
# ------------ the following are deprecated --------------
class TextClipVectorPool(VectorPool):
def __init__(self, config):
from transformers import AutoConfig
hidden_size = AutoConfig.from_pretrained(
config.dataset.bert_name).hidden_size
retriever_cls = getattr(retri, config.retriever_cls)
self.build_retriver(retriever_cls, hidden_size)
def __call__(self, sample, **kwargs):
clip_meta = sample["clip_meta"].cpu()
assert torch.all(torch.le(clip_meta[:, 4], clip_meta[:, 5]))
text_meta = [tuple(item.tolist()) for item in clip_meta[:, 3:]]
if hasattr(self, "retriver"):
# build_retriver is called.
self.retriver.add(
sample["pooled_text"].cpu().numpy().astype("float32"),
text_meta
)
else:
raise NotImplementedError
class MMClipVectorPool(VectorPool):
"""
Multimodal Clip-level vector pool.
"""
def __init__(self, out_dir):
"""use hidden_states to store `(video, text)`."""
"""use video_ids to store `(video_id, start, end)`."""
super().__init__(out_dir)
def __call__(self, sample, **kwargs):
pooled_video = sample["pooled_video"].cpu().unsqueeze(1).numpy()
pooled_text = sample["pooled_text"].cpu().unsqueeze(1).numpy()
self.hidden_states.append(
np.concatenate([pooled_video, pooled_text], axis=1)
)
video_starts = sample["video_start"].cpu()
video_ends = sample["video_end"].cpu()
assert torch.all(torch.le(video_starts, video_ends))
text_starts = sample["text_start"].cpu()
text_ends = sample["text_end"].cpu()
assert torch.all(torch.le(text_starts, text_ends))
subsample_size = sample["pooled_video"].size(0) // len(sample["video_id"])
video_ids = [video_id for video_id in sample["video_id"]
for _ in range(subsample_size)
]
for video_id, video_start, video_end, text_start, text_end in zip(
video_ids, video_starts, video_ends, text_starts, text_ends):
self.video_ids.append((
video_id,
(int(video_start), int(video_end)),
(int(text_start), int(text_end))
))
| 8,278 | 32.518219 | 82 | py |
rej-summ | rej-summ-main/examples/MMPT/mmpt/models/transformermodel.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. All Rights Reserved
import torch
from torch import nn
try:
from transformers.modeling_bert import (
BertPreTrainedModel,
BertModel,
BertEncoder,
BertPredictionHeadTransform,
)
except ImportError:
pass
from ..modules import VideoTokenMLP, MMBertEmbeddings
# --------------- fine-tuning models ---------------
class MMBertForJoint(BertPreTrainedModel):
"""A BertModel with isolated attention mask to separate modality."""
def __init__(self, config):
super().__init__(config)
self.videomlp = VideoTokenMLP(config)
self.bert = MMBertModel(config)
self.init_weights()
def forward(
self,
input_ids=None,
input_video_embeds=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
next_sentence_label=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
separate_forward_split=None,
):
return_dict = (
return_dict if return_dict is not None
else self.config.use_return_dict
)
video_tokens = self.videomlp(input_video_embeds)
outputs = self.bert(
input_ids,
video_tokens,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
separate_forward_split=separate_forward_split,
)
return outputs
class MMBertForTokenClassification(BertPreTrainedModel):
"""A BertModel similar to MMJointUni, with extra wrapper layer
to be fine-tuned from other pretrained MMFusion model."""
def __init__(self, config):
super().__init__(config)
self.videomlp = VideoTokenMLP(config)
self.bert = MMBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# TODO(huxu): 779 is the number of classes for COIN: move to config?
self.classifier = nn.Linear(config.hidden_size, 779)
self.init_weights()
def forward(
self,
input_ids=None,
input_video_embeds=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
next_sentence_label=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
separate_forward_split=None,
):
return_dict = (
return_dict if return_dict is not None
else self.config.use_return_dict
)
video_tokens = self.videomlp(input_video_embeds)
outputs = self.bert(
input_ids,
video_tokens,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
separate_forward_split=separate_forward_split,
)
return (self.classifier(outputs[0]),)
# ------------ pre-training models ----------------
class MMBertForEncoder(BertPreTrainedModel):
"""A BertModel for Contrastive Learning."""
def __init__(self, config):
super().__init__(config)
self.videomlp = VideoTokenMLP(config)
self.bert = MMBertModel(config)
self.init_weights()
def forward(
self,
input_ids=None,
input_video_embeds=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
return_dict = (
return_dict if return_dict is not None
else self.config.use_return_dict
)
if input_video_embeds is not None:
video_tokens = self.videomlp(input_video_embeds)
else:
video_tokens = None
outputs = self.bert(
input_ids,
video_tokens,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
return outputs
class MMBertForMFMMLM(BertPreTrainedModel):
"""A BertModel with shared prediction head on MFM-MLM."""
def __init__(self, config):
super().__init__(config)
self.videomlp = VideoTokenMLP(config)
self.bert = MMBertModel(config)
self.cls = MFMMLMHead(config)
self.hidden_size = config.hidden_size
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def forward(
self,
input_ids=None,
input_video_embeds=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
masked_frame_labels=None,
target_video_hidden_states=None,
non_masked_frame_mask=None,
masked_lm_labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
return_dict = (
return_dict if return_dict is not None
else self.config.use_return_dict
)
if input_video_embeds is not None:
video_tokens = self.videomlp(input_video_embeds)
else:
video_tokens = None
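        # Project the target (masked-out) frames with the same video MLP and
        # gather the hidden states of the non-masked frames for the MFM head.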
if target_video_hidden_states is not None:
target_video_hidden_states = self.videomlp(
target_video_hidden_states)
non_masked_frame_hidden_states = video_tokens.masked_select(
non_masked_frame_mask.unsqueeze(-1)
).view(-1, self.hidden_size)
outputs = self.bert(
input_ids,
video_tokens,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
mfm_scores, prediction_scores = None, None
if masked_frame_labels is not None and masked_lm_labels is not None:
# split the sequence.
text_offset = masked_frame_labels.size(1) + 1 # [CLS]
video_sequence_output = sequence_output[
:, 1:text_offset
] # remove [SEP] as not in video_label.
text_sequence_output = torch.cat(
[sequence_output[:, :1], sequence_output[:, text_offset:]],
dim=1
)
hidden_size = video_sequence_output.size(-1)
selected_video_output = video_sequence_output.masked_select(
masked_frame_labels.unsqueeze(-1)
).view(-1, hidden_size)
            # only compute the selected tokens during training to speed things up.
hidden_size = text_sequence_output.size(-1)
# masked_lm_labels = masked_lm_labels.reshape(-1)
labels_mask = masked_lm_labels != -100
selected_text_output = text_sequence_output.masked_select(
labels_mask.unsqueeze(-1)
).view(-1, hidden_size)
mfm_scores, prediction_scores = self.cls(
selected_video_output,
target_video_hidden_states,
non_masked_frame_hidden_states,
selected_text_output,
)
output = (
mfm_scores,
prediction_scores,
) + outputs
return output
class BertMFMMLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(
config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly
# resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(
self,
video_hidden_states=None,
target_video_hidden_states=None,
non_masked_frame_hidden_states=None,
text_hidden_states=None,
):
video_logits, text_logits = None, None
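        # Score each masked position against its own target frame (bmm) and
        # against all non-masked frame embeddings in the batch (mm); after the
        # concatenation below, the target frame's score sits in column 0.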
if video_hidden_states is not None:
video_hidden_states = self.transform(video_hidden_states)
non_masked_frame_logits = torch.mm(
video_hidden_states,
non_masked_frame_hidden_states.transpose(1, 0)
)
masked_frame_logits = torch.bmm(
video_hidden_states.unsqueeze(1),
target_video_hidden_states.unsqueeze(-1),
).squeeze(-1)
video_logits = torch.cat(
[masked_frame_logits, non_masked_frame_logits], dim=1
)
if text_hidden_states is not None:
text_hidden_states = self.transform(text_hidden_states)
text_logits = self.decoder(text_hidden_states)
return video_logits, text_logits
class MFMMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertMFMMLMPredictionHead(config)
def forward(
self,
video_hidden_states=None,
target_video_hidden_states=None,
non_masked_frame_hidden_states=None,
text_hidden_states=None,
):
video_logits, text_logits = self.predictions(
video_hidden_states,
target_video_hidden_states,
non_masked_frame_hidden_states,
text_hidden_states,
)
return video_logits, text_logits
class MMBertForMTM(MMBertForMFMMLM):
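    # Same forward pass as MMBertForMFMMLM; only the prediction head (MTMHead)
    # differs.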
def __init__(self, config):
BertPreTrainedModel.__init__(self, config)
self.videomlp = VideoTokenMLP(config)
self.bert = MMBertModel(config)
self.cls = MTMHead(config)
self.hidden_size = config.hidden_size
self.init_weights()
class BertMTMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
self.decoder = nn.Linear(
config.hidden_size, config.vocab_size, bias=False)
def forward(
self,
video_hidden_states=None,
target_video_hidden_states=None,
non_masked_frame_hidden_states=None,
text_hidden_states=None,
):
non_masked_frame_hidden_states = non_masked_frame_hidden_states.transpose(1, 0)
video_logits, text_logits = None, None
if video_hidden_states is not None:
video_hidden_states = self.transform(video_hidden_states)
masked_frame_logits = torch.bmm(
video_hidden_states.unsqueeze(1),
target_video_hidden_states.unsqueeze(-1),
).squeeze(-1)
non_masked_frame_logits = torch.mm(
video_hidden_states,
non_masked_frame_hidden_states
)
video_on_vocab_logits = self.decoder(video_hidden_states)
video_logits = torch.cat([
masked_frame_logits,
non_masked_frame_logits,
video_on_vocab_logits], dim=1)
if text_hidden_states is not None:
text_hidden_states = self.transform(text_hidden_states)
# text first so label does not need to be shifted.
text_on_vocab_logits = self.decoder(text_hidden_states)
text_on_video_logits = torch.mm(
text_hidden_states,
non_masked_frame_hidden_states
)
text_logits = torch.cat([
text_on_vocab_logits,
text_on_video_logits
], dim=1)
return video_logits, text_logits
class MTMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertMTMPredictionHead(config)
def forward(
self,
video_hidden_states=None,
target_video_hidden_states=None,
non_masked_frame_hidden_states=None,
text_hidden_states=None,
):
video_logits, text_logits = self.predictions(
video_hidden_states,
target_video_hidden_states,
non_masked_frame_hidden_states,
text_hidden_states,
)
return video_logits, text_logits
class MMBertModel(BertModel):
"""MMBertModel has MMBertEmbedding to support video tokens."""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
# overwrite embedding
self.embeddings = MMBertEmbeddings(config)
self.encoder = MultiLayerAttentionMaskBertEncoder(config)
self.init_weights()
def forward(
self,
input_ids=None,
input_video_embeds=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
separate_forward_split=None,
):
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None
else self.config.use_return_dict
)
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids "
"and inputs_embeds at the same time"
)
elif input_ids is not None:
if input_video_embeds is not None:
input_shape = (
input_ids.size(0),
input_ids.size(1) + input_video_embeds.size(1),
)
else:
input_shape = (
input_ids.size(0),
input_ids.size(1),
)
elif inputs_embeds is not None:
if input_video_embeds is not None:
input_shape = (
inputs_embeds.size(0),
inputs_embeds.size(1) + input_video_embeds.size(1),
)
else:
input_shape = (
input_ids.size(0),
input_ids.size(1),
)
else:
raise ValueError(
"You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None \
else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(
input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions
# [batch_size, from_seq_length, to_seq_length]
# ourselves in which case
# we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = \
self.get_extended_attention_mask(
attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to
# [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
(
encoder_batch_size,
encoder_sequence_length,
_,
) = encoder_hidden_states.size()
encoder_hidden_shape = (
encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(
encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(
encoder_attention_mask
)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or
# [num_hidden_layers x num_heads]
# and head_mask is converted to shape
# [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(
head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids,
input_video_embeds,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
if separate_forward_split is not None:
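            # Run the encoder on two independent splits of the sequence (e.g.
            # the video part and the text part) and concatenate the outputs
            # along the sequence dimension afterwards.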
split_embedding_output = \
embedding_output[:, :separate_forward_split]
split_extended_attention_mask = extended_attention_mask[
:, :, :, :separate_forward_split, :separate_forward_split
]
split_encoder_outputs = self.encoder(
split_embedding_output,
attention_mask=split_extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
assert (
len(split_encoder_outputs) <= 2
), "we do not support merge on attention for now."
encoder_outputs = []
encoder_outputs.append([split_encoder_outputs[0]])
if len(split_encoder_outputs) == 2:
encoder_outputs.append([])
for _all_hidden_states in split_encoder_outputs[1]:
encoder_outputs[-1].append([_all_hidden_states])
split_embedding_output = \
embedding_output[:, separate_forward_split:]
split_extended_attention_mask = extended_attention_mask[
:, :, :, separate_forward_split:, separate_forward_split:
]
split_encoder_outputs = self.encoder(
split_embedding_output,
attention_mask=split_extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
assert (
len(split_encoder_outputs) <= 2
), "we do not support merge on attention for now."
encoder_outputs[0].append(split_encoder_outputs[0])
encoder_outputs[0] = torch.cat(encoder_outputs[0], dim=1)
if len(split_encoder_outputs) == 2:
for layer_idx, _all_hidden_states in enumerate(
split_encoder_outputs[1]
):
encoder_outputs[1][layer_idx].append(_all_hidden_states)
encoder_outputs[1][layer_idx] = torch.cat(
encoder_outputs[1][layer_idx], dim=1
)
encoder_outputs = tuple(encoder_outputs)
else:
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = (
self.pooler(sequence_output) if self.pooler is not None else None
)
return (sequence_output, pooled_output) + encoder_outputs[1:]
def get_extended_attention_mask(self, attention_mask, input_shape, device):
"""This is borrowed from `modeling_utils.py` with the support of
multi-layer attention masks.
        The second dim is expected to be the number of layers.
See `MMAttentionMaskProcessor`.
Makes broadcastable attention and causal masks so that future
and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to,
zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
:obj:`torch.Tensor` The extended attention mask, \
            with the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions
# [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable
# to all heads.
if attention_mask.dim() == 4:
extended_attention_mask = attention_mask[:, :, None, :, :]
extended_attention_mask = extended_attention_mask.to(
dtype=self.dtype
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) \
* -10000.0
return extended_attention_mask
else:
return super().get_extended_attention_mask(
attention_mask, input_shape, device
)
class MultiLayerAttentionMaskBertEncoder(BertEncoder):
"""extend BertEncoder with the capability of
multiple layers of attention mask."""
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=False,
):
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
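            # With a 5-D mask (batch x layer x heads x seq x seq), each layer
            # gets its own attention mask; otherwise one mask is shared.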
layer_attention_mask = (
attention_mask[:, i, :, :, :]
if attention_mask.dim() == 5
else attention_mask
)
if getattr(self.config, "gradient_checkpointing", False):
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
layer_attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
layer_attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return tuple(
v
for v in [hidden_states, all_hidden_states, all_attentions]
if v is not None
)
| 26,064 | 34.462585 | 87 | py |
rej-summ | rej-summ-main/examples/MMPT/mmpt/models/mmfusionnlg.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. All Rights Reserved
import torch
from torch.nn import functional as F
from typing import Optional, Iterable
try:
from transformers import BertPreTrainedModel
from transformers.modeling_bert import BertOnlyMLMHead
from transformers.file_utils import ModelOutput
from transformers.modeling_outputs import CausalLMOutput
from transformers.generation_utils import (
BeamHypotheses,
top_k_top_p_filtering
)
except ImportError:
pass
from .mmfusion import MMFusion
from .transformermodel import MMBertModel
from ..modules import VideoTokenMLP
class MMFusionNLG(MMFusion):
def __init__(self, config, **kwargs):
super().__init__(config)
if config.model.max_decode_length is not None:
self.max_length = min(
config.model.max_decode_length,
config.dataset.max_len - config.dataset.max_video_len - 3
)
else:
self.max_length = \
config.dataset.max_len - config.dataset.max_video_len - 3
self.gen_param = config.gen_param if config.gen_param is not None \
else {}
def forward(
self,
caps,
cmasks,
vfeats,
vmasks,
attention_mask,
video_label=None,
text_label=None,
**kwargs
):
"""use pre-trained LM header for generation."""
attention_mask, token_type_ids = self._mm_on_the_fly(
cmasks, vmasks, attention_mask)
outputs = self.mm_encoder(
input_ids=caps,
input_video_embeds=vfeats,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
masked_lm_labels=text_label,
)
return {"logits": outputs[0]}
@torch.no_grad()
def generate(
self,
caps, cmasks, vfeats, vmasks,
attention_mask=None,
bos_token_id=None,
eos_token_id=None,
**kwargs
):
# a simplified interface from
# https://huggingface.co/transformers/v3.4.0/_modules/transformers/generation_utils.html#GenerationMixin.generate
# caps now only have
# [CLS], [SEP] (for video) and [CLS] (as bos_token)
assert caps.size(1) == 3
attention_mask, token_type_ids = self._mm_on_the_fly(
cmasks, vmasks, attention_mask)
output = self.mm_encoder.generate(
input_ids=caps,
input_video_embeds=vfeats,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
max_length=self.max_length,
**self.gen_param
)
return output
class MMBertForNLG(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = MMBertModel(config)
self.videomlp = VideoTokenMLP(config)
# we do not use `BertGenerationOnlyLMHead`
# because we can reuse pretraining.
self.cls = BertOnlyMLMHead(config)
self.hidden_size = config.hidden_size
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def forward(
self,
input_ids=None,
input_video_embeds=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
masked_lm_labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
# similar to MMBertForMFMMLM without MFM.
video_tokens = self.videomlp(input_video_embeds)
outputs = self.bert(
input_ids,
video_tokens,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = None
if masked_lm_labels is not None:
text_offset = input_video_embeds.size(1) + 1 # [CLS]
# recover caps format: [CLS] [SEP] text [SEP]
text_sequence_output = torch.cat(
[sequence_output[:, :1], sequence_output[:, text_offset:]],
dim=1
)
            # only compute the selected tokens during training to speed things up.
hidden_size = text_sequence_output.size(-1)
# masked_lm_labels = masked_lm_labels.reshape(-1)
labels_mask = masked_lm_labels != -100
selected_text_output = text_sequence_output.masked_select(
labels_mask.unsqueeze(-1)
).view(-1, hidden_size)
prediction_scores = self.cls(selected_text_output)
if not return_dict:
output = (
prediction_scores,
) + outputs[2:]
return output
# for generation.
text_offset = input_video_embeds.size(1) + 2 # [CLS]
text_sequence_output = sequence_output[:, text_offset:]
prediction_scores = self.cls(text_sequence_output)
return CausalLMOutput(
loss=None,
logits=prediction_scores,
)
def prepare_inputs_for_generation(
self,
input_ids,
input_video_embeds,
attention_mask=None,
token_type_ids=None,
**model_kwargs
):
# must return a dictionary.
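        # input_ids grows by one generated token per step, so crop the
        # precomputed attention_mask / token_type_ids to the current
        # text + video length.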
seq_len = input_ids.size(1) + input_video_embeds.size(1)
if attention_mask is not None:
if len(attention_mask.size()) == 4:
attention_mask = attention_mask[:, :, :seq_len, :seq_len]
elif len(attention_mask.size()) == 3:
attention_mask = attention_mask[:, :seq_len, :seq_len]
else:
attention_mask = attention_mask[:, :seq_len]
if token_type_ids is not None:
token_type_ids = token_type_ids[:, :seq_len]
return {
"input_ids": input_ids,
"input_video_embeds": input_video_embeds,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
@torch.no_grad()
def generate(
self,
input_ids: Optional[torch.LongTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
max_length: Optional[int] = None,
min_length: Optional[int] = None,
do_sample: Optional[bool] = None,
early_stopping: Optional[bool] = None,
num_beams: Optional[int] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
repetition_penalty: Optional[float] = None,
bad_words_ids: Optional[Iterable[int]] = None,
bos_token_id: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
length_penalty: Optional[float] = None,
no_repeat_ngram_size: Optional[int] = None,
num_return_sequences: Optional[int] = None,
attention_mask: Optional[torch.LongTensor] = None,
decoder_start_token_id: Optional[int] = None,
use_cache: Optional[bool] = None,
**model_kwargs
) -> torch.LongTensor:
r"""
Generates sequences for models with a language modeling head. The method currently supports greedy decoding,
beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.
Adapted in part from `Facebook's XLM beam search code
<https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529>`__.
Apart from :obj:`input_ids` and :obj:`attention_mask`, all the arguments below will default to the value of the
attribute of the same name inside the :class:`~transformers.PretrainedConfig` of the model. The default values
indicated are the default values of those config.
Most of these parameters are explained in more detail in `this blog post
<https://huggingface.co/blog/how-to-generate>`__.
Parameters:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
The sequence used as a prompt for the generation. If :obj:`None` the method initializes
it as an empty :obj:`torch.LongTensor` of shape :obj:`(1,)`.
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
initial input_ids for the decoder of encoder-decoder type models. If :obj:`None` then only
decoder_start_token_id is passed as the first token to the decoder.
max_length (:obj:`int`, `optional`, defaults to 20):
The maximum length of the sequence to be generated.
min_length (:obj:`int`, `optional`, defaults to 10):
The minimum length of the sequence to be generated.
do_sample (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use sampling ; use greedy decoding otherwise.
early_stopping (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to stop the beam search when at least ``num_beams`` sentences are finished per batch or not.
num_beams (:obj:`int`, `optional`, defaults to 1):
Number of beams for beam search. 1 means no beam search.
            temperature (:obj:`float`, `optional`, defaults to 1.0):
                The value used to modulate the next token probabilities.
top_k (:obj:`int`, `optional`, defaults to 50):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (:obj:`float`, `optional`, defaults to 1.0):
If set to float < 1, only the most probable tokens with probabilities that add up to ``top_p`` or
higher are kept for generation.
repetition_penalty (:obj:`float`, `optional`, defaults to 1.0):
The parameter for repetition penalty. 1.0 means no penalty. See `this paper
<https://arxiv.org/pdf/1909.05858.pdf>`__ for more details.
pad_token_id (:obj:`int`, `optional`):
The id of the `padding` token.
bos_token_id (:obj:`int`, `optional`):
The id of the `beginning-of-sequence` token.
eos_token_id (:obj:`int`, `optional`):
The id of the `end-of-sequence` token.
length_penalty (:obj:`float`, `optional`, defaults to 1.0):
Exponential penalty to the length. 1.0 means no penalty.
Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in
order to encourage the model to produce longer sequences.
no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0):
If set to int > 0, all ngrams of that size can only occur once.
bad_words_ids(:obj:`List[int]`, `optional`):
List of token ids that are not allowed to be generated. In order to get the tokens of the words that
should not appear in the generated text, use :obj:`tokenizer.encode(bad_word, add_prefix_space=True)`.
num_return_sequences(:obj:`int`, `optional`, defaults to 1):
The number of independently computed returned sequences for each element in the batch.
attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values are in ``[0, 1]``, 1 for
tokens that are not masked, and 0 for masked tokens.
If not provided, will default to a tensor the same shape as :obj:`input_ids` that masks the pad token.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_start_token_id (:obj:`int`, `optional`):
If an encoder-decoder model starts decoding with a different token than `bos`, the id of that token.
use_cache: (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should use the past last key/values attentions (if applicable to the model) to
speed up decoding.
model_kwargs:
Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model.
Return:
:obj:`torch.LongTensor` of shape :obj:`(batch_size * num_return_sequences, sequence_length)`:
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or
shorter if all batches finished early due to the :obj:`eos_token_id`.
Examples::
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
outputs = model.generate(max_length=40) # do greedy decoding
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3, do_sample=True) # generate 3 candidates using sampling
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache.
input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('gpt2') # Download model and configuration from S3 and cache.
            input_context = 'My cute dog'
bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']]
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated
"""
# We cannot generate if the model does not have a LM head
if self.get_output_embeddings() is None:
raise AttributeError(
"You tried to generate sequences with a model that does not have a LM Head."
"Please use another model class (e.g. `OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )"
)
max_length = max_length if max_length is not None else self.config.max_length
min_length = min_length if min_length is not None else self.config.min_length
do_sample = do_sample if do_sample is not None else self.config.do_sample
early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping
use_cache = use_cache if use_cache is not None else self.config.use_cache
num_beams = num_beams if num_beams is not None else self.config.num_beams
temperature = temperature if temperature is not None else self.config.temperature
top_k = top_k if top_k is not None else self.config.top_k
top_p = top_p if top_p is not None else self.config.top_p
repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty
bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
no_repeat_ngram_size = (
no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size
)
bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids
num_return_sequences = (
num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
)
decoder_start_token_id = (
decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id
)
if input_ids is not None:
            batch_size = input_ids.shape[0] # overridden by the input batch_size
else:
batch_size = 1
assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictly positive integer."
assert isinstance(min_length, int) and min_length >= 0, "`min_length` should be a positive integer."
assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean."
assert isinstance(use_cache, bool), "`use_cache` should be a boolean."
assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer."
assert temperature > 0, "`temperature` should be strictly positive."
assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer."
assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
assert input_ids is not None or (
isinstance(bos_token_id, int) and bos_token_id >= 0
), "If input_ids is not defined, `bos_token_id` should be a positive integer."
assert pad_token_id is None or (
isinstance(pad_token_id, int) and (pad_token_id >= 0)
), "`pad_token_id` should be a positive integer."
assert (eos_token_id is None) or (
isinstance(eos_token_id, int) and (eos_token_id >= 0)
), "`eos_token_id` should be a positive integer."
assert length_penalty > 0, "`length_penalty` should be strictly positive."
assert (
isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0
), "`no_repeat_ngram_size` should be a positive integer."
assert (
isinstance(num_return_sequences, int) and num_return_sequences > 0
), "`num_return_sequences` should be a strictly positive integer."
assert (
bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list)
), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated"
if input_ids is None:
assert isinstance(bos_token_id, int) and bos_token_id >= 0, (
"you should either supply a context to complete as `input_ids` input "
"or a `bos_token_id` (integer >= 0) as a first token to start the generation."
)
input_ids = torch.full(
(batch_size, 1),
bos_token_id,
dtype=torch.long,
device=next(self.parameters()).device,
)
else:
assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)."
# do not allow duplicate outputs when greedy decoding
if do_sample is False:
if num_beams == 1:
# no_beam_search greedy generation conditions
assert (
num_return_sequences == 1
), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1"
else:
# beam_search greedy generation conditions
assert (
num_beams >= num_return_sequences
), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences"
# create attention mask if necessary
# TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140
if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids):
attention_mask = input_ids.ne(pad_token_id).long()
elif attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
# set pad_token_id to eos_token_id if not set. Important that this is done after
# attention_mask is created
if pad_token_id is None and eos_token_id is not None:
print(
"Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence".format(eos_token_id)
)
pad_token_id = eos_token_id
# vocab size
if hasattr(self.config, "vocab_size"):
vocab_size = self.config.vocab_size
elif (
self.config.is_encoder_decoder
and hasattr(self.config, "decoder")
and hasattr(self.config.decoder, "vocab_size")
):
vocab_size = self.config.decoder.vocab_size
else:
raise ValueError("either self.config.vocab_size or self.config.decoder.vocab_size needs to be defined")
# set effective batch size and effective batch multiplier according to do_sample
if do_sample:
effective_batch_size = batch_size * num_return_sequences
effective_batch_mult = num_return_sequences
else:
effective_batch_size = batch_size
effective_batch_mult = 1
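# illustrative example (hypothetical numbers): with batch_size=2, num_return_sequences=3
# and do_sample=True, effective_batch_size=6 and effective_batch_mult=3, so each input
# row is later expanded 3 times (times num_beams) before decoding.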
if self.config.is_encoder_decoder:
if decoder_start_token_id is None:
# see if BOS token can be used for decoder_start_token_id
if bos_token_id is not None:
decoder_start_token_id = bos_token_id
elif (
hasattr(self.config, "decoder")
and hasattr(self.config.decoder, "bos_token_id")
and self.config.decoder.bos_token_id is not None
):
decoder_start_token_id = self.config.decoder.bos_token_id
else:
raise ValueError(
"decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation"
)
assert hasattr(self, "get_encoder"), "{} should have a 'get_encoder' function defined".format(self)
assert callable(self.get_encoder), "{} should be a method".format(self.get_encoder)
# get encoder and store encoder outputs
encoder = self.get_encoder()
encoder_outputs: ModelOutput = encoder(input_ids, attention_mask=attention_mask, return_dict=True)
# Expand input ids if num_beams > 1 or num_return_sequences > 1
if num_return_sequences > 1 or num_beams > 1:
# TODO: make this a call-back function.
# input_ids=caps,
# input_video_embeds=vfeats,
# attention_mask=attention_mask,
# token_type_ids=token_type_ids,
input_video_embeds = model_kwargs.pop("input_video_embeds", None)
token_type_ids = model_kwargs.pop("token_type_ids", None)
input_ids_len = input_ids.shape[-1]
input_ids = input_ids.unsqueeze(1).expand(
batch_size, effective_batch_mult * num_beams, input_ids_len)
input_video_embeds_len, input_video_embeds_hidden = input_video_embeds.size(1), input_video_embeds.size(2)
input_video_embeds = input_video_embeds.unsqueeze(1).expand(
batch_size, effective_batch_mult * num_beams, input_video_embeds_len, input_video_embeds_hidden)
attention_mask_from_len, attention_mask_to_len = attention_mask.size(1), attention_mask.size(2)
attention_mask = attention_mask.unsqueeze(1).expand(
batch_size, effective_batch_mult * num_beams, attention_mask_from_len, attention_mask_to_len
)
token_type_ids_len = token_type_ids.size(1)
token_type_ids = token_type_ids.unsqueeze(1).expand(
batch_size, effective_batch_mult * num_beams, token_type_ids_len
)
# contiguous ...
input_ids = input_ids.contiguous().view(
effective_batch_size * num_beams, input_ids_len
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
input_video_embeds = input_video_embeds.contiguous().view(
effective_batch_size * num_beams, input_video_embeds_len, input_video_embeds_hidden)
attention_mask = attention_mask.contiguous().view(
effective_batch_size * num_beams, attention_mask_from_len, attention_mask_to_len
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
token_type_ids = token_type_ids.contiguous().view(
effective_batch_size * num_beams, token_type_ids_len
)
model_kwargs["input_video_embeds"] = input_video_embeds
model_kwargs["token_type_ids"] = token_type_ids
if self.config.is_encoder_decoder:
device = next(self.parameters()).device
if decoder_input_ids is not None:
# give initial decoder input ids
input_ids = decoder_input_ids.repeat(effective_batch_size * num_beams, 1).to(device)
else:
# create empty decoder input_ids
input_ids = torch.full(
(effective_batch_size * num_beams, 1),
decoder_start_token_id,
dtype=torch.long,
device=device,
)
cur_len = input_ids.shape[-1]
assert (
batch_size == encoder_outputs.last_hidden_state.shape[0]
), f"expected encoder_outputs.last_hidden_state to have 1st dimension bs={batch_size}, got {encoder_outputs.last_hidden_state.shape[0]} "
# expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)
expanded_batch_idxs = (
torch.arange(batch_size)
.view(-1, 1)
.repeat(1, num_beams * effective_batch_mult)
.view(-1)
.to(input_ids.device)
)
# expand encoder_outputs
encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.index_select(
0, expanded_batch_idxs
)
# save encoder_outputs in `model_kwargs`
model_kwargs["encoder_outputs"] = encoder_outputs
else:
cur_len = input_ids.shape[-1]
assert (
cur_len < max_length
), f"The context has {cur_len} number of tokens, but `max_length` is only {max_length}. Please make sure that `max_length` is bigger than the number of tokens, by setting either `generate(max_length=...,...)` or `config.max_length = ...`"
if num_beams > 1:
output = self._generate_beam_search(
input_ids,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
early_stopping=early_stopping,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
num_return_sequences=num_return_sequences,
length_penalty=length_penalty,
num_beams=num_beams,
vocab_size=vocab_size,
attention_mask=attention_mask,
use_cache=use_cache,
model_kwargs=model_kwargs,
)
else:
output = self._generate_no_beam_search(
input_ids,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
attention_mask=attention_mask,
use_cache=use_cache,
model_kwargs=model_kwargs,
)
return output
def _generate_beam_search(
self,
input_ids,
cur_len,
max_length,
min_length,
do_sample,
early_stopping,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
bad_words_ids,
pad_token_id,
eos_token_id,
batch_size,
num_return_sequences,
length_penalty,
num_beams,
vocab_size,
attention_mask,
use_cache,
model_kwargs,
):
"""Generate sequences for each example with beam search."""
# generated hypotheses
generated_hyps = [
BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping)
for _ in range(batch_size)
]
# scores for each sentence in the beam
beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
# for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens across all beams
if do_sample is False:
beam_scores[:, 1:] = -1e9
beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,)
# cache compute states
past = None
# done sentences
done = [False for _ in range(batch_size)]
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(
input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_kwargs
)
outputs = self(**model_inputs, return_dict=True) # (batch_size * num_beams, cur_len, vocab_size)
next_token_logits = outputs.logits[:, -1, :] # (batch_size * num_beams, vocab_size)
# if model has past, then set the past variable to speed up decoding
if "past_key_values" in outputs:
past = outputs.past_key_values
elif "mems" in outputs:
past = outputs.mems
if self.config.is_encoder_decoder and do_sample is False:
# TODO (PVP) still a bit hacky here - there might be a better solution
next_token_logits = self.adjust_logits_during_generation(
next_token_logits, cur_len=cur_len, max_length=max_length
)
scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size)
scores = self.postprocess_next_token_scores(
scores=scores,
input_ids=input_ids,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
cur_len=cur_len,
min_length=min_length,
max_length=max_length,
eos_token_id=eos_token_id,
repetition_penalty=repetition_penalty,
batch_size=batch_size,
num_beams=num_beams,
)
assert scores.shape == (batch_size * num_beams, vocab_size), "Shapes of scores: {} != {}".format(
scores.shape, (batch_size * num_beams, vocab_size)
)
if do_sample:
_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
# Temperature
if temperature != 1.0:
_scores = _scores / temperature
# Top-p/top-k filtering
_scores = top_k_top_p_filtering(
_scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2
) # (batch_size * num_beams, vocab_size)
# re-organize to group the beam together to sample from all beam_idxs
_scores = _scores.contiguous().view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
# Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search)
probs = F.softmax(_scores, dim=-1)
next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) # (batch_size, num_beams * 2)
# Compute next scores
next_scores = torch.gather(_scores, -1, next_tokens) # (batch_size, num_beams * 2)
# sort the sampled vector to make sure that the first num_beams samples are the best
next_scores, next_scores_indices = torch.sort(next_scores, descending=True, dim=1)
next_tokens = torch.gather(next_tokens, -1, next_scores_indices) # (batch_size, num_beams * 2)
else:
next_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
# re-organize to group the beam together (we are keeping top hypothesis across beams)
next_scores = next_scores.view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
next_scores, next_tokens = torch.topk(next_scores, 2 * num_beams, dim=1, largest=True, sorted=True)
assert next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams)
# next batch beam content
next_batch_beam = []
# for each sentence
for batch_idx in range(batch_size):
# if we are done with this sentence, add a pad token
if done[batch_idx]:
assert (
len(generated_hyps[batch_idx]) >= num_beams
), "Batch can only be done if at least {} beams have been generated".format(num_beams)
assert (
eos_token_id is not None and pad_token_id is not None
), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined"
next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch
continue
# next sentence beam content, this will get added to next_batch_beam
next_sent_beam = []
# next tokens for this sentence
for beam_token_rank, (beam_token_id, beam_token_score) in enumerate(
zip(next_tokens[batch_idx], next_scores[batch_idx])
):
# get beam and token IDs
beam_id = beam_token_id // vocab_size
token_id = beam_token_id % vocab_size
effective_beam_id = batch_idx * num_beams + beam_id
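# e.g. (hypothetical numbers) with vocab_size=50000 and num_beams=4, a flat index
# beam_token_id=100007 decodes to beam_id=2 and token_id=7; effective_beam_id then
# addresses that beam inside the flattened (batch_size * num_beams) dimension.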
# add to generated hypotheses if end of sentence
if (eos_token_id is not None) and (token_id.item() == eos_token_id):
# if beam_token does not belong to top num_beams tokens, it should not be added
is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams
if is_beam_token_worse_than_top_num_beams:
continue
generated_hyps[batch_idx].add(
input_ids[effective_beam_id].clone(),
beam_token_score.item(),
)
else:
# add next predicted token since it is not eos_token
next_sent_beam.append((beam_token_score, token_id, effective_beam_id))
# once the beam for next step is full, don't add more tokens to it.
if len(next_sent_beam) == num_beams:
break
# Check if we are done so that we can save a pad step if all(done)
done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(
next_scores[batch_idx].max().item(), cur_len
)
# update next beam content
assert len(next_sent_beam) == num_beams, "Beam should always be full"
next_batch_beam.extend(next_sent_beam)
assert len(next_batch_beam) == num_beams * (batch_idx + 1), "We should have added num_beams each step"
# stop when we are done with each sentence
if all(done):
break
# sanity check / prepare next batch
assert len(next_batch_beam) == batch_size * num_beams
beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
beam_tokens = input_ids.new([x[1] for x in next_batch_beam])
beam_idx = input_ids.new([x[2] for x in next_batch_beam])
# re-order batch and update current length
input_ids = input_ids[beam_idx, :]
input_ids = torch.cat([input_ids, beam_tokens.unsqueeze(1)], dim=-1)
cur_len = cur_len + 1
# re-order internal states
if past is not None:
past = self._reorder_cache(past, beam_idx)
# extend attention_mask for new generated input if only decoder
# (huxu): move out since we trim attention_mask by ourselves.
# if self.config.is_encoder_decoder is False:
# attention_mask = torch.cat(
# [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
# )
# finalize all open beam hypotheses and add to generated hypotheses
for batch_idx in range(batch_size):
if done[batch_idx]:
continue
# test that beam scores match previously calculated scores if not eos and batch_idx not done
if eos_token_id is not None and all(
(token_id % vocab_size).item() != eos_token_id for token_id in next_tokens[batch_idx]
):
assert torch.all(
next_scores[batch_idx, :num_beams] == beam_scores.view(batch_size, num_beams)[batch_idx]
), "If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}".format(
next_scores[:, :num_beams][batch_idx],
beam_scores.view(batch_size, num_beams)[batch_idx],
)
# need to add best num_beams hypotheses to generated hyps
for beam_id in range(num_beams):
effective_beam_id = batch_idx * num_beams + beam_id
final_score = beam_scores[effective_beam_id].item()
final_tokens = input_ids[effective_beam_id]
generated_hyps[batch_idx].add(final_tokens, final_score)
# depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch
output_batch_size = batch_size if do_sample else batch_size * num_return_sequences
output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences
# select the best hypotheses
sent_lengths = input_ids.new(output_batch_size)
best = []
# retrieve best hypotheses
for i, hypotheses in enumerate(generated_hyps):
sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0])
for j in range(output_num_return_sequences_per_batch):
effective_batch_idx = output_num_return_sequences_per_batch * i + j
best_hyp = sorted_hyps.pop()[1]
sent_lengths[effective_batch_idx] = len(best_hyp)
best.append(best_hyp)
# prepare for adding eos
sent_max_len = min(sent_lengths.max().item() + 1, max_length)
decoded = input_ids.new(output_batch_size, sent_max_len)
# shorter batches are padded if needed
if sent_lengths.min().item() != sent_lengths.max().item():
assert pad_token_id is not None, "`pad_token_id` has to be defined"
decoded.fill_(pad_token_id)
# fill with hypotheses and eos_token_id if the latter fits in
for i, hypo in enumerate(best):
decoded[i, : sent_lengths[i]] = hypo
if sent_lengths[i] < max_length:
decoded[i, sent_lengths[i]] = eos_token_id
return decoded
def _generate_no_beam_search(
self,
input_ids,
cur_len,
max_length,
min_length,
do_sample,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
bad_words_ids,
pad_token_id,
eos_token_id,
batch_size,
attention_mask,
use_cache,
model_kwargs,
):
"""Generate sequences for each example without beam search (num_beams == 1).
All returned sequences are generated independently.
"""
# length of generated sentences / unfinished sentences
unfinished_sents = input_ids.new(batch_size).fill_(1)
sent_lengths = input_ids.new(batch_size).fill_(max_length)
past = None
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(
input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_kwargs
)
outputs = self(**model_inputs, return_dict=True)
next_token_logits = outputs.logits[:, -1, :]
scores = self.postprocess_next_token_scores(
scores=next_token_logits,
input_ids=input_ids,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
cur_len=cur_len,
min_length=min_length,
max_length=max_length,
eos_token_id=eos_token_id,
repetition_penalty=repetition_penalty,
batch_size=batch_size,
num_beams=1,
)
# if model has past, then set the past variable to speed up decoding
if "past_key_values" in outputs:
past = outputs.past_key_values
elif "mems" in outputs:
past = outputs.mems
if do_sample:
# Temperature (higher temperature => more likely to sample low probability tokens)
if temperature != 1.0:
scores = scores / temperature
# Top-p/top-k filtering
next_token_logscores = top_k_top_p_filtering(scores, top_k=top_k, top_p=top_p)
# Sample
probs = F.softmax(next_token_logscores, dim=-1)
next_token = torch.multinomial(probs, num_samples=1).squeeze(1)
else:
# Greedy decoding
next_token = torch.argmax(next_token_logits, dim=-1)
# print(next_token_logits[0,next_token[0]], next_token_logits[0,eos_token_id])
# update generations and finished sentences
if eos_token_id is not None:
# pad finished sentences if eos_token_id exists
tokens_to_add = next_token * unfinished_sents + (pad_token_id) * (1 - unfinished_sents)
else:
tokens_to_add = next_token
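# e.g. (illustrative) with eos handling enabled and unfinished_sents = [1, 0], the
# still-active first sequence receives next_token while the finished second sequence
# is padded with pad_token_id.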
# add token and increase length by one
input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
cur_len = cur_len + 1
if eos_token_id is not None:
eos_in_sents = tokens_to_add == eos_token_id
# if sentence is unfinished and the token to add is eos, sent_lengths is filled with current length
is_sents_unfinished_and_token_to_add_is_eos = unfinished_sents.mul(eos_in_sents.long()).bool()
sent_lengths.masked_fill_(is_sents_unfinished_and_token_to_add_is_eos, cur_len)
# unfinished_sents is set to zero if eos in sentence
unfinished_sents.mul_((~eos_in_sents).long())
# stop when there is a </s> in each sentence, or if we exceed the maximum length
if unfinished_sents.max() == 0:
break
# extend attention_mask for new generated input if only decoder
# if self.config.is_encoder_decoder is False:
# attention_mask = torch.cat(
# [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
# )
return input_ids
| 48,394 | 47.395 | 246 | py |
rej-summ | rej-summ-main/examples/MMPT/mmpt/models/mmfusion.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. All Rights Reserved
import torch
from torch import nn
try:
from transformers import AutoConfig, AutoTokenizer
except ImportError:
pass
from . import transformermodel
class MMPTModel(nn.Module):
"""An e2e wrapper of inference model.
"""
@classmethod
def from_pretrained(cls, config, checkpoint="checkpoint_best.pt"):
import os
from ..utils import recursive_config
from ..tasks import Task
config = recursive_config(config)
mmtask = Task.config_task(config)
checkpoint_path = os.path.join(config.eval.save_path, checkpoint)
mmtask.build_model(checkpoint=checkpoint_path)
# TODO(huxu): make the video encoder configurable.
from ..processors.models.s3dg import S3D
video_encoder = S3D('pretrained_models/s3d_dict.npy', 512)
video_encoder.load_state_dict(
torch.load('pretrained_models/s3d_howto100m.pth'))
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
config.dataset.bert_name, use_fast=config.dataset.use_fast
)
from ..processors import Aligner
aligner = Aligner(config.dataset)
return (
MMPTModel(config, mmtask.model, video_encoder),
tokenizer,
aligner
)
def __init__(self, config, model, video_encoder, **kwargs):
super().__init__()
self.max_video_len = config.dataset.max_video_len
self.video_encoder = video_encoder
self.model = model
def forward(self, video_frames, caps, cmasks, return_score=False):
bsz = video_frames.size(0)
assert bsz == 1, "only bsz=1 is supported now."
seq_len = video_frames.size(1)
video_frames = video_frames.view(-1, *video_frames.size()[2:])
vfeats = self.video_encoder(video_frames.permute(0, 4, 1, 2, 3))
vfeats = vfeats['video_embedding']
vfeats = vfeats.view(bsz, seq_len, vfeats.size(-1))
padding = torch.zeros(
bsz, self.max_video_len - seq_len, vfeats.size(-1))
vfeats = torch.cat([vfeats, padding], dim=1)
vmasks = torch.cat([
torch.ones((bsz, seq_len), dtype=torch.bool),
torch.zeros((bsz, self.max_video_len - seq_len), dtype=torch.bool)
],
dim=1
)
output = self.model(caps, cmasks, vfeats, vmasks)
if return_score:
output = {"score": torch.bmm(
output["pooled_video"][:, None, :],
output["pooled_text"][:, :, None]
).squeeze(-1).squeeze(-1)}
return output
class MMFusion(nn.Module):
"""a MMPT wrapper class for MMBert style models.
TODO: move isolated mask to a subclass.
"""
def __init__(self, config, **kwargs):
super().__init__()
transformer_config = AutoConfig.from_pretrained(
config.dataset.bert_name)
self.hidden_size = transformer_config.hidden_size
self.is_train = False
if config.dataset.train_path is not None:
self.is_train = True
# 0 means no iso; 1-12 means iso up to that layer.
self.num_hidden_layers = transformer_config.num_hidden_layers
self.last_iso_layer = 0
if config.dataset.num_iso_layer is not None:
self.last_iso_layer = config.dataset.num_iso_layer - 1 + 1
if config.model.mm_encoder_cls is not None:
mm_encoder_cls = getattr(transformermodel, config.model.mm_encoder_cls)
model_config = AutoConfig.from_pretrained(config.dataset.bert_name)
model_config.max_video_len = config.dataset.max_video_len
# TODO: a general way to add parameter for a model.
model_config.use_seg_emb = config.model.use_seg_emb
self.mm_encoder = mm_encoder_cls.from_pretrained(
config.dataset.bert_name, config=model_config)
elif config.model.video_encoder_cls is not None\
and config.model.text_encoder_cls is not None:
video_encoder_cls = getattr(transformermodel, config.model.video_encoder_cls)
model_config = AutoConfig.from_pretrained(config.dataset.bert_name)
model_config.max_video_len = config.dataset.max_video_len
# TODO: make each model a set of config class.
if hasattr(model_config, "num_layers"):
model_config.num_layers = config.model.num_hidden_video_layers
else:
model_config.num_hidden_layers = config.model.num_hidden_video_layers
self.video_encoder = video_encoder_cls.from_pretrained(
config.dataset.bert_name, config=model_config)
# exact same NLP model from Huggingface.
text_encoder_cls = getattr(transformermodel, config.model.text_encoder_cls)
self.text_encoder = text_encoder_cls.from_pretrained(
config.dataset.bert_name)
else:
raise ValueError("the encoder must be either MM or two backbones.")
def forward(
self,
caps,
cmasks,
vfeats,
vmasks,
**kwargs
):
raise NotImplementedError(
"Please derive MMFusion module."
)
def _mm_on_the_fly(
self,
cmasks,
vmasks,
attention_mask
):
"""helper function for mask, seg_ids and token_type_ids."""
if attention_mask is None:
attention_mask = self._mm_attention_mask(cmasks, vmasks)
"""
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
"""
token_type_ids = torch.cat(
[
torch.zeros(
(vmasks.size(0), vmasks.size(1) + 2),
dtype=torch.long,
device=vmasks.device,
),
torch.ones(
(cmasks.size(0), cmasks.size(1) - 2),
dtype=torch.long,
device=cmasks.device,
),
],
dim=1,
)
return attention_mask, token_type_ids
def _mm_attention_mask(self, cmasks, vmasks):
assert cmasks.size(0) == vmasks.size(0), "{}, {}, {}, {}".format(
str(cmasks.size()),
str(vmasks.size()),
str(cmasks.size(0)),
str(vmasks.size(0)),
)
mm_mask = torch.cat([cmasks[:, :1], vmasks, cmasks[:, 1:]], dim=1)
if self.last_iso_layer == 0:
# hard attention mask.
return mm_mask
else:
# a per-layer iso mask: layers [0, last_iso_layer) are isolated
# (each modality attends only within itself); the remaining layers are MM-fused.
# build the iso mask
batch_size = cmasks.size(0)
iso_mask = self._make_iso_mask(batch_size, cmasks, vmasks)
mm_mask = mm_mask[:, None, :].repeat(1, mm_mask.size(-1), 1)
iso_mm_masks = []
# hard attention mask.
iso_mask = iso_mask[:, None, :, :].repeat(
1, self.last_iso_layer, 1, 1)
iso_mm_masks.append(iso_mask)
if self.last_iso_layer < self.num_hidden_layers:
mm_mask = mm_mask[:, None, :, :].repeat(
1, self.num_hidden_layers - self.last_iso_layer, 1, 1
)
iso_mm_masks.append(mm_mask)
iso_mm_masks = torch.cat(iso_mm_masks, dim=1)
return iso_mm_masks
def _make_iso_mask(self, batch_size, cmasks, vmasks):
cls_self_mask = torch.cat(
[
torch.ones(
(batch_size, 1), dtype=torch.bool, device=cmasks.device),
torch.zeros(
(batch_size, cmasks.size(1) + vmasks.size(1) - 1),
dtype=torch.bool, device=cmasks.device)
], dim=1)
iso_video_mask = torch.cat(
[
# [CLS] is not used.
torch.zeros(
(batch_size, 1), dtype=torch.bool, device=cmasks.device
),
vmasks,
# assume to be 1.
cmasks[:, 1:2],
# 2 means [CLS] + [SEP]
torch.zeros(
(batch_size, cmasks.size(1) - 2),
dtype=torch.bool,
device=cmasks.device,
),
],
dim=1,
)
iso_text_mask = torch.cat(
[
torch.zeros(
(batch_size, 2 + vmasks.size(1)),
dtype=torch.bool,
device=cmasks.device,
), # [CLS] is not used.
cmasks[:, 2:], # assume to be 1.
],
dim=1,
)
cls_self_mask = cls_self_mask[:, None, :]
iso_video_mask = iso_video_mask[:, None, :].repeat(
1, vmasks.size(1) + 1, 1)
iso_text_mask = iso_text_mask[:, None, :].repeat(
1, cmasks.size(1) - 2, 1)
return torch.cat([cls_self_mask, iso_video_mask, iso_text_mask], dim=1)
def _pooling_vt_layer(
self,
layered_sequence_output,
cmasks,
vmasks
):
layer_idx = self.last_iso_layer \
if self.last_iso_layer > 0 else self.num_hidden_layers
hidden_state = layered_sequence_output[layer_idx]
# also output pooled_video and pooled_text.
batch_size = cmasks.size(0)
# pool the modality.
text_offset = vmasks.size(1) + 2 # [CLS] + [SEP]
# video tokens + [SEP]
video_outputs = hidden_state[:, 1:text_offset]
video_attention_mask = torch.cat(
[
vmasks,
torch.ones(
(batch_size, 1), dtype=torch.bool, device=vmasks.device),
],
dim=1,
)
assert video_outputs.size(1) == video_attention_mask.size(1)
pooled_video = torch.sum(
video_outputs * video_attention_mask.unsqueeze(-1), dim=1
) / video_attention_mask.sum(1, keepdim=True)
# pooled_video = torch.mean(video_outputs[0], dim=1)
# text tokens + [SEP]
text_attention_mask = cmasks[:, 2:]
text_outputs = hidden_state[:, text_offset:]
assert text_outputs.size(1) == text_attention_mask.size(1)
pooled_text = torch.sum(
text_outputs * text_attention_mask.unsqueeze(-1), dim=1
) / text_attention_mask.sum(1, keepdim=True)
return pooled_video, pooled_text
class MMFusionMFMMLM(MMFusion):
"""forward function for MFM and MLM."""
def forward(
self,
caps,
cmasks,
vfeats,
vmasks,
attention_mask=None,
video_label=None,
text_label=None,
**kwargs
):
output_hidden_states = False if self.is_train else True
target_vfeats, non_masked_frame_mask = None, None
if video_label is not None:
target_vfeats = vfeats.masked_select(
video_label.unsqueeze(-1)).view(
-1, vfeats.size(-1)
)
# mask video token.
vfeats[video_label] = 0.0
non_masked_frame_mask = vmasks.clone()
non_masked_frame_mask[video_label] = False
attention_mask, token_type_ids = self._mm_on_the_fly(
cmasks, vmasks, attention_mask)
outputs = self.mm_encoder(
input_ids=caps,
input_video_embeds=vfeats,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
masked_frame_labels=video_label,
target_video_hidden_states=target_vfeats,
non_masked_frame_mask=non_masked_frame_mask,
masked_lm_labels=text_label,
output_hidden_states=output_hidden_states,
)
video_logits, text_logits = outputs[0], outputs[1]
if self.is_train: # return earlier for training.
return {
"video_logits": video_logits,
"text_logits": text_logits,
}
pooled_video, pooled_text = self._pooling_vt_layer(
outputs[2], cmasks, vmasks)
return {"pooled_video": pooled_video, "pooled_text": pooled_text}
class MMFusionMTM(MMFusionMFMMLM):
def __init__(self, config, **kwargs):
super().__init__(config)
"""
For reproducibility:
self.mm_encoder will be initialized then discarded.
"""
from .transformermodel import MMBertForMTM
model_config = AutoConfig.from_pretrained(config.dataset.bert_name)
model_config.max_video_len = config.dataset.max_video_len
model_config.use_seg_emb = config.model.use_seg_emb
self.mm_encoder = MMBertForMTM.from_pretrained(
config.dataset.bert_name, config=model_config)
class MMFusionShare(MMFusion):
"""A retrival wrapper using mm_encoder as both video/text backbone.
TODO: move formally.
"""
def forward(
self,
caps,
cmasks,
vfeats,
vmasks,
attention_mask=None,
video_label=None,
text_label=None,
output_hidden_states=False,
**kwargs
):
pooled_video = self.forward_video(
vfeats,
vmasks,
caps,
cmasks,
output_hidden_states
)
pooled_text = self.forward_text(
caps,
cmasks,
output_hidden_states
)
return {"pooled_video": pooled_video, "pooled_text": pooled_text}
def forward_video(
self,
vfeats,
vmasks,
caps,
cmasks,
output_hidden_states=False,
**kwargs
):
input_ids = caps[:, :2]
attention_mask = torch.cat([
cmasks[:, :1],
vmasks,
cmasks[:, 1:2]
], dim=1)
token_type_ids = torch.zeros(
(vmasks.size(0), vmasks.size(1) + 2),
dtype=torch.long,
device=vmasks.device)
outputs = self.mm_encoder(
input_ids=input_ids,
input_video_embeds=vfeats,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
output_hidden_states=True
)
video_outputs = outputs[0]
if output_hidden_states:
return video_outputs
batch_size = cmasks.size(0)
video_attention_mask = torch.cat(
[
torch.zeros(
(batch_size, 1), dtype=torch.bool, device=vmasks.device),
vmasks,
torch.ones(
(batch_size, 1), dtype=torch.bool, device=vmasks.device),
],
dim=1,
)
assert video_outputs.size(1) == video_attention_mask.size(1)
video_attention_mask = video_attention_mask.type(video_outputs.dtype) \
/ video_attention_mask.sum(1, keepdim=True)
pooled_video = torch.bmm(
video_outputs.transpose(2, 1),
video_attention_mask.unsqueeze(2)
).squeeze(-1)
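# the bmm above is a masked mean pool over video tokens: the normalized attention
# mask acts as averaging weights in a batched matrix-vector product.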
return pooled_video # video_outputs
def forward_text(
self,
caps,
cmasks,
output_hidden_states=False,
**kwargs
):
input_ids = torch.cat([
caps[:, :1], caps[:, 2:],
], dim=1)
attention_mask = torch.cat([
cmasks[:, :1],
cmasks[:, 2:]
], dim=1)
token_type_ids = torch.cat([
torch.zeros(
(cmasks.size(0), 1),
dtype=torch.long,
device=cmasks.device),
torch.ones(
(cmasks.size(0), cmasks.size(1) - 2),
dtype=torch.long,
device=cmasks.device)
], dim=1)
outputs = self.mm_encoder(
input_ids=input_ids,
input_video_embeds=None,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
output_hidden_states=True
)
text_outputs = outputs[0]
if output_hidden_states:
return text_outputs
batch_size = caps.size(0)
# text tokens + [SEP]
text_attention_mask = torch.cat([
torch.zeros(
(batch_size, 1), dtype=torch.bool, device=cmasks.device),
cmasks[:, 2:]
], dim=1)
assert text_outputs.size(1) == text_attention_mask.size(1)
text_attention_mask = text_attention_mask.type(text_outputs.dtype) \
/ text_attention_mask.sum(1, keepdim=True)
pooled_text = torch.bmm(
text_outputs.transpose(2, 1),
text_attention_mask.unsqueeze(2)
).squeeze(-1)
return pooled_text # text_outputs
class MMFusionSeparate(MMFusionShare):
def forward_video(
self,
vfeats,
vmasks,
caps,
cmasks,
output_hidden_states=False,
**kwargs
):
input_ids = caps[:, :2]
attention_mask = torch.cat([
cmasks[:, :1],
vmasks,
cmasks[:, 1:2]
], dim=1)
token_type_ids = torch.zeros(
(vmasks.size(0), vmasks.size(1) + 2),
dtype=torch.long,
device=vmasks.device)
outputs = self.video_encoder(
input_ids=input_ids,
input_video_embeds=vfeats,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
output_hidden_states=True
)
video_outputs = outputs[0]
if output_hidden_states:
return video_outputs
batch_size = cmasks.size(0)
video_attention_mask = torch.cat(
[
torch.zeros(
(batch_size, 1), dtype=torch.bool, device=vmasks.device),
vmasks,
torch.ones(
(batch_size, 1), dtype=torch.bool, device=vmasks.device),
],
dim=1,
)
assert video_outputs.size(1) == video_attention_mask.size(1)
video_attention_mask = video_attention_mask.type(video_outputs.dtype) \
/ video_attention_mask.sum(1, keepdim=True)
pooled_video = torch.bmm(
video_outputs.transpose(2, 1),
video_attention_mask.unsqueeze(2)
).squeeze(-1)
return pooled_video # video_outputs
def forward_text(
self,
caps,
cmasks,
output_hidden_states=False,
**kwargs
):
input_ids = torch.cat([
caps[:, :1], caps[:, 2:],
], dim=1)
attention_mask = torch.cat([
cmasks[:, :1],
cmasks[:, 2:]
], dim=1)
# different from sharing, we use all-0 type.
token_type_ids = torch.zeros(
(cmasks.size(0), cmasks.size(1) - 1),
dtype=torch.long,
device=cmasks.device)
outputs = self.text_encoder(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
output_hidden_states=True
)
text_outputs = outputs[0]
if output_hidden_states:
return text_outputs
batch_size = caps.size(0)
# text tokens + [SEP]
text_attention_mask = torch.cat([
torch.zeros(
(batch_size, 1), dtype=torch.bool, device=cmasks.device),
cmasks[:, 2:]
], dim=1)
assert text_outputs.size(1) == text_attention_mask.size(1)
text_attention_mask = text_attention_mask.type(text_outputs.dtype) \
/ text_attention_mask.sum(1, keepdim=True)
pooled_text = torch.bmm(
text_outputs.transpose(2, 1),
text_attention_mask.unsqueeze(2)
).squeeze(-1)
return pooled_text # text_outputs
class MMFusionJoint(MMFusion):
"""fine-tuning wrapper for retrival task."""
def forward(
self,
caps,
cmasks,
vfeats,
vmasks,
attention_mask=None,
video_label=None,
text_label=None,
**kwargs
):
# TODO (huxu): other ways to do negative examples; move the following
# into your criterion forward.
output_hidden_states = True
attention_mask, token_type_ids = self._mm_on_the_fly(
cmasks, vmasks, attention_mask)
separate_forward_split = (
None if self.is_train else vmasks.size(1) + 2
) # [CLS] + [SEP]
outputs = self.mm_encoder(
input_ids=caps,
input_video_embeds=vfeats,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
output_hidden_states=output_hidden_states,
separate_forward_split=separate_forward_split,
)
pooled_video, pooled_text = self._pooling_vt_layer(
outputs[2], cmasks, vmasks)
return {"pooled_video": pooled_video, "pooled_text": pooled_text}
class MMFusionActionSegmentation(MMFusion):
"""Fine-tuning wrapper for action segmentation.
TODO: rename this for VLM.
"""
def forward(
self,
caps,
cmasks,
vfeats,
vmasks,
attention_mask=None,
**kwargs
):
# the dataloader is assumed to use batch_size=1; flatten the extra batch dimension.
caps = caps.view(-1, caps.size(-1))
cmasks = cmasks.view(-1, cmasks.size(-1))
vfeats = vfeats.view(-1, vfeats.size(2), vfeats.size(3))
vmasks = vmasks.view(-1, vmasks.size(-1))
# this may not cover all shapes of attention_mask.
attention_mask = attention_mask.view(
-1, attention_mask.size(2), attention_mask.size(3)) \
if attention_mask is not None else None
# TODO (huxu): other ways to do negative examples; move the following
# into your criterion forward.
output_hidden_states = True
# video forwarding, text is dummy; never use attention_mask.
attention_mask, token_type_ids = self._mm_on_the_fly(
cmasks, vmasks, attention_mask)
logits = self.mm_encoder(
input_ids=caps,
input_video_embeds=vfeats,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
output_hidden_states=output_hidden_states,
)
return {"logits": logits[0][:, 1:vmasks.size(1)+1]}
class MMFusionActionLocalization(MMFusion):
"""fine-tuning model for retrival task."""
def __init__(self, config, **kwargs):
super().__init__(config)
tokenizer = AutoTokenizer.from_pretrained(
config.dataset.bert_name)
self.cls_token_id = tokenizer.cls_token_id
self.sep_token_id = tokenizer.sep_token_id
self.pad_token_id = tokenizer.pad_token_id
def forward(
self,
caps,
cmasks,
vfeats,
vmasks,
attention_mask=None,
**kwargs
):
# ActionLocalization assumes batch_size=1; squeeze it.
caps = caps.squeeze(0)
cmasks = cmasks.squeeze(0)
vfeats = vfeats.squeeze(0)
vmasks = vmasks.squeeze(0)
attention_mask = attention_mask.squeeze(0) if attention_mask is not None else None
# TODO (huxu): other ways to do negative examples; move the following
# into your criterion forward.
output_hidden_states = True
# a len1 dummy video token.
dummy_vfeats = torch.zeros(
(caps.size(0), 1, vfeats.size(-1)), device=vfeats.device, dtype=vfeats.dtype)
dummy_vmasks = torch.ones(
(caps.size(0), 1), dtype=torch.bool,
device=vfeats.device)
dummy_caps = torch.LongTensor(
[[self.cls_token_id, self.sep_token_id,
self.pad_token_id, self.sep_token_id]],
).to(caps.device).repeat(vfeats.size(0), 1)
dummy_cmasks = torch.BoolTensor(
[[0, 1, 0, 1]] # pad are valid for attention.
).to(caps.device).repeat(vfeats.size(0), 1)
# video forwarding, text is dummy; never use attention_mask.
attention_mask, token_type_ids = self._mm_on_the_fly(
dummy_cmasks, vmasks, None)
outputs = self.mm_encoder(
input_ids=dummy_caps,
input_video_embeds=vfeats,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
output_hidden_states=output_hidden_states,
)
layer_idx = self.last_iso_layer \
if self.last_iso_layer > 0 else self.num_hidden_layers
video_seq = outputs[2][layer_idx][:, 1:vmasks.size(1)+1].masked_select(
vmasks.unsqueeze(-1)
).view(-1, self.hidden_size)
# text forwarding, video is dummy
attention_mask, token_type_ids = self._mm_on_the_fly(
cmasks, dummy_vmasks, None)
outputs = self.mm_encoder(
input_ids=caps,
input_video_embeds=dummy_vfeats,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
output_hidden_states=output_hidden_states,
)
_, pooled_text = self._pooling_vt_layer(
outputs[2], cmasks, dummy_vmasks)
# this line is not right.
logits = torch.mm(video_seq, pooled_text.transpose(1, 0))
return {"logits": logits}
# --------------- MMFusionSeparate for end tasks ---------------
class MMFusionSeparateActionSegmentation(MMFusionSeparate):
"""Fine-tuning wrapper for action segmentation."""
def forward(
self,
caps,
cmasks,
vfeats,
vmasks,
attention_mask=None,
**kwargs
):
# the dataloader is assumed to use batch_size=1; flatten the extra batch dimension.
caps = caps.view(-1, caps.size(-1))
cmasks = cmasks.view(-1, cmasks.size(-1))
vfeats = vfeats.view(-1, vfeats.size(2), vfeats.size(3))
vmasks = vmasks.view(-1, vmasks.size(-1))
logits = self.forward_video(
vfeats,
vmasks,
caps,
cmasks,
output_hidden_states=True
)
return {"logits": logits[:, 1:vmasks.size(1)+1]}
class MMFusionSeparateActionLocalization(MMFusionSeparate):
def __init__(self, config, **kwargs):
super().__init__(config)
tokenizer = AutoTokenizer.from_pretrained(
config.dataset.bert_name)
self.cls_token_id = tokenizer.cls_token_id
self.sep_token_id = tokenizer.sep_token_id
self.pad_token_id = tokenizer.pad_token_id
def forward(
self,
caps,
cmasks,
vfeats,
vmasks,
**kwargs
):
# ActionLocalization assumes batch_size=1; squeeze it.
caps = caps.squeeze(0)
cmasks = cmasks.squeeze(0)
vfeats = vfeats.squeeze(0)
vmasks = vmasks.squeeze(0)
# TODO (huxu): other ways to do negative examples; move the following
# into your criterion forward.
dummy_caps = torch.LongTensor(
[[self.cls_token_id, self.sep_token_id,
self.pad_token_id, self.sep_token_id]],
).to(caps.device).repeat(vfeats.size(0), 1)
dummy_cmasks = torch.BoolTensor(
[[0, 1, 0, 1]] # pad are valid for attention.
).to(caps.device).repeat(vfeats.size(0), 1)
outputs = self.forward_video(
vfeats,
vmasks,
dummy_caps,
dummy_cmasks,
output_hidden_states=True
)
video_seq = outputs[:, 1:vmasks.size(1)+1].masked_select(
vmasks.unsqueeze(-1)
).view(-1, self.hidden_size)
pooled_text = self.forward_text(
caps,
cmasks,
output_hidden_states=False
)
# this line is not right.
logits = torch.mm(video_seq, pooled_text.transpose(1, 0))
return {"logits": logits}
class MMFusionShareActionLocalization(MMFusionShare):
def __init__(self, config, **kwargs):
super().__init__(config)
tokenizer = AutoTokenizer.from_pretrained(
config.dataset.bert_name)
self.cls_token_id = tokenizer.cls_token_id
self.sep_token_id = tokenizer.sep_token_id
self.pad_token_id = tokenizer.pad_token_id
def forward(
self,
caps,
cmasks,
vfeats,
vmasks,
**kwargs
):
# ActionLocalization assumes batch_size=1; squeeze it.
caps = caps.squeeze(0)
cmasks = cmasks.squeeze(0)
vfeats = vfeats.squeeze(0)
vmasks = vmasks.squeeze(0)
# TODO (huxu): other ways to do negative examples; move the following
# into your criterion forward.
dummy_caps = torch.LongTensor(
[[self.cls_token_id, self.sep_token_id,
self.pad_token_id, self.sep_token_id]],
).to(caps.device).repeat(vfeats.size(0), 1)
dummy_cmasks = torch.BoolTensor(
[[0, 1, 0, 1]] # pad are valid for attention.
).to(caps.device).repeat(vfeats.size(0), 1)
outputs = self.forward_video(
vfeats,
vmasks,
dummy_caps,
dummy_cmasks,
output_hidden_states=True
)
video_seq = outputs[:, 1:vmasks.size(1)+1].masked_select(
vmasks.unsqueeze(-1)
).view(-1, self.hidden_size)
pooled_text = self.forward_text(
caps,
cmasks,
output_hidden_states=False
)
# this line is not right.
logits = torch.mm(video_seq, pooled_text.transpose(1, 0))
return {"logits": logits}
| 30,634 | 32.047465 | 90 | py |
rej-summ | rej-summ-main/examples/MMPT/mmpt/datasets/fairseqmmdataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
TODO (huxu): fairseq wrapper class for all datasets you define: mostly MMDataset.
"""
from collections import OrderedDict
from torch.utils.data import Dataset
from torch.utils.data.dataloader import default_collate
from fairseq.data import FairseqDataset, data_utils
class FairseqMMDataset(FairseqDataset):
"""
A wrapper class for MMDataset for fairseq.
"""
def __init__(self, mmdataset):
if not isinstance(mmdataset, Dataset):
raise TypeError("mmdataset must be of type `torch.utils.data.dataset`.")
self.mmdataset = mmdataset
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
self.epoch = epoch
def __getitem__(self, idx):
with data_utils.numpy_seed(43211, self.epoch, idx):
return self.mmdataset[idx]
def __len__(self):
return len(self.mmdataset)
def collater(self, samples):
if hasattr(self.mmdataset, "collator"):
return self.mmdataset.collator(samples)
if len(samples) == 0:
return {}
if isinstance(samples[0], dict):
batch = OrderedDict()
for key in samples[0]:
if samples[0][key] is not None:
batch[key] = default_collate([sample[key] for sample in samples])
return batch
else:
return default_collate(samples)
def size(self, index):
"""dummy implementation: we don't use --max-tokens"""
return 1
def num_tokens(self, index):
"""dummy implementation: we don't use --max-tokens"""
return 1
| 1,785 | 29.793103 | 85 | py |
rej-summ | rej-summ-main/examples/MMPT/mmpt/datasets/mmdataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from collections import OrderedDict
from torch.utils.data import Dataset
from torch.utils.data.dataloader import default_collate
from ..utils import set_seed
class MMDataset(Dataset):
"""
A generic multi-modal dataset.
Args:
`meta_processor`: a meta processor,
handling loading meta data and return video_id and text_id.
`video_processor`: a video processor,
handling e.g., decoding, loading .np files.
`text_processor`: a text processor,
handling e.g., tokenization.
`aligner`: combine the video and text feature
as one training example.
"""
def __init__(
self,
meta_processor,
video_processor,
text_processor,
align_processor,
):
self.split = meta_processor.split
self.meta_processor = meta_processor
self.video_processor = video_processor
self.text_processor = text_processor
self.align_processor = align_processor
def __len__(self):
return len(self.meta_processor)
def __getitem__(self, idx):
if self.split == "test":
set_seed(idx)
video_id, text_id = self.meta_processor[idx]
video_feature = self.video_processor(video_id)
text_feature = self.text_processor(text_id)
output = self.align_processor(video_id, video_feature, text_feature)
# TODO (huxu): the following is for debug purpose.
output.update({"idx": idx})
return output
def collater(self, samples):
"""This collator is deprecated.
set self.collator = MMDataset.collater.
see collator in FairseqMMDataset.
"""
if len(samples) == 0:
return {}
if isinstance(samples[0], dict):
batch = OrderedDict()
for key in samples[0]:
if samples[0][key] is not None:
batch[key] = default_collate(
[sample[key] for sample in samples])
# if torch.is_tensor(batch[key]):
# print(key, batch[key].size())
# else:
# print(key, len(batch[key]))
return batch
else:
return default_collate(samples)
def print_example(self, output):
print("[one example]", output["video_id"])
if (
hasattr(self.align_processor, "subsampling")
and self.align_processor.subsampling is not None
and self.align_processor.subsampling > 1
):
for key in output:
if torch.is_tensor(output[key]):
output[key] = output[key][0]
# search tokenizer to translate ids back.
tokenizer = None
if hasattr(self.text_processor, "tokenizer"):
tokenizer = self.text_processor.tokenizer
elif hasattr(self.align_processor, "tokenizer"):
tokenizer = self.align_processor.tokenizer
if tokenizer is not None:
caps = output["caps"].tolist()
if isinstance(caps[0], list):
caps = caps[0]
print("caps", tokenizer.decode(caps))
print("caps", tokenizer.convert_ids_to_tokens(caps))
for key, value in output.items():
if torch.is_tensor(value):
if len(value.size()) >= 3: # attention_mask.
print(key, value.size())
print(key, "first", value[0, :, :])
print(key, "last", value[-1, :, :])
else:
print(key, value)
print("[end of one example]")
| 3,873 | 33.589286 | 76 | py |
rej-summ | rej-summ-main/examples/MMPT/mmpt/evaluators/predictor.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
import json
import numpy as np
import torch
import pickle
import math
from tqdm import tqdm
class Predictor(object):
"""this base class is used to save predictions to disk
(and being called by a evaluator later).
Predictor has minimum support of single gpu prediction.
"""
def __init__(self, config):
self.pred_dir = None # on-the-fly eval does not save the results.
if hasattr(config, "eval") and config.eval is not None:
self.pred_dir = config.eval.save_path
os.makedirs(self.pred_dir, exist_ok=True)
def __call__(self, outputs):
"""extract the prediction and save it."""
raise NotImplementedError
def predict_loop(self, model, eval_dataloader, output_file=None):
"""on-the-fly prediction on a single gpu."""
self.full_scores = []
model.eval()
model = model.to(0)
with torch.no_grad():
for data in eval_dataloader:
data = self.to_ctx(data)
outputs = model(**data)
outputs.update(data)
self(outputs)
return self.finalize(output_file)
def finalize(self, output_file):
pass
def to_ctx(self, data, ctx=0, dtype=None):
if isinstance(data, dict):
for key in data:
if torch.is_tensor(data[key]):
if dtype is not None and data[key].dtype == torch.float32:
data[key] = data[key].to(dtype)
data[key] = data[key].to(ctx)
return data
else:
raise ValueError("non-dict type of batch is not supported yet.")
class NLGPredictor(Predictor):
"""Predicting Text from MMFusion models."""
"""TODO: make a context."""
def __init__(self, config):
super().__init__(config)
from transformers import AutoTokenizer
self.tokenizer = AutoTokenizer.from_pretrained(
config.dataset.bert_name,
bos_token="[CLS]", eos_token="[SEP]")
self.bos_token_id = self.tokenizer.bos_token_id
self.eos_token_id = self.tokenizer.eos_token_id
def predict_loop(self, model, eval_dataloader, output_file=None):
"""TODO: refactor base classes."""
ctx = 0
outputs = {"outputs": [], "targets": [[]]}
model.eval()
model = model.to(ctx)
with torch.no_grad():
for data in tqdm(eval_dataloader):
data = self.to_ctx(data, ctx)
self(data, model, outputs)
return self.finalize(outputs, output_file)
def __call__(self, data, model, outputs):
data.update({
"bos_token_id": self.bos_token_id,
"eos_token_id": self.eos_token_id
})
output = model.generate(**data)
assert len(output) == len(data["ref"])
for idx, _output in enumerate(output):
generated_text = self.tokenizer.decode(
_output, skip_special_tokens=True)
if generated_text == "":
generated_text = "none"
outputs["outputs"].append(generated_text)
outputs["targets"][0].append(data["ref"][idx])
if random.random() < 0.001:
print("_output", _output)
print("generated_text", generated_text)
print("ref", data["ref"][idx])
def finalize(self, outputs, output_file=None):
if output_file is not None:
with open(os.path.join(
self.pred_dir, output_file + ".json"), "w") as fw:
json.dump(outputs, fw, indent=4)
return outputs
class RetrievalPredictor(Predictor):
"""generated `pooled_video` and `pooled_text`."""
def __init__(self, config):
super().__init__(config)
from transformers import AutoTokenizer
self.tokenizer = AutoTokenizer.from_pretrained(
config.dataset.bert_name)
def predict_loop(
self,
model,
eval_dataloader,
output_file="retrieval.npy"
):
"""on-the-fly prediction on a single gpu."""
full_scores = []
texts = []
model.eval()
model = model.cuda()
with torch.no_grad():
for data in eval_dataloader:
# convert to dict.
if not isinstance(data, dict):
data = {
"caps": data[0],
"cmasks": data[1],
"vfeats": data[2],
"vmasks": data[3],
"video_id": data[4]
}
data = self.to_ctx(data)
outputs = model(**data)
outputs.update(data)
self(outputs, full_scores)
for _cap in data["caps"]:
texts.append(
self.tokenizer.decode(_cap, skip_special_tokens=True)
)
return self.finalize(full_scores, texts, output_file)
def __call__(self, sample, full_scores):
scores = self._get_pooled_outputs(sample)
self._append_scores(scores, full_scores)
def finalize(self, full_scores, texts, output_file=None):
outputs = self._aggregate_scores(full_scores)
if output_file is not None:
np.save(os.path.join(self.pred_dir, output_file + ".npy"), outputs)
return {"outputs": outputs, "texts": texts}
def _get_pooled_outputs(self, outputs):
if "pooled_video" in outputs:
return outputs["pooled_video"], outputs["pooled_text"]
else:
raise ValueError("unknown format of outputs.")
def _append_scores(self, scores, full_scores):
assert len(scores) == 2
if len(full_scores) == 0:
full_scores.append([])
full_scores.append([])
full_scores[0].append(scores[0].cpu().detach().numpy())
full_scores[1].append(scores[1].cpu().detach().numpy())
def _aggregate_scores(self, scores):
assert len(scores) == 2
video_hidden = np.concatenate(scores[0], axis=0)
text_hidden = np.concatenate(scores[1], axis=0)
# clear up.
self.full_scores = []
return np.matmul(text_hidden, video_hidden.T)
class QAPredictor(Predictor):
"""generated `pooled_video` and `pooled_text`."""
def __init__(self, config):
super().__init__(config)
"""predictor maintains scores and aggregate them."""
def predict_loop(self, model, eval_dataloader, output_file="qa.npy"):
"""on-the-fly prediction on a single gpu."""
self.full_scores = []
model.eval()
model = model.cuda()
with torch.no_grad():
for data in eval_dataloader:
# reshape answers and duplicate the video 5 times.
v_len = data["vfeats"].size(1)
hidden_size = data["vfeats"].size(2)
data["vfeats"] = data["vfeats"].unsqueeze(1).repeat(1, 5, 1, 1).view(-1, v_len, hidden_size)
data["vmasks"] = data["vmasks"].unsqueeze(1).repeat(1, 5, 1).view(-1, v_len)
t_len = data["caps"].size(-1)
data["caps"] = data["caps"].view(-1, t_len)
data["cmasks"] = data["cmasks"].view(-1, t_len)
data = self.to_ctx(data)
outputs = model(**data)
outputs.update(data)
self(outputs)
return self.finalize(output_file)
def __call__(self, sample):
hidden_size = sample["pooled_video"].size(-1)
pooled_video = sample["pooled_video"].view(-1, 5, hidden_size)
pooled_text = sample["pooled_text"].view(-1, 5, hidden_size)
scores = torch.bmm(pooled_video, pooled_text.transpose(2, 1))
scores = scores.argmax(-1)
self._append_scores(scores[:, 0], sample["answers"], self.full_scores)
def finalize(self, output_file=None):
outputs, targets = self._aggregate_scores(self.full_scores)
if output_file is not None:
np.save(os.path.join(self.pred_dir, output_file + ".npy"), outputs)
return {"outputs": outputs, "targets": targets}
def _append_scores(self, scores, answers, full_scores):
if len(full_scores) == 0:
full_scores.append([])
full_scores.append([])
full_scores[0].append(scores.cpu().detach().numpy())
full_scores[1].append(answers.cpu().detach().numpy())
def _aggregate_scores(self, scores):
assert len(scores) == 2
outputs = np.concatenate(scores[0], axis=0)
targets = np.concatenate(scores[1], axis=0)
# clear up.
self.full_scores = []
return outputs, targets
class CrossTaskPredictor(Predictor):
"""
CrossTaskPredictor needs to compute the average of logits
for overlapped sliding windows.
"""
def __init__(self, config):
super().__init__(config)
self.lsm = torch.nn.LogSoftmax(dim=1)
self.max_video_len = config.dataset.max_video_len
self.sliding_window = config.dataset.sliding_window
self.sliding_window_size = config.dataset.sliding_window_size
self.annotation_path = config.dataset.annotation_path
def predict_loop(self, model, eval_dataloader, output_file="result.pkl"):
"""refactored from line 144:
https://github.com/DmZhukov/CrossTask/blob/master/train.py
"""
ctx = 0
model.eval()
model = model.to(ctx)
# this is not a loss but just computes neg_log_prob.
Y_pred = {}
Y_true = {}
with torch.no_grad():
for batch in eval_dataloader:
self(batch, model, Y_pred, Y_true)
return self.finalize(Y_pred, Y_true, output_file)
def __call__(self, sample, model, Y_pred, Y_true):
# please install dp from `https://github.com/DmZhukov/CrossTask`
from dp import dp
vid, task = sample['video_id'][0], sample['task'][0]
sample = self.to_ctx(sample)
# compute the average logits over sliding windows.
output = model(**sample)
batch_logits = output["logits"].cpu()
video_len = sample["video_len"][0]
# the following version is slow.
logits = torch.zeros((video_len, batch_logits.size(1)))
logits_counts = torch.zeros((video_len, 1), dtype=torch.long)
# use the same loop as aligner to recover.
batch_logit_idx = 0
for window_start in range(0, video_len, self.sliding_window):
video_end = min(video_len - window_start, self.sliding_window_size)
logits[window_start: window_start + video_end] += batch_logits[
batch_logit_idx: batch_logit_idx + video_end]
batch_logit_idx += video_end
logits_counts[window_start: window_start + video_end] += torch.ones((video_end, 1), dtype=torch.long)
if (video_len - window_start) <= self.sliding_window_size:
break
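# Illustrative sketch of the averaging (hypothetical numbers, not from any
# config): with video_len=10, sliding_window=4 and sliding_window_size=6 the
# loop visits windows [0:6) and [4:10); frames 4-5 are covered twice, so
# their summed logits are divided by 2 below while all other frames keep a
# count of 1.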
logits /= logits_counts
assert logits.size() == (video_len, batch_logits.size(1)), "{}, {}".format(logits.size(), video_len)
O = self.lsm(logits)
y = np.zeros(O.size(), dtype=np.float32)
dp(y, -O.detach().cpu().numpy())
if task not in Y_pred:
Y_pred[task] = {}
Y_pred[task][vid] = y
annot_path = os.path.join(
self.annotation_path, task+'_'+vid+'.csv')
if os.path.exists(annot_path):
if task not in Y_true:
Y_true[task] = {}
Y_true[task][vid] = self._read_assignment(
*y.shape, annot_path)
def finalize(self, Y_pred, Y_true, output_file=None):
if output_file is not None:
with open(
os.path.join(self.pred_dir, output_file + ".pkl"),
"wb") as fw:
pickle.dump(
{"Y_pred": Y_pred, "Y_true": Y_true}, fw,
protocol=pickle.HIGHEST_PROTOCOL)
return {"outputs": Y_pred, "targets": Y_true}
def _read_assignment(self, T, K, path):
"""
refactored from https://github.com/DmZhukov/CrossTask/blob/master/data.py
How to interpret constraints on the loss that is going to be minimized:
lambd is a big number;
self.lambd * C is a big number for all valid position (csv stores invalids)
def forward(self, O, Y, C):
return (Y*(self.lambd * C - self.lsm(O))).mean(dim=0).sum()
This will load the csv file and fill in the step column from the start row to the end row.
"""
Y = np.zeros([T, K], dtype=np.uint8)
with open(path, 'r') as f:
for line in f:
step, start, end = line.strip().split(',')
start = int(math.floor(float(start)))
end = int(math.ceil(float(end)))
step = int(step) - 1
Y[start:end, step] = 1
return Y
class COINPredictor(Predictor):
"""
COINPredictor is similar to CrossTask on sliding windows.
"""
def __init__(self, config):
super().__init__(config)
self.max_video_len = config.dataset.max_video_len
self.sliding_window = config.dataset.sliding_window
self.sliding_window_size = config.dataset.sliding_window_size
def predict_loop(self, model, eval_dataloader, output_file="result.pkl"):
"""refactored from line 144:
https://github.com/DmZhukov/CrossTask/blob/master/train.py
"""
ctx = 0
model.eval()
model = model.to(ctx)
# this is not a loss; it just computes neg_log_prob.
Y_pred = []
Y_true = []
with torch.no_grad():
for batch in eval_dataloader:
self(batch, model, Y_pred, Y_true)
return self.finalize(Y_pred, Y_true, output_file)
def __call__(self, sample, model, Y_pred, Y_true):
sample = self.to_ctx(sample)
# compute the average logits over sliding windows.
output = model(**sample)
logits = self._merge_windows(sample, output)
Y_pred.append(logits.argmax(dim=1))
Y_true.append(sample["video_targets"].squeeze(0).cpu())
def _merge_windows(self, sample, output):
targets = sample["targets"].reshape(-1).cpu()
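# Targets of -100 mark padded/ignored frame positions (PyTorch's
# conventional ignore_index); filter them so window merging below only
# sees valid frames.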
valid_mask = targets != -100
targets = targets[valid_mask]
batch_logits = output["logits"].cpu()
batch_logits = batch_logits.reshape(-1, batch_logits.size(-1))
batch_logits = batch_logits[valid_mask]
video_len = sample["video_len"][0]
# the following version is slow.
logits = torch.zeros((video_len, batch_logits.size(1)))
logits_counts = torch.zeros((video_len, 1), dtype=torch.long)
# use the same loop as aligner to recover.
batch_logit_idx = 0
for window_start in range(0, video_len, self.sliding_window):
video_end = min(video_len - window_start, self.sliding_window_size)
logits[window_start: window_start + video_end] += batch_logits[
batch_logit_idx: batch_logit_idx + video_end]
batch_logit_idx += video_end
logits_counts[window_start: window_start + video_end] += torch.ones((video_end, 1), dtype=torch.long)
if (video_len - window_start) <= self.sliding_window_size:
break
logits /= logits_counts
assert logits.size() == (video_len, batch_logits.size(1)), "{}, {}".format(logits.size(), video_len)
return logits
def finalize(self, Y_pred, Y_true, output_file=None):
Y_pred = torch.cat(Y_pred, dim=0).numpy()
Y_true = torch.cat(Y_true, dim=0).numpy()
assert len(Y_pred) == len(Y_true)
error_mask = Y_pred != Y_true
print("sample error", Y_pred[error_mask][:10], Y_true[error_mask][:10])
print("sample error", Y_pred[error_mask][10:20], Y_true[error_mask][10:20])
if output_file is not None:
with open(
os.path.join(self.pred_dir, output_file + ".pkl"),
"wb") as fw:
pickle.dump(
{"Y_pred": Y_pred, "Y_true": Y_true}, fw,
protocol=pickle.HIGHEST_PROTOCOL)
return {"outputs": Y_pred, "targets": Y_true}
class COINZSPredictor(COINPredictor):
"""
COINZSPredictor for COIN zero-shot prediction.
"""
def __init__(self, config):
super().__init__(config)
self.dataset_config = config.dataset
def predict_loop(self, model, eval_dataloader, output_file="result.pkl"):
"""refactored from line 144:
https://github.com/DmZhukov/CrossTask/blob/master/train.py
"""
ctx = 0
model.eval()
model = model.to(ctx)
with torch.no_grad():
outputs = eval_dataloader.dataset.meta_processor.meta_text_labels(
self.dataset_config)
outputs = self.to_ctx(outputs, ctx)
label_hidden_states = model.forward_text(**outputs).cpu()
label_sim = label_hidden_states @ label_hidden_states.t()
num_labels = label_sim.size(0)
eye_mask = ~torch.eye(num_labels, dtype=torch.bool)
label_sim = label_sim.masked_select(eye_mask).view(num_labels, num_labels - 1)
lbd = label_sim.max()
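# lbd is the largest off-diagonal label-to-label similarity; in __call__ a
# frame is only assigned a step label when its similarity to some label
# exceeds this threshold, otherwise it falls back to the background "O"
# label (index 0).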
# this is not a loss; it just computes neg_log_prob.
Y_pred = []
Y_true = []
with torch.no_grad():
for batch in eval_dataloader:
self(batch, label_hidden_states, model, lbd, Y_pred, Y_true)
return self.finalize(Y_pred, Y_true, output_file)
def reshape_subsample(self, sample):
for key in sample:
if torch.is_tensor(sample[key]):
sample[key] = self.flat_subsample(sample[key])
return sample
def flat_subsample(self, tensor):
if len(tensor.size()) > 1 and tensor.size(0) == 1:
tensor = tensor.squeeze(0)
return tensor
def __call__(self, sample, label_hidden_states, model, lbd, Y_pred, Y_true):
sample = self.reshape_subsample(sample)
sample = self.to_ctx(sample)
# compute the average logits over sliding windows.
sample["output_hidden_states"] = True
video_outputs = model.forward_video(**sample).cpu()
output = {"logits": video_outputs[:, 1:sample["vmasks"].size(1)+1] @ label_hidden_states.t()}
logits = self._merge_windows(sample, output)
# logic of zero-shot for sequence labeling.
logits_argmax = logits.argmax(dim=1) + 1 # 0 is "O" label.
logits_max = logits.max(dim=1)[0]
pred = torch.zeros_like(logits_argmax)
label_select = logits_max > lbd # 73 or 74
pred[label_select] = logits_argmax[label_select]
Y_pred.append(pred)
Y_true.append(sample["video_targets"].squeeze(0).cpu())
def finalize(self, Y_pred, Y_true, output_file=None):
Y_pred = torch.cat(Y_pred, dim=0).numpy()
Y_true = torch.cat(Y_true, dim=0).numpy()
assert len(Y_pred) == len(Y_true)
error_mask = Y_pred != Y_true
print("sample error", Y_pred[error_mask][:10], Y_true[error_mask][:10])
print("sample error", Y_pred[error_mask][10:20], Y_true[error_mask][10:20])
if output_file is not None:
with open(
os.path.join(self.pred_dir, output_file + ".pkl"),
"wb") as fw:
pickle.dump(
{"Y_pred": Y_pred, "Y_true": Y_true}, fw,
protocol=pickle.HIGHEST_PROTOCOL)
return {"outputs": Y_pred, "targets": Y_true}
class DiDeMoPredictor(Predictor):
"""reference: https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/eval.py
https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/data_processing.py
"""
def __init__(self, config):
super().__init__(config)
# load targets.
with open(config.dataset.test_path) as data_file:
self.test_data = json.load(data_file)
def predict_loop(self, model, eval_dataloader, output_file="didemo.npy"):
"""
TODO: two solutions here.
"""
import itertools
# 21 chunks.
self.possible_segments = [(0,0), (1,1), (2,2), (3,3), (4,4), (5,5)]
for i in itertools.combinations(range(6), 2):
self.possible_segments.append(i)
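# 6 single-chunk segments plus the C(6, 2) = 15 two-chunk spans (i, j) with
# i < j from itertools.combinations give the 21 chunks mentioned above.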
# pick segments from a video.
"""on-the-fly prediction on a single gpu."""
self.full_scores = []
model.eval()
model = model.cuda()
with torch.no_grad():
for data in eval_dataloader:
# TODO special forwarding logic here.
data = self.to_ctx(data)
data["output_hidden_states"] = True
hidden_video = model.forward_video(**data)
data["output_hidden_states"] = False
pooled_text = model.forward_text(**data)
outputs = {
"hidden_video": hidden_video,
"pooled_text": pooled_text
}
outputs.update(data)
self(outputs)
return self.finalize(output_file)
def __call__(self, sample):
# TODO: make an index select from self.possible_segments.
hidden_video = sample["hidden_video"]
pooled_text = sample["pooled_text"]
vmasks = sample["vmasks"]
# probably maintain valid results here.
hidden_video = hidden_video[:, 1:-1, :]
# probably maintain valid results here.
pooled_video = []
for s, e in self.possible_segments:
pooled_video.append(
torch.mean(
hidden_video[:, int(s*5):int((e+1)*5), :],
dim=1, keepdim=True)
)
pooled_video = torch.cat(pooled_video, dim=1)
scores = torch.bmm(
pooled_video, pooled_text.unsqueeze(-1)).squeeze(-1).cpu()
ranks = scores.argsort(dim=-1, descending=True)
for batch_idx, rank in enumerate(ranks):
rank_of_moment = []
for m_idx, moment in enumerate(rank):
s, e = self.possible_segments[moment.item()]
if torch.any(
vmasks[batch_idx, int(s*5):int((e+1)*5)]
):
rank_of_moment.append((s, e))
self.full_scores.append(rank_of_moment)
def finalize(self, output_file=None):
outputs = self._aggregate_scores(self.full_scores)
if output_file is not None:
np.save(os.path.join(self.pred_dir, output_file + ".npy"), outputs)
return {"outputs": outputs, "targets": self.test_data}
def _aggregate_scores(self, scores):
self.full_scores = []
return scores
| 23,125 | 37.802013 | 113 | py |
rej-summ | rej-summ-main/examples/MMPT/mmpt/processors/how2processor.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. All Rights Reserved
import torch
import math
import pickle
import random
import os
import numpy as np
from collections import deque
from typing import Optional, Tuple, List
from .processor import (
Processor,
MetaProcessor,
TextProcessor,
Aligner,
MMAttentionMask2DProcessor
)
from ..utils import ShardedTensor
class How2MetaProcessor(MetaProcessor):
def __init__(self, config):
super().__init__(config)
path = self._get_split_path(config)
with open(path) as fd:
self.data = [line.strip() for line in fd]
def __getitem__(self, idx):
video_id = self.data[idx]
return video_id, video_id
class ShardedHow2MetaProcessor(How2MetaProcessor):
def __init__(self, config):
super().__init__(config)
self.split = str(config.split)
self.vfeat_dir = config.vfeat_dir
self._init_shard()
def _init_shard(self):
if self.split == "train":
meta_fn = os.path.join(self.vfeat_dir, "train" + "_meta.pkl")
with open(meta_fn, "rb") as fr:
meta = pickle.load(fr)
elif self.split == "valid":
meta_fn = os.path.join(self.vfeat_dir, "val" + "_meta.pkl")
with open(meta_fn, "rb") as fr:
meta = pickle.load(fr)
elif self.split == "test":
print("use how2 val as test.")
meta_fn = os.path.join(self.vfeat_dir, "val" + "_meta.pkl")
with open(meta_fn, "rb") as fr:
meta = pickle.load(fr)
else:
raise ValueError("unsupported for MetaProcessor:", self.split)
video_id_to_shard = {}
for shard_id in meta:
for video_idx, video_id in enumerate(meta[shard_id]):
video_id_to_shard[video_id] = (shard_id, video_idx)
self.video_id_to_shard = video_id_to_shard
def __getitem__(self, idx):
video_id, video_id = super().__getitem__(idx)
shard_id, shard_idx = self.video_id_to_shard[video_id]
meta = (video_id, idx, shard_id, shard_idx)
return meta, meta
class ShardedVideoProcessor(Processor):
"""
memory-mapped (mmapped) shards of numpy video features.
"""
def __init__(self, config):
self.split = str(config.split)
self.vfeat_dir = config.vfeat_dir
def __call__(self, video_id):
_, _, shard_id, video_idx = video_id
if self.split == "train":
shard = ShardedTensor.load(
os.path.join(self.vfeat_dir, "train" + "_" + str(shard_id)),
"r"
)
elif self.split == "valid":
shard = ShardedTensor.load(
os.path.join(self.vfeat_dir, "val" + "_" + str(shard_id)),
"r"
)
elif self.split == "test":
shard = ShardedTensor.load(
os.path.join(self.vfeat_dir, "val" + "_" + str(shard_id)),
"r"
)
else:
raise ValueError("unknown split", self.split)
feat = shard[video_idx]
return feat
class ShardedTextProcessor(Processor):
def __init__(self, config):
self.tfeat_dir = str(config.tfeat_dir)
self.split = str(config.split)
def __call__(self, video_id):
_, _, shard_id, shard_idx = video_id
if self.split == "train":
target_path = self.tfeat_dir + "train" + "_" + str(shard_id)
elif self.split == "valid":
target_path = self.tfeat_dir + "val" + "_" + str(shard_id)
elif self.split == "test":
target_path = self.tfeat_dir + "val" + "_" + str(shard_id)
else:
raise ValueError("unknown split", self.split)
startend = ShardedTensor.load(
target_path + ".startends", "r")[shard_idx]
cap_ids = ShardedTensor.load(
target_path + ".caps_ids", "r")[shard_idx]
cap = []
for clip_idx in range(len(cap_ids)):
clip = cap_ids[clip_idx]
cap.append(clip[clip != -1].tolist())
start, end = startend[:, 0].tolist(), startend[:, 1].tolist()
return {"start": start, "end": end, "cap": cap}
class FixedLenAligner(Aligner):
"""
In the model we assume text is on the left (closer to BERT formulation)
and video is on the right.
We fix the total length of text + video.
max_video_len is in number of secs.
max_text_len is in number of tokens.
special token format:
we use the format [CLS] [SEP] text tokens [SEP] [PAD] ...
[CLS] will be split out into:
[CLS] video tokens [SEP] text tokens [SEP] [PAD] ...
token_type_ids will be generated by the model (for now).
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
so each sequence owns a [SEP] token for no-ops.
"""
def __init__(self, config):
super().__init__(config)
self.text_clip_sampler = TextClipSamplingProcessor(
self.max_len - self.max_video_len - 3
)
"""
decide subsampling:
`config.subsampling` will change batch_size in trainer.
`config.clip_per_video` (used by RetriTask) doesn't
change batch_size in trainer.
"""
subsampling = config.subsampling \
if config.subsampling is not None else None
if config.clip_per_video is not None:
subsampling = config.clip_per_video
self.subsampling = subsampling
def _get_text_maxlen(self):
# use max text len
return self.text_clip_sampler.max_text_len
def __call__(self, video_id, video_feature, text_feature):
from transformers import default_data_collator
video_idx = video_id[1]
if self.subsampling is not None and self.subsampling >= 1:
batch = []
for _ in range(self.subsampling):
centerclip_idx = random.randint(
0, len(text_feature["start"]) - 1)
batch.append(
self.sampling(
video_idx,
video_feature,
text_feature,
centerclip_idx,
self._get_text_maxlen()
))
batch = self.batch_post_processing(batch, video_feature)
batch = default_data_collator(batch)
else:
raise ValueError(
"dataset.subsampling must be >= 1 for efficient video loading.")
batch = self.sampling(video_idx, video_feature, text_feature)
batch = self.batch_post_processing(batch, video_feature)
batch["video_id"] = video_id if isinstance(video_id, str) \
else video_id[0]
# e2e: make sure frame ids is into tensor.
assert torch.is_tensor(batch["vfeats"])
return batch
def sampling(
self,
video_idx,
video_feature,
text_feature,
centerclip_idx=None,
sampled_max_text_len=None,
):
text_clip_indexs = self.text_clip_sampler(
text_feature, centerclip_idx,
sampled_max_text_len
)
if isinstance(video_feature, np.ndarray):
video_len = len(video_feature)
else:
video_len = math.ceil(text_feature["end"][-1])
video_end = min(
math.ceil(text_feature["end"][text_clip_indexs[-1]]),
video_len
)
video_start = max(
min(
math.floor(text_feature["start"][text_clip_indexs[0]]),
video_end),
0
)
video_clips = {"start": [video_start], "end": [video_end]}
# tensorize.
vfeats, vmasks = self._build_video_seq(
video_feature, video_clips
)
caps, cmasks = self._build_text_seq(
text_feature, text_clip_indexs
)
text_start = text_clip_indexs[0]
text_end = text_clip_indexs[-1] + 1
return {
"caps": caps,
"cmasks": cmasks,
"vfeats": vfeats,
"vmasks": vmasks,
"video_start": video_start,
"video_end": video_end,
"text_start": text_start,
"text_end": text_end,
}
class VariedLenAligner(FixedLenAligner):
def __init__(self, config):
super().__init__(config)
self.sampled_min_len = config.sampled_min_len
self.sampled_max_len = config.sampled_max_len
def _get_text_maxlen(self):
return random.randint(self.sampled_min_len, self.sampled_max_len)
class StartClipAligner(VariedLenAligner):
def sampling(
self,
video_idx,
video_feature,
text_feature,
centerclip_idx=None,
sampled_max_text_len=None,
):
return super().sampling(
video_idx, video_feature, text_feature, 0)
class OverlappedAligner(VariedLenAligner):
"""video clip and text clip has overlappings
but may not be the same start/end."""
def __init__(self, config):
super().__init__(config)
self.sampled_video_min_len = config.sampled_video_min_len
self.sampled_video_max_len = config.sampled_video_max_len
self.video_clip_sampler = VideoClipSamplingProcessor()
def _get_video_maxlen(self):
return random.randint(
self.sampled_video_min_len, self.sampled_video_max_len)
def sampling(
self,
video_idx,
video_feature,
text_feature,
centerclip_idx=None,
sampled_max_text_len=None,
):
text_clip_indexs = self.text_clip_sampler(
text_feature, centerclip_idx,
sampled_max_text_len
)
if isinstance(video_feature, np.ndarray):
video_len = len(video_feature)
else:
video_len = math.ceil(text_feature["end"][-1])
low = math.floor(text_feature["start"][text_clip_indexs[0]])
high = math.ceil(text_feature["end"][text_clip_indexs[-1]])
if low < high:
center = random.randint(low, high)
else:
center = int((low + high) // 2)
center = max(0, min(video_feature.shape[0] - 1, center))
assert 0 <= center < video_feature.shape[0]
video_clips = self.video_clip_sampler(
video_len, self._get_video_maxlen(), center
)
video_start = video_clips["start"][0]
video_end = video_clips["end"][0]
# tensorize.
vfeats, vmasks = self._build_video_seq(
video_feature, video_clips
)
caps, cmasks = self._build_text_seq(
text_feature, text_clip_indexs
)
text_start = text_clip_indexs[0]
text_end = text_clip_indexs[-1] + 1
return {
"caps": caps,
"cmasks": cmasks,
"vfeats": vfeats,
"vmasks": vmasks,
"video_start": video_start,
"video_end": video_end,
"text_start": text_start,
"text_end": text_end,
}
class MFMMLMAligner(FixedLenAligner):
"""
`FixedLenAligner` with Masked Language Model and Masked Frame Model.
"""
def __init__(self, config):
super().__init__(config)
keep_prob = config.keep_prob if config.keep_prob is not None else 1.0
self.text_clip_sampler = TextClipSamplingProcessor(
self.max_len - self.max_video_len - 3, keep_prob
)
self.sampled_min_len = config.sampled_min_len
self.sampled_max_len = config.sampled_max_len
self.masked_token_sampler = TextMaskingProcessor(config)
self.mm_type = config.mm_type \
if config.mm_type is not None else "full"
self.attnmasker = MMAttentionMask2DProcessor() \
if self.mm_type == "textgen" else None
self.masked_frame_sampler = FrameMaskingProcessor(config)
self.lazy_vfeat_mask = (
False if config.lazy_vfeat_mask is None else config.lazy_vfeat_mask
)
self.mm_prob = config.mm_prob if config.mm_prob is not None else 0.
def __call__(self, video_id, video_feature, text_feature):
from transformers import default_data_collator
if self.subsampling is not None and self.subsampling > 1:
batch = []
for _ in range(self.subsampling):
centerclip_idx = random.randint(
0, len(text_feature["start"]) - 1)
sampled_max_text_len = random.randint(
self.sampled_min_len, self.sampled_max_len
)
batch.append(
self.sampling(
video_id,
video_feature,
text_feature,
centerclip_idx,
sampled_max_text_len,
)
)
batch = self.batch_post_processing(batch, video_feature)
batch = default_data_collator(batch)
else:
batch = self.sampling(video_id, video_feature, text_feature)
batch = self.batch_post_processing(batch, video_feature)
batch["video_id"] = video_id if isinstance(video_id, str) \
else video_id[0]
return batch
def sampling(
self,
video_id,
video_feature,
text_feature,
centerclip_idx=None,
sampled_max_text_len=None,
):
output = FixedLenAligner.sampling(self,
video_id, video_feature, text_feature,
centerclip_idx, sampled_max_text_len)
masking_text, masking_video = None, None
if random.random() < self.mm_prob:
if random.random() > 0.5:
masking_text, masking_video = self.mm_type, "no"
else:
masking_text, masking_video = "no", "full"
video_feats = output["vfeats"] if not self.lazy_vfeat_mask else None
video_label = self.masked_frame_sampler(
output["vmasks"], masking_video, vfeats=video_feats)
caps, text_label = self.masked_token_sampler(
output["caps"], masking_text)
output.update({
"caps": caps,
"video_label": video_label,
"text_label": text_label,
})
if self.attnmasker is not None:
attention_mask = self.attnmasker(
output["vmasks"], output["cmasks"], masking_text)
output.update({
"attention_mask": attention_mask
})
return output
class FrameMaskingProcessor(Processor):
def __init__(self, config):
self.mfm_probability = 0.15
if config.mfm_probability is not None:
self.mfm_probability = config.mfm_probability
def __call__(self, vmasks, modality_masking=None, vfeats=None):
"""
We perform lazy masking to save data transfer time.
It only generates `video_label` by default and the MFM model
will do the actual masking.
Return: `video_label` is a binary mask.
"""
video_label = vmasks.clone()
if modality_masking is not None:
if modality_masking == "full":
probability_matrix = torch.full(video_label.shape, 1.)
elif modality_masking == "no":
probability_matrix = torch.full(video_label.shape, 0.)
elif modality_masking == "inverse":
probability_matrix = torch.full(
video_label.shape, 1. - self.mfm_probability)
else:
raise ValueError("unknown modality masking.", modality_masking)
else:
probability_matrix = torch.full(
video_label.shape, self.mfm_probability)
masked_indices = torch.bernoulli(probability_matrix).bool()
# We only compute loss on masked tokens
video_label[~masked_indices] = 0
if vfeats is not None:
vfeats[video_label, :] = 0.0
return video_label
class TextGenerationProcessor(Processor):
def __init__(self, tokenizer):
self.bos_token_id = tokenizer.bos_token_id
self.pad_token_id = tokenizer.pad_token_id
def __call__(self, inputs):
labels = inputs.clone()
# [CLS] [SEP] for video
labels[:2] = -100
# keep [SEP] for text.
pad_mask = labels == self.pad_token_id
labels[pad_mask] = -100
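# Shift the text portion (positions 2 onward) right by one and prepend the
# BOS token, so position i of `inputs` is trained to predict the original
# token now held at position i of `labels` (standard teacher forcing for
# autoregressive generation).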
inputs[2:] = torch.cat([
torch.LongTensor([self.bos_token_id]),
inputs[2:-1]])
inputs[pad_mask] = self.pad_token_id
assert len(inputs) == len(labels)
return inputs, labels
class TextMaskingProcessor(Processor):
def __init__(self, config):
"""this function is borrowed from
`transformers/data/data_collator.DataCollatorForLanguageModeling`"""
self.mlm_probability = 0.15
if config.mlm_probability is not None:
self.mlm_probability = config.mlm_probability
self.bert_name = config.bert_name
# [CLS] is used as bos_token and [SEP] is used as eos_token.
# https://huggingface.co/transformers/master/model_doc/bertgeneration.html
from transformers import AutoTokenizer
self.tokenizer = AutoTokenizer.from_pretrained(
self.bert_name, bos_token="[CLS]", eos_token="[SEP]")
self.textgen = TextGenerationProcessor(self.tokenizer)
def __call__(
self, inputs: torch.Tensor,
modality_masking=None,
special_tokens_mask: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
expand modality_masking into
None: traditional bert masking.
"no": no masking.
"full": all [MASK] token for generation.
"gen": autoregressive generation.
"""
"""
Prepare masked tokens inputs/labels for masked language modeling:
80% MASK, 10% random, 10% original.
"""
labels = inputs.clone()
# We sample a few tokens in each sequence for MLM training
# (with probability `self.mlm_probability`)
if modality_masking is not None:
if modality_masking == "full":
probability_matrix = torch.full(labels.shape, 1.)
elif modality_masking == "no":
probability_matrix = torch.full(labels.shape, 0.)
elif modality_masking.startswith("textgen"):
# [CLS] [SEP] <s> ...
inputs, labels = self.textgen(inputs)
if "mask" not in modality_masking:
return inputs, labels
inputs = self.mask_input(inputs, special_tokens_mask)
return inputs, labels
elif modality_masking == "mask":
inputs = self.mask_input(inputs, special_tokens_mask)
labels = torch.full(inputs.shape, -100)
return inputs, labels
elif modality_masking == "inverse":
probability_matrix = torch.full(labels.shape, 1. - self.mlm_probability)
else:
raise ValueError("unknown modality masking.", modality_masking)
else:
probability_matrix = torch.full(labels.shape, self.mlm_probability)
if special_tokens_mask is None:
special_tokens_mask = self.get_special_tokens_mask(
labels.tolist(), already_has_special_tokens=True
)
special_tokens_mask = torch.tensor(
special_tokens_mask, dtype=torch.bool)
else:
special_tokens_mask = special_tokens_mask.bool()
probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time,
# we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = (
torch.bernoulli(
torch.full(labels.shape, 0.8)).bool() & masked_indices
)
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(
self.tokenizer.mask_token
)
# 10% of the time, we replace masked input tokens with random word
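# The 0.5 below is conditional on not being replaced by [MASK]: of the
# remaining ~20% of masked positions, half (~10% overall) get a random token
# and the other half keep the original token.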
indices_random = (
torch.bernoulli(torch.full(labels.shape, 0.5)).bool()
& masked_indices
& ~indices_replaced
)
random_words = torch.randint(
len(self.tokenizer), labels.shape, dtype=torch.long
)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input
# tokens unchanged
return inputs, labels
def mask_input(self, inputs, special_tokens_mask=None):
# the following is new with masked autoregressive.
probability_matrix = torch.full(
inputs.shape, self.mlm_probability)
if special_tokens_mask is None:
special_tokens_mask = self.get_special_tokens_mask(
inputs.tolist(), already_has_special_tokens=True
)
special_tokens_mask = torch.tensor(
special_tokens_mask, dtype=torch.bool)
else:
special_tokens_mask = special_tokens_mask.bool()
probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
indices_replaced = (
torch.bernoulli(
torch.full(inputs.shape, 0.8)).bool() & masked_indices
)
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(
self.tokenizer.mask_token
)
# 10% of the time, we replace masked input tokens with random word
indices_random = (
torch.bernoulli(torch.full(inputs.shape, 0.5)).bool()
& masked_indices
& ~indices_replaced
)
random_words = torch.randint(
len(self.tokenizer), inputs.shape, dtype=torch.long
)
inputs[indices_random] = random_words[indices_random]
return inputs
def get_special_tokens_mask(
self, token_ids_0: List[int],
token_ids_1: Optional[List[int]] = None,
already_has_special_tokens: bool = False
) -> List[int]:
"""
Note: the version from transformers does not consider pad
as special tokens.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if"
"the provided sequence of "
"ids is already formated with special tokens "
"for the model."
)
return list(map(lambda x: 1 if x in [
self.tokenizer.sep_token_id,
self.tokenizer.cls_token_id,
self.tokenizer.pad_token_id] else 0, token_ids_0))
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1]
class TextClipSamplingProcessor(Processor):
def __init__(self, max_text_len, keep_prob=1.0):
self.max_text_len = max_text_len
self.max_video_len = 256 # always hold.
self.keep_prob = keep_prob
def __call__(
self,
text_feature,
centerclip_idx=None,
sampled_max_text_len=None,
sampled_max_video_len=None,
):
# Let's use all caps for now and see if 256 can cover all of them.
if sampled_max_text_len is not None:
max_text_len = sampled_max_text_len
else:
max_text_len = self.max_text_len
if sampled_max_video_len is not None:
max_video_len = sampled_max_video_len
else:
max_video_len = self.max_video_len
t_num_clips = len(text_feature["start"])
if centerclip_idx is None:
centerclip_idx = random.randint(0, t_num_clips - 1)
start_idx, end_idx = centerclip_idx, centerclip_idx + 1
text_clip_indexs = deque()
text_clip_indexs.append(start_idx)
text_len = len(text_feature["cap"][start_idx])
video_len = max(
0,
text_feature["end"][start_idx]
- text_feature["start"][start_idx],
)
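# Grow the clip window around the center clip, randomly extending left or
# right, until either the text-token budget or the video-length budget is
# exhausted; with keep_prob < 1 a neighbouring clip is occasionally skipped.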
while (
(start_idx > 0 or end_idx < t_num_clips)
and text_len < max_text_len
and video_len < max_video_len
):
if random.random() > 0.5 and end_idx < t_num_clips:
# skip the next one?
if random.random() > self.keep_prob and (end_idx + 1) < t_num_clips:
end_idx = end_idx + 1
text_clip_indexs.append(end_idx)
text_len += len(text_feature["cap"][end_idx])
end_idx += 1
elif start_idx > 0:
if random.random() > self.keep_prob and (start_idx - 1) > 0:
start_idx = start_idx - 1
start_idx -= 1
text_clip_indexs.insert(0, start_idx)
text_len += len(text_feature["cap"][start_idx])
else:
if end_idx < t_num_clips:
if random.random() > self.keep_prob and (end_idx + 1) < t_num_clips:
end_idx = end_idx + 1
text_clip_indexs.append(end_idx)
text_len += len(text_feature["cap"][end_idx])
end_idx += 1
else:
return text_clip_indexs
video_len = max(
0,
text_feature["end"][text_clip_indexs[-1]]
- text_feature["start"][text_clip_indexs[0]],
)
return text_clip_indexs
class VideoClipSamplingProcessor(Processor):
def __call__(self, video_len, max_video_len, center):
"""
`video_len`: length of the video.
`max_video_len`: maximum video tokens allowed in a sequence.
`center`: initial starting index.
"""
assert center >= 0 and center < video_len
t_clip_len = 0
start, end = center, center
while (start > 0 or end < video_len) and t_clip_len < max_video_len:
# decide the direction to grow.
if start <= 0:
end += 1
elif end >= video_len:
start -= 1
elif random.random() > 0.5:
end += 1
else:
start -= 1
t_clip_len += 1
return {"start": [start], "end": [end]}
class How2MILNCEAligner(FixedLenAligner):
"""reference: `antoine77340/MIL-NCE_HowTo100M/video_loader.py`"""
def __init__(self, config):
super().__init__(config)
self.num_candidates = 4
self.min_time = 5.0
self.num_sec = 3.2
# self.num_sec = self.num_frames / float(self.fps) num_frames=16 / fps = 5
# self.num_frames = 16
def sampling(
self,
video_id,
video_feature,
text_feature,
centerclip_idx=None, # will be ignored.
sampled_max_text_len=None # will be ignored.
):
text, start, end = self._get_text(text_feature)
video = self._get_video(video_feature, start, end)
vfeats = torch.zeros((self.max_video_len, video_feature.shape[1]))
vmasks = torch.zeros((self.max_video_len,), dtype=torch.bool)
vfeats[: video.shape[0]] = torch.from_numpy(np.array(video))
vmasks[: video.shape[0]] = 1
caps, cmasks = [], []
for words in text:
cap, cmask = self._build_text_seq(text_feature, words)
caps.append(cap)
cmasks.append(cmask)
caps = torch.stack(caps)
cmasks = torch.stack(cmasks)
# video of shape: (video_len)
# text of shape (num_candidates, max_text_len)
return {
"caps": caps,
"cmasks": cmasks,
"vfeats": vfeats,
"vmasks": vmasks,
# "video_id": video_id,
}
def _get_video(self, video_feature, start, end):
start_seek = random.randint(start, int(max(start, end - self.num_sec)))
# duration = self.num_sec + 0.1
return video_feature[start_seek : int(start_seek + self.num_sec)]
def _get_text(self, cap):
ind = random.randint(0, len(cap["start"]) - 1)
if self.num_candidates == 1:
words = [ind]
else:
words = []
cap_start = self._find_nearest_candidates(cap, ind)
for i in range(self.num_candidates):
words.append([max(0, min(len(cap["cap"]) - 1, cap_start + i))])
start, end = cap["start"][ind], cap["end"][ind]
# TODO: May need to be improved for edge cases.
# expand the min time.
if end - start < self.min_time:
diff = self.min_time - end + start
start = max(0, start - diff / 2)
end = start + self.min_time
return words, int(start), int(end)
def _find_nearest_candidates(self, caption, ind):
"""find the range of the clips."""
start, end = ind, ind
#diff = caption["end"][end] - caption["start"][start]
n_candidate = 1
while n_candidate < self.num_candidates:
# the first clip
if start == 0:
return 0
# we add () in the following condition to fix the bug.
elif end == (len(caption["start"]) - 1):
return start - (self.num_candidates - n_candidate)
elif (caption["end"][end] - caption["start"][start - 1]) < (
caption["end"][end + 1] - caption["start"][start]
):
start -= 1
else:
end += 1
n_candidate += 1
return start
class PKLJSONStrTextProcessor(TextProcessor):
"""`caption.json` from howto100m are preprocessed as a
dict `[video_id, json_str]`.
Json parsing tokenization are conducted on-the-fly and cached into dict.
"""
def __init__(self, config, max_clip_text_len=96):
print("[Warning] PKLJSONStrTextProcessor is slow for num_workers > 0.")
self.caption_pkl_path = str(config.caption_pkl_path)
with open(self.caption_pkl_path, "rb") as fd:
self.data = pickle.load(fd)
self.max_clip_text_len = max_clip_text_len
from transformers import AutoTokenizer
self.tokenizer = AutoTokenizer.from_pretrained(
str(config.bert_name), use_fast=config.use_fast
)
def __call__(self, video_id):
caption = self.data[video_id]
if isinstance(caption, str):
import json
caption = json.loads(caption)
cap = []
for clip_idx, text_clip in enumerate(caption["text"]):
clip_ids = []
if isinstance(text_clip, str):
clip_ids = self.tokenizer(
text_clip[: self.max_clip_text_len],
add_special_tokens=False
)["input_ids"]
cap.append(clip_ids)
caption["cap"] = cap
caption.pop("text") # save space.
self.data[video_id] = caption
return caption
| 32,302 | 35.377252 | 88 | py |
rej-summ | rej-summ-main/examples/MMPT/mmpt/processors/processor.py | # Copyright (c) Facebook, Inc. All Rights Reserved
import numpy as np
import os
import torch
class Processor(object):
"""
A generic processor for video (codec, feature etc.) and text.
"""
def __call__(self, **kwargs):
raise NotImplementedError
class MetaProcessor(Processor):
"""
A meta processor is expected to load the metadata of a dataset:
(e.g., video_ids, or captions).
You must implement `__getitem__` (meta datasets are rather diverse).
"""
def __init__(self, config):
self.split = config.split
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
raise NotImplementedError
def _get_split_path(self, config):
splits = {
"train": config.train_path,
"valid": config.val_path,
"test": config.test_path,
}
if config.split is not None:
return splits[config.split]
return config.train_path
class TextProcessor(Processor):
"""
A generic Text processor: rename this as `withTokenizer`.
tokenize a string of text on-the-fly.
Warning: mostly used for end tasks.
(on-the-fly tokenization is slow for how2.)
TODO(huxu): move this class as a subclass.
"""
def __init__(self, config):
self.bert_name = str(config.bert_name)
self.use_fast = config.use_fast
from transformers import AutoTokenizer
self.tokenizer = AutoTokenizer.from_pretrained(
self.bert_name, use_fast=self.use_fast
)
def __call__(self, text_id):
caption = self.tokenizer(text_id, add_special_tokens=False)
return caption["input_ids"]
class VideoProcessor(Processor):
"""
A generic video processor: load a numpy video tokens by default.
"""
def __init__(self, config):
self.vfeat_dir = config.vfeat_dir
def __call__(self, video_fn):
if isinstance(video_fn, tuple):
video_fn = video_fn[0]
assert isinstance(video_fn, str)
video_fn = os.path.join(self.vfeat_dir, video_fn + ".npy")
feat = np.load(video_fn)
return feat
class Aligner(object):
"""
An align processor aligns video and text and outputs a dict of tensors (for a model).
"""
def __init__(self, config):
"""__init__ needs to be light weight for more workers/threads."""
self.split = config.split
self.max_video_len = config.max_video_len
self.max_len = config.max_len
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
str(config.bert_name), use_fast=config.use_fast
)
self.cls_token_id = tokenizer.cls_token_id
self.sep_token_id = tokenizer.sep_token_id
self.pad_token_id = tokenizer.pad_token_id
self.mask_token_id = tokenizer.mask_token_id
def __call__(self, video_id, video_feature, text_feature):
raise NotImplementedError
def _build_video_seq(self, video_feature, video_clips=None):
"""
`video_feature`: available video tokens.
`video_clips`: video clip sequence to build.
"""
if not isinstance(video_feature, np.ndarray):
raise ValueError(
"unsupported type of video_feature", type(video_feature)
)
if video_clips is None:
# this is borrowed from DSAligner
video_start = 0
video_end = min(len(video_feature), self.max_video_len)
# the whole sequence is a single clip.
video_clips = {"start": [video_start], "end": [video_end]}
vfeats = np.zeros(
(self.max_video_len, video_feature.shape[1]), dtype=np.float32
)
vmasks = torch.zeros((self.max_video_len,), dtype=torch.bool)
video_len = 0
for start, end in zip(video_clips["start"], video_clips["end"]):
clip_len = min(self.max_video_len - video_len, (end - start))
if clip_len > 0:
vfeats[video_len: video_len + clip_len] = video_feature[
start: start + clip_len
]
vmasks[video_len: video_len + clip_len] = 1
video_len += clip_len
vfeats = torch.from_numpy(vfeats)
return vfeats, vmasks
def _build_text_seq(self, text_feature, text_clip_indexs=None):
"""
`text_feature`: all available clips.
`text_clip_indexes`: clip sequence to build.
"""
if text_clip_indexs is None:
text_clip_indexs = [0]
full_caps = []
if isinstance(text_feature, dict):
for clip_idx in text_clip_indexs:
full_caps.extend(text_feature["cap"][clip_idx])
else:
full_caps = text_feature
max_text_len = self.max_len - self.max_video_len - 3
full_caps = full_caps[:max_text_len]
full_caps = (
[self.cls_token_id, self.sep_token_id] + full_caps + [self.sep_token_id]
)
text_pad_len = self.max_len - len(full_caps) - self.max_video_len
padded_full_caps = full_caps + [self.pad_token_id] * text_pad_len
caps = torch.LongTensor(padded_full_caps)
cmasks = torch.zeros((len(padded_full_caps),), dtype=torch.bool)
cmasks[: len(full_caps)] = 1
return caps, cmasks
def batch_post_processing(self, batch, video_feature):
return batch
class MMAttentionMask2DProcessor(Processor):
"""text generation requires 2d mask
that is harder to generate by GPU at this stage."""
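# For "textgen" the text block of the 2D mask is lower-triangular (causal)
# while each text position can attend to all valid video tokens; "videogen"
# mirrors this with a causal video block; the default case broadcasts the
# plain 1D mask into a fully bidirectional 2D mask.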
def __call__(self, vmask, cmask, mtype):
if mtype == "textgen":
return self._build_textgeneration_mask(vmask, cmask)
elif mtype == "videogen":
return self._build_videogeneration_mask(vmask, cmask)
else:
return self._build_mm_mask(vmask, cmask)
def _build_mm_mask(self, vmask, cmask):
mask_1d = torch.cat([cmask[:1], vmask, cmask[1:]], dim=0)
return mask_1d[None, :].repeat(mask_1d.size(0), 1)
def _build_videogeneration_mask(self, vmask, cmask):
# cls_mask is only about text otherwise it will leak generation.
cls_text_mask = torch.cat([
# [CLS]
torch.ones(
(1,), dtype=torch.bool, device=cmask.device),
# video tokens and [SEP] for video.
torch.zeros(
(vmask.size(0) + 1,), dtype=torch.bool, device=cmask.device),
cmask[2:]
], dim=0)
# concat horizontially.
video_len = int(vmask.sum())
video_masks = torch.cat([
# [CLS]
torch.ones(
(video_len, 1), dtype=torch.bool, device=cmask.device
),
torch.tril(
torch.ones(
(video_len, video_len),
dtype=torch.bool, device=cmask.device)),
# video_padding
torch.zeros(
(video_len, vmask.size(0) - video_len),
dtype=torch.bool, device=cmask.device
),
# [SEP] for video (unused).
torch.zeros(
(video_len, 1), dtype=torch.bool, device=cmask.device
),
cmask[2:].unsqueeze(0).repeat(video_len, 1)
], dim=1)
text_masks = cls_text_mask[None, :].repeat(
cmask.size(0) - 2, 1)
video_padding_masks = cls_text_mask[None, :].repeat(
vmask.size(0) - video_len, 1)
return torch.cat([
cls_text_mask[None, :],
video_masks,
video_padding_masks,
torch.cat([cmask[:1], vmask, cmask[1:]], dim=0)[None,:],
text_masks
], dim=0)
def _build_textgeneration_mask(self, vmask, cmask):
# cls_mask is only about video otherwise it will leak generation.
cls_video_mask = torch.cat([
# [CLS]
torch.ones(
(1,), dtype=torch.bool, device=cmask.device),
vmask,
# [SEP]
torch.ones((1,), dtype=torch.bool, device=cmask.device),
torch.zeros(
(cmask.size(0)-2,), dtype=torch.bool, device=cmask.device)
], dim=0)
# concat horizontially.
text_len = int(cmask[2:].sum())
text_masks = torch.cat([
# [CLS]
torch.ones(
(text_len, 1), dtype=torch.bool, device=cmask.device
),
vmask.unsqueeze(0).repeat(text_len, 1),
# [SEP] for video.
torch.ones(
(text_len, 1), dtype=torch.bool, device=cmask.device
),
torch.tril(
torch.ones(
(text_len, text_len),
dtype=torch.bool, device=cmask.device)),
# padding.
torch.zeros(
(text_len, cmask.size(0) - text_len - 2),
dtype=torch.bool, device=cmask.device
)
], dim=1)
cls_video_masks = cls_video_mask[None, :].repeat(
vmask.size(0) + 2, 1)
text_padding_masks = cls_video_mask[None, :].repeat(
cmask.size(0) - text_len - 2, 1)
return torch.cat([
cls_video_masks, text_masks, text_padding_masks], dim=0)
| 9,358 | 33.032727 | 86 | py |
rej-summ | rej-summ-main/examples/MMPT/mmpt/processors/dsprocessor.py | # Copyright (c) Facebook, Inc. All Rights Reserved
"""
Processors for all downstream (ds) tasks.
"""
import json
import os
import pickle
import random
import math
import numpy as np
import torch
from collections import defaultdict
from .processor import (
MetaProcessor,
VideoProcessor,
TextProcessor,
Aligner,
MMAttentionMask2DProcessor,
)
from .how2processor import TextGenerationProcessor
# ------------- A General Aligner for all downstream tasks-----------------
class DSAligner(Aligner):
"""
Downstream (DS) aligner shared by all datasets.
"""
def __call__(self, video_id, video_feature, text_feature, wps=0.7):
# random sample a starting sec for video.
video_start = 0
video_end = min(len(video_feature), self.max_video_len)
# the whole sequence is a single clip.
video_clips = {"start": [video_start], "end": [video_end]}
text_feature = {
"cap": [text_feature],
"start": [video_start],
"end": [len(text_feature) / wps],
}
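# wps appears to act as a rough tokens-per-second rate: with the default
# 0.7, a 14-token caption is treated as spanning 14 / 0.7 = 20 seconds.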
text_clip_indexs = [0]
vfeats, vmasks = self._build_video_seq(
video_feature, video_clips
)
caps, cmasks = self._build_text_seq(
text_feature, text_clip_indexs
)
return {
"caps": caps,
"cmasks": cmasks,
"vfeats": vfeats,
"vmasks": vmasks,
"video_id": video_id,
}
class NLGTextProcessor(TextProcessor):
"""
Also return the original text as ref.
"""
def __call__(self, text_id):
return super().__call__(text_id), text_id
class DSNLGAligner(DSAligner):
"""extend with the capability of 2d mask for generation."""
def __init__(self, config):
super().__init__(config)
self.attnmasker = MMAttentionMask2DProcessor()
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
self.bert_name, use_fast=self.use_fast,
bos_token="[CLS]", eos_token="[SEP]"
)
self.tokenizer = tokenizer
self.bos_token_id = tokenizer.bos_token_id
self.eos_token_id = tokenizer.eos_token_id
self.textgen = TextGenerationProcessor(tokenizer)
def __call__(self, video_id, video_feature, text_feature):
output = super().__call__(video_id, video_feature, text_feature[0])
if self.split == "test":
# output.update({"ref": text_feature[1]})
output.update({"ref": self.tokenizer.decode(
output["caps"], skip_special_tokens=True)})
text_label = output["caps"]
cmasks = torch.BoolTensor([1] * text_label.size(0))
caps = torch.LongTensor([
self.cls_token_id,
self.sep_token_id,
self.bos_token_id])
else:
caps, text_label = self.textgen(output["caps"])
cmasks = output["cmasks"]
attention_mask = self.attnmasker(
output["vmasks"], cmasks, "textgen")
output.update({
"caps": caps,
"cmasks": cmasks,
"text_label": text_label,
"attention_mask": attention_mask,
})
return output
# -------------------- MSRVTT ------------------------
class MSRVTTMetaProcessor(MetaProcessor):
"""MSRVTT dataset.
reference: `howto100m/msrvtt_dataloader.py`
"""
def __init__(self, config):
super().__init__(config)
import pandas as pd
data = pd.read_csv(self._get_split_path(config))
# TODO: add a text1ka flag.
if config.split == "train" \
and config.full_test_path is not None \
and config.jsfusion_path is not None:
# add testing videos from full_test_path not used by jsfusion.
additional_data = pd.read_csv(config.full_test_path)
jsfusion_data = pd.read_csv(config.jsfusion_path)
for video_id in additional_data["video_id"]:
if video_id not in jsfusion_data["video_id"].values:
data = data.append(
{"video_id": video_id}, ignore_index=True)
if config.dup is not None and config.split == "train":
data = data.append([data] * (config.dup - 1), ignore_index=True)
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
"""slightly modify with if condition to combine train/test."""
vid, sentence = None, None
vid = self.data["video_id"].values[idx]
if "sentence" in self.data: # for testing.
sentence = self.data["sentence"].values[idx]
else: # for training.
sentence = vid
return vid, sentence
class MSRVTTTextProcessor(TextProcessor):
"""MSRVTT dataset.
reference: `msrvtt_dataloader.py` `MSRVTT_TrainDataLoader`.
TODO (huxu): add max_words.
"""
def __init__(self, config):
super().__init__(config)
self.sentences = None
if config.json_path is not None and config.split == "train":
with open(config.json_path) as fd:
self.data = json.load(fd)
self.sentences = defaultdict(list)
for s in self.data["sentences"]:
self.sentences[s["video_id"]].append(s["caption"])
def __call__(self, text_id):
if self.sentences is not None:
rind = random.randint(0, len(self.sentences[text_id]) - 1)
sentence = self.sentences[text_id][rind]
else:
sentence = text_id
caption = self.tokenizer(sentence, add_special_tokens=False)
return caption["input_ids"]
class MSRVTTNLGTextProcessor(MSRVTTTextProcessor):
"""TODO: change dsaligner and merge to avoid any NLG text processor."""
def __call__(self, text_id):
if self.sentences is not None:
rind = random.randint(0, len(self.sentences[text_id]) - 1)
sentence = self.sentences[text_id][rind]
else:
sentence = text_id
caption = self.tokenizer(sentence, add_special_tokens=False)
return caption["input_ids"], sentence
class MSRVTTQAMetaProcessor(MetaProcessor):
"""MSRVTT-QA: retrieval-based multi-choice QA from JSFusion dataset.
For simplicity, we use the train retrieval model.
reference: `https://github.com/yj-yu/lsmdc`
"""
def __init__(self, config):
super().__init__(config)
import pandas as pd
csv_data = pd.read_csv(self._get_split_path(config), sep="\t")
data = []
for video_id, a1, a2, a3, a4, a5, answer in zip(
csv_data["vid_key"].values,
csv_data["a1"].values,
csv_data["a2"].values,
csv_data["a3"].values,
csv_data["a4"].values,
csv_data["a5"].values,
csv_data["answer"].values):
video_id = video_id.replace("msr", "video")
data.append((video_id, (answer, [a1, a2, a3, a4, a5])))
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
class MSRVTTQATextProcessor(TextProcessor):
"""MSRVTT-QA dataset.
text_ans is of format `(answer, [a1, a2, a3, a4, a5])`.
"""
def __call__(self, text_ans):
for ans_idx, ans in enumerate(text_ans[1]):
if isinstance(ans, str):
text_ans[1][ans_idx] = self.tokenizer(ans, add_special_tokens=False)["input_ids"]
return text_ans
class MSRVTTQAAligner(DSAligner):
"""MSRVTT dataset.
similar to sample in how2.
we call __call__ multiple times.
"""
def __call__(self, video_id, video_feature, text_feature, wps=0.7):
caps = []
cmasks = []
answer = text_feature[0]
for ans_idx, _text_feature in enumerate(text_feature[1]):
output = super().__call__(
video_id, video_feature, _text_feature, wps)
caps.append(output["caps"])
cmasks.append(output["cmasks"])
output.update({
"caps": torch.stack(caps),
"cmasks": torch.stack(cmasks),
"answers": torch.LongTensor([answer]),
})
return output
# -------------------- Youcook -----------------------
class YoucookMetaProcessor(MetaProcessor):
"""Youcook dataset.
reference: `howto100m/youcook_dataloader.py`
note that the data can differ from the original:
(1) some videos already in HowTo100M are removed.
(2) stop words are removed from captions.
TODO (huxu): make a flag to load the original caption.
(see youcookii_annotations_trainval.json).
The max_video_len can be 264 and text can be 64 tokens.
In reality we may not need that long. see projects/task/youcook.yaml
"""
def __init__(self, config):
super().__init__(config)
vfeat_dir = config.vfeat_dir
print(self._get_split_path(config))
with open(self._get_split_path(config), "rb") as fd:
data = pickle.load(fd)
all_valid_video_ids = set(
[os.path.splitext(fn)[0] for fn in os.listdir(vfeat_dir)]
)
recs = []
video_ids = set()
valid_video_ids = set()
for rec in data: # filter videos not available.
udl_idx = rec["id"].rindex("_")
video_id = rec["id"][:udl_idx]
video_ids.add(video_id)
if video_id in all_valid_video_ids:
valid_video_ids.add(video_id)
recs.append(rec)
print("total video_ids in .pkl", len(video_ids))
print("valid video_ids in .pkl", len(valid_video_ids))
print("please verify {train,val}_list.txt")
data = recs
self.data = data
with open(config.trainval_annotation) as fd:
self.youcook_annotation = json.load(fd)["database"]
if config.use_annotation_text is True:
print("using text in annotation.")
self.use_annotation_caption = True
else:
self.use_annotation_caption = False
def __getitem__(self, idx):
def _get_video_and_caption(rec):
vid = rec["id"]
udl_idx = vid.rindex("_")
video_id, clip_id = vid[:udl_idx], int(vid[udl_idx + 1:])
clip = self.youcook_annotation[video_id]["annotations"][clip_id]
start, end = clip["segment"]
if self.use_annotation_caption:
caption = clip["sentence"]
else:
caption = rec["caption"]
return (video_id, start, end), caption
rec = self.data[idx]
video_info, text_info = _get_video_and_caption(rec)
return video_info, text_info
class YoucookVideoProcessor(VideoProcessor):
"""video_fn is a tuple of (video_id, start, end) now."""
def __call__(self, video_fn):
video_id, start, end = video_fn
feat = np.load(os.path.join(self.vfeat_dir, video_id + ".npy"))
return feat[start:end]
class YoucookNLGMetaProcessor(MetaProcessor):
"""NLG uses the original split:
`train_list.txt` and `val_list.txt`
"""
def __init__(self, config):
super().__init__(config)
vfeat_dir = config.vfeat_dir
print(self._get_split_path(config))
with open(self._get_split_path(config)) as fd:
video_ids = [
line.strip().split("/")[1] for line in fd.readlines()]
print("total video_ids in train/val_list.txt", len(video_ids))
all_valid_video_ids = set(
[os.path.splitext(fn)[0] for fn in os.listdir(vfeat_dir)]
)
video_ids = [
video_id for video_id in video_ids
if video_id in all_valid_video_ids]
print("valid video_ids in train/val_list.txt", len(video_ids))
with open(config.trainval_annotation) as fd:
self.youcook_annotation = json.load(fd)["database"]
data = []
for video_id in video_ids:
for clip in self.youcook_annotation[video_id]["annotations"]:
start, end = clip["segment"]
caption = clip["sentence"]
data.append(((video_id, start, end), caption))
self.data = data
def __getitem__(self, idx):
return self.data[idx]
# --------------------- CrossTask -------------------------
class CrossTaskMetaProcessor(MetaProcessor):
def __init__(self, config):
super().__init__(config)
np.random.seed(0) # deterministic random split.
task_vids = self._get_vids(
config.train_csv_path,
config.vfeat_dir,
config.annotation_path)
val_vids = self._get_vids(
config.val_csv_path,
config.vfeat_dir,
config.annotation_path)
# filter out those task and vids appear in val_vids.
task_vids = {
task: [
vid for vid in vids
if task not in val_vids or vid not in val_vids[task]]
for task, vids in task_vids.items()}
primary_info = self._read_task_info(config.primary_path)
test_tasks = set(primary_info['steps'].keys())
# if args.use_related:
related_info = self._read_task_info(config.related_path)
task_steps = {**primary_info['steps'], **related_info['steps']}
n_steps = {**primary_info['n_steps'], **related_info['n_steps']}
# else:
# task_steps = primary_info['steps']
# n_steps = primary_info['n_steps']
all_tasks = set(n_steps.keys())
# filter and keep task in primary or related.
task_vids = {
task: vids for task, vids in task_vids.items()
if task in all_tasks}
# vocab-by-step matrix (A) and vocab (M)
# (huxu): we do not use BoW.
# A, M = self._get_A(task_steps, share="words")
train_vids, test_vids = self._random_split(
task_vids, test_tasks, config.n_train)
print("train_num_videos", sum(len(vids) for vids in train_vids.values()))
print("test_num_videos", sum(len(vids) for vids in test_vids.values()))
# added by huxu to automatically determine the split.
split_map = {
"train": train_vids,
"valid": test_vids,
"test": test_vids
}
task_vids = split_map[config.split]
self.vids = []
for task, vids in task_vids.items():
self.vids.extend([(task, vid) for vid in vids])
self.task_steps = task_steps
self.n_steps = n_steps
def __getitem__(self, idx):
task, vid = self.vids[idx]
n_steps = self.n_steps[task]
steps = self.task_steps[task]
assert len(steps) == n_steps
return (task, vid, steps, n_steps), (task, vid, steps, n_steps)
def __len__(self):
return len(self.vids)
def _random_split(self, task_vids, test_tasks, n_train):
train_vids = {}
test_vids = {}
for task, vids in task_vids.items():
if task in test_tasks and len(vids) > n_train:
train_vids[task] = np.random.choice(
vids, n_train, replace=False).tolist()
test_vids[task] = [
vid for vid in vids if vid not in train_vids[task]]
else:
train_vids[task] = vids
return train_vids, test_vids
def _get_vids(self, path, vfeat_dir, annotation_path):
"""refactored from
https://github.com/DmZhukov/CrossTask/blob/master/data.py
changes: add `vfeat_dir` to check if the video is available.
add `annotation_path` to check if the video is available.
"""
task_vids = {}
with open(path, 'r') as f:
for line in f:
task, vid, url = line.strip().split(',')
# double check the video is available.
if not os.path.exists(
os.path.join(vfeat_dir, vid + ".npy")):
continue
# double check the annotation is available.
if not os.path.exists(os.path.join(
annotation_path,
task + "_" + vid + ".csv")):
continue
if task not in task_vids:
task_vids[task] = []
task_vids[task].append(vid)
return task_vids
def _read_task_info(self, path):
titles = {}
urls = {}
n_steps = {}
steps = {}
with open(path, 'r') as f:
idx = f.readline()
while idx != '':
idx = idx.strip()
titles[idx] = f.readline().strip()
urls[idx] = f.readline().strip()
n_steps[idx] = int(f.readline().strip())
steps[idx] = f.readline().strip().split(',')
next(f)
idx = f.readline()
return {
'title': titles,
'url': urls,
'n_steps': n_steps,
'steps': steps
}
def _get_A(self, task_steps, share="words"):
raise ValueError("running get_A is not allowed for BERT.")
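# The remainder of this method appears to be kept from the original
# CrossTask bag-of-words pipeline; it is unreachable because of the raise
# above and is retained for reference only.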
"""Step-to-component matrices."""
if share == 'words':
# share words
task_step_comps = {
task: [step.split(' ') for step in steps]
for task, steps in task_steps.items()}
elif share == 'task_words':
# share words within same task
task_step_comps = {
task: [[task+'_'+tok for tok in step.split(' ')] for step in steps]
for task, steps in task_steps.items()}
elif share == 'steps':
# share whole step descriptions
task_step_comps = {
task: [[step] for step in steps] for task, steps in task_steps.items()}
else:
# no sharing
task_step_comps = {
task: [[task+'_'+step] for step in steps]
for task, steps in task_steps.items()}
# BERT tokenizer here?
vocab = []
for task, steps in task_step_comps.items():
for step in steps:
vocab.extend(step)
vocab = {comp: m for m, comp in enumerate(set(vocab))}
M = len(vocab)
A = {}
for task, steps in task_step_comps.items():
K = len(steps)
a = torch.zeros(M, K)
for k, step in enumerate(steps):
a[[vocab[comp] for comp in step], k] = 1
a /= a.sum(dim=0)
A[task] = a
return A, M
class CrossTaskVideoProcessor(VideoProcessor):
def __call__(self, video_fn):
task, vid, steps, n_steps = video_fn
video_fn = os.path.join(self.vfeat_dir, vid + ".npy")
feat = np.load(video_fn)
return feat
class CrossTaskTextProcessor(TextProcessor):
def __call__(self, text_id):
task, vid, steps, n_steps = text_id
step_ids = []
for step_str in steps:
step_ids.append(
self.tokenizer(step_str, add_special_tokens=False)["input_ids"]
)
return step_ids
class CrossTaskAligner(Aligner):
"""
    TODO: the formulation of the task is not yet clear; finish this later.
"""
def __init__(self, config):
super().__init__(config)
self.annotation_path = config.annotation_path
self.sliding_window = config.sliding_window
self.sliding_window_size = config.sliding_window_size
def __call__(self, video_id, video_feature, text_feature):
task, vid, steps, n_steps = video_id
annot_path = os.path.join(
self.annotation_path, task + '_' + vid + '.csv')
video_len = len(video_feature)
labels = torch.from_numpy(self._read_assignment(
video_len, n_steps, annot_path)).float()
vfeats, vmasks, targets = [], [], []
# sliding window on video features and targets.
for window_start in range(0, video_len, self.sliding_window):
video_start = 0
video_end = min(video_len - window_start, self.sliding_window_size)
video_clip = {"start": [video_start], "end": [video_end]}
vfeat, vmask = self._build_video_seq(
video_feature[window_start: window_start + video_end],
video_clip
)
target = labels[window_start: window_start + video_end]
assert len(vfeat) >= len(target), "{},{}".format(len(vfeat), len(target))
            # TODO: randomly drop all-zero targets during training?
# if self.split == "train" and target.sum() == 0:
# continue
vfeats.append(vfeat)
vmasks.append(vmask)
targets.append(target)
if (video_len - window_start) <= self.sliding_window_size:
break
vfeats = torch.stack(vfeats)
vmasks = torch.stack(vmasks)
targets = torch.cat(targets, dim=0)
caps, cmasks = [], []
for step in text_feature:
step_text_feature = {"start": [0], "end": [1], "cap": [step]}
step_text_clip_index = [0]
cap, cmask = self._build_text_seq(
step_text_feature, step_text_clip_index
)
caps.append(cap)
cmasks.append(cmask)
caps = torch.stack(caps)
cmasks = torch.stack(cmasks)
return {
"caps": caps,
"cmasks": cmasks,
"vfeats": vfeats, # X for original code.
"vmasks": vmasks,
"targets": targets,
"video_id": vid,
"task": task,
"video_len": video_len # for later checking.
}
def _read_assignment(self, T, K, path):
"""
refactored from https://github.com/DmZhukov/CrossTask/blob/master/data.py
        How to interpret constraints on the loss that is going to be minimized:
lambd is a big number;
self.lambd * C is a big number for all valid position (csv stores invalids)
def forward(self, O, Y, C):
return (Y*(self.lambd * C - self.lsm(O))).mean(dim=0).sum()
        This loads the csv file and fills in the step column from the start row to the end row.
"""
Y = np.zeros([T, K], dtype=np.uint8)
with open(path, 'r') as f:
for line in f:
step, start, end = line.strip().split(',')
start = int(math.floor(float(start)))
end = int(math.ceil(float(end)))
step = int(step) - 1
Y[start:end, step] = 1
return Y
# --------------------- COIN -------------------------
class MetaTextBinarizer(Aligner):
def __call__(self, text_feature):
text_feature = {
"cap": [text_feature],
"start": [0.],
"end": [100.],
}
text_clip_indexs = [0]
caps, cmasks = self._build_text_seq(
text_feature, text_clip_indexs
)
return {"caps": caps, "cmasks": cmasks}
class COINActionSegmentationMetaProcessor(MetaProcessor):
split_map = {
"train": "training",
"valid": "testing",
"test": "testing",
}
def __init__(self, config):
super().__init__(config)
with open(self._get_split_path(config)) as fr:
database = json.load(fr)["database"]
id2label = {}
data = []
# filter the data by split.
for video_id, rec in database.items():
# always use testing to determine label_set
if rec["subset"] == "testing":
for segment in rec["annotation"]:
id2label[int(segment["id"])] = segment["label"]
        # text_labels is used for the zero-shot (ZS) setting
self.text_labels = ["none"] * len(id2label)
for label_id in id2label:
self.text_labels[label_id-1] = id2label[label_id]
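        # label 0 is reserved for background ("O"), i.e. frames outside any annotated segment.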
id2label[0] = "O"
print("num of labels", len(id2label))
for video_id, rec in database.items():
if not os.path.isfile(os.path.join(config.vfeat_dir, video_id + ".npy")):
continue
if rec["subset"] == COINActionSegmentationMetaProcessor.split_map[self.split]:
starts, ends, labels = [], [], []
for segment in rec["annotation"]:
start, end = segment["segment"]
label = int(segment["id"])
starts.append(start)
ends.append(end)
labels.append(label)
data.append(
(video_id, {"start": starts, "end": ends, "label": labels}))
self.data = data
def meta_text_labels(self, config):
from transformers import default_data_collator
from ..utils import get_local_rank
text_processor = TextProcessor(config)
binarizer = MetaTextBinarizer(config)
# TODO: add prompts to .yaml.
text_labels = [label for label in self.text_labels]
if get_local_rank() == 0:
print(text_labels)
outputs = []
for text_label in text_labels:
text_feature = text_processor(text_label)
outputs.append(binarizer(text_feature))
return default_data_collator(outputs)
def __getitem__(self, idx):
return self.data[idx]
class COINActionSegmentationTextProcessor(TextProcessor):
def __call__(self, text_label):
return text_label
class COINActionSegmentationAligner(Aligner):
def __init__(self, config):
super().__init__(config)
self.sliding_window = config.sliding_window
self.sliding_window_size = config.sliding_window_size
def __call__(self, video_id, video_feature, text_feature):
starts, ends, label_ids = text_feature["start"], text_feature["end"], text_feature["label"]
# sliding window.
video_len = len(video_feature)
vfeats, vmasks, targets = [], [], []
# sliding window on video features and targets.
for window_start in range(0, video_len, self.sliding_window):
video_start = 0
video_end = min(video_len - window_start, self.sliding_window_size)
video_clip = {"start": [video_start], "end": [video_end]}
vfeat, vmask = self._build_video_seq(
video_feature[window_start: window_start + video_end],
video_clip
)
            # targets cover the valid video frames only; padded positions stay -100
            # (the default cross-entropy ignore_index).
target = torch.full_like(vmask, -100, dtype=torch.long)
target[vmask] = 0
for start, end, label_id in zip(starts, ends, label_ids):
if (window_start < end) and (start < (window_start + video_end)):
start_offset = max(0, math.floor(start) - window_start)
end_offset = min(video_end, math.ceil(end) - window_start)
target[start_offset:end_offset] = label_id
vfeats.append(vfeat)
vmasks.append(vmask)
targets.append(target)
if (video_len - window_start) <= self.sliding_window_size:
break
vfeats = torch.stack(vfeats)
vmasks = torch.stack(vmasks)
targets = torch.stack(targets)
video_targets = torch.full((video_len,), 0)
for start, end, label_id in zip(starts, ends, label_ids):
start_offset = max(0, math.floor(start))
end_offset = min(video_len, math.ceil(end))
video_targets[start_offset:end_offset] = label_id
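        # action segmentation has no real caption: build a dummy
        # [CLS] [SEP] [PAD] [SEP] token sequence per window so the text stream
        # still has a fixed, minimal shape.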
caps = torch.LongTensor(
[[self.cls_token_id, self.sep_token_id,
self.pad_token_id, self.sep_token_id]],
).repeat(vfeats.size(0), 1)
cmasks = torch.BoolTensor(
[[0, 1, 0, 1]] # pad are valid for attention.
).repeat(vfeats.size(0), 1)
return {
"caps": caps,
"cmasks": cmasks,
"vfeats": vfeats, # X for original code.
"vmasks": vmasks,
"targets": targets,
"video_id": video_id,
"video_len": video_len, # for later checking.
"video_targets": video_targets
}
class DiDeMoMetaProcessor(MetaProcessor):
"""reference: https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/eval.py
https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/data_processing.py
"""
def __init__(self, config):
super().__init__(config)
assert "test" in self._get_split_path(config), "DiDeMo only supports zero-shot testing for now."
with open(self._get_split_path(config)) as data_file:
json_data = json.load(data_file)
data = []
for record in json_data:
data.append((record["video"], record["description"]))
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
class DiDeMoTextProcessor(TextProcessor):
"""reference: https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/eval.py
https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/data_processing.py
"""
def __call__(self, text):
return self.tokenizer(text, add_special_tokens=False)["input_ids"]
class DiDeMoAligner(DSAligner):
"""
    Same as DSAligner; kept as a separate class to allow checking the video
    length (see the commented-out print below).
"""
def __call__(self, video_id, video_feature, text_feature):
# print(video_feature.shape[0])
return super().__call__(video_id, video_feature, text_feature)
| 29,891 | 34.208481 | 104 | py |
rej-summ | rej-summ-main/examples/MMPT/mmpt/processors/models/s3dg.py | # This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Contains a PyTorch definition for Gated Separable 3D network (S3D-G)
with a text module for computing joint text-video embedding from raw text
and video input. The following code will enable you to load the HowTo100M
pretrained S3D Text-Video model from:
A. Miech, J.-B. Alayrac, L. Smaira, I. Laptev, J. Sivic and A. Zisserman,
End-to-End Learning of Visual Representations from Uncurated Instructional Videos.
https://arxiv.org/abs/1912.06430.
S3D-G was proposed by:
S. Xie, C. Sun, J. Huang, Z. Tu and K. Murphy,
Rethinking Spatiotemporal Feature Learning For Video Understanding.
https://arxiv.org/abs/1712.04851.
Tensorflow code: https://github.com/tensorflow/models/blob/master/research/slim/nets/s3dg.py
The S3D architecture was slightly modified with a space to depth trick for TPU
optimization.
"""
import torch as th
import torch.nn.functional as F
import torch.nn as nn
import os
import numpy as np
import re
class InceptionBlock(nn.Module):
def __init__(
self,
input_dim,
num_outputs_0_0a,
num_outputs_1_0a,
num_outputs_1_0b,
num_outputs_2_0a,
num_outputs_2_0b,
num_outputs_3_0b,
gating=True,
):
super(InceptionBlock, self).__init__()
self.conv_b0 = STConv3D(input_dim, num_outputs_0_0a, [1, 1, 1])
self.conv_b1_a = STConv3D(input_dim, num_outputs_1_0a, [1, 1, 1])
self.conv_b1_b = STConv3D(
num_outputs_1_0a, num_outputs_1_0b, [3, 3, 3], padding=1, separable=True
)
self.conv_b2_a = STConv3D(input_dim, num_outputs_2_0a, [1, 1, 1])
self.conv_b2_b = STConv3D(
num_outputs_2_0a, num_outputs_2_0b, [3, 3, 3], padding=1, separable=True
)
self.maxpool_b3 = th.nn.MaxPool3d((3, 3, 3), stride=1, padding=1)
self.conv_b3_b = STConv3D(input_dim, num_outputs_3_0b, [1, 1, 1])
self.gating = gating
self.output_dim = (
num_outputs_0_0a + num_outputs_1_0b + num_outputs_2_0b + num_outputs_3_0b
)
if gating:
self.gating_b0 = SelfGating(num_outputs_0_0a)
self.gating_b1 = SelfGating(num_outputs_1_0b)
self.gating_b2 = SelfGating(num_outputs_2_0b)
self.gating_b3 = SelfGating(num_outputs_3_0b)
def forward(self, input):
"""Inception block
"""
b0 = self.conv_b0(input)
b1 = self.conv_b1_a(input)
b1 = self.conv_b1_b(b1)
b2 = self.conv_b2_a(input)
b2 = self.conv_b2_b(b2)
b3 = self.maxpool_b3(input)
b3 = self.conv_b3_b(b3)
if self.gating:
b0 = self.gating_b0(b0)
b1 = self.gating_b1(b1)
b2 = self.gating_b2(b2)
b3 = self.gating_b3(b3)
return th.cat((b0, b1, b2, b3), dim=1)
class SelfGating(nn.Module):
def __init__(self, input_dim):
super(SelfGating, self).__init__()
self.fc = nn.Linear(input_dim, input_dim)
def forward(self, input_tensor):
"""Feature gating as used in S3D-G.
"""
spatiotemporal_average = th.mean(input_tensor, dim=[2, 3, 4])
weights = self.fc(spatiotemporal_average)
weights = th.sigmoid(weights)
return weights[:, :, None, None, None] * input_tensor
class STConv3D(nn.Module):
def __init__(
self, input_dim, output_dim, kernel_size, stride=1, padding=0, separable=False
):
super(STConv3D, self).__init__()
self.separable = separable
self.relu = nn.ReLU(inplace=True)
assert len(kernel_size) == 3
if separable and kernel_size[0] != 1:
spatial_kernel_size = [1, kernel_size[1], kernel_size[2]]
temporal_kernel_size = [kernel_size[0], 1, 1]
if isinstance(stride, list) and len(stride) == 3:
spatial_stride = [1, stride[1], stride[2]]
temporal_stride = [stride[0], 1, 1]
else:
spatial_stride = [1, stride, stride]
temporal_stride = [stride, 1, 1]
if isinstance(padding, list) and len(padding) == 3:
spatial_padding = [0, padding[1], padding[2]]
temporal_padding = [padding[0], 0, 0]
else:
spatial_padding = [0, padding, padding]
temporal_padding = [padding, 0, 0]
if separable:
self.conv1 = nn.Conv3d(
input_dim,
output_dim,
kernel_size=spatial_kernel_size,
stride=spatial_stride,
padding=spatial_padding,
bias=False,
)
self.bn1 = nn.BatchNorm3d(output_dim)
self.conv2 = nn.Conv3d(
output_dim,
output_dim,
kernel_size=temporal_kernel_size,
stride=temporal_stride,
padding=temporal_padding,
bias=False,
)
self.bn2 = nn.BatchNorm3d(output_dim)
else:
self.conv1 = nn.Conv3d(
input_dim,
output_dim,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=False,
)
self.bn1 = nn.BatchNorm3d(output_dim)
def forward(self, input):
out = self.relu(self.bn1(self.conv1(input)))
if self.separable:
out = self.relu(self.bn2(self.conv2(out)))
return out
class MaxPool3dTFPadding(th.nn.Module):
def __init__(self, kernel_size, stride=None, padding="SAME"):
super(MaxPool3dTFPadding, self).__init__()
if padding == "SAME":
padding_shape = self._get_padding_shape(kernel_size, stride)
self.padding_shape = padding_shape
self.pad = th.nn.ConstantPad3d(padding_shape, 0)
self.pool = th.nn.MaxPool3d(kernel_size, stride, ceil_mode=True)
def _get_padding_shape(self, filter_shape, stride):
def _pad_top_bottom(filter_dim, stride_val):
pad_along = max(filter_dim - stride_val, 0)
pad_top = pad_along // 2
pad_bottom = pad_along - pad_top
return pad_top, pad_bottom
padding_shape = []
for filter_dim, stride_val in zip(filter_shape, stride):
pad_top, pad_bottom = _pad_top_bottom(filter_dim, stride_val)
padding_shape.append(pad_top)
padding_shape.append(pad_bottom)
depth_top = padding_shape.pop(0)
depth_bottom = padding_shape.pop(0)
padding_shape.append(depth_top)
padding_shape.append(depth_bottom)
return tuple(padding_shape)
def forward(self, inp):
inp = self.pad(inp)
out = self.pool(inp)
return out
class Sentence_Embedding(nn.Module):
def __init__(
self,
embd_dim,
num_embeddings=66250,
word_embedding_dim=300,
token_to_word_path="dict.npy",
max_words=16,
output_dim=2048,
):
super(Sentence_Embedding, self).__init__()
self.word_embd = nn.Embedding(num_embeddings, word_embedding_dim)
self.fc1 = nn.Linear(word_embedding_dim, output_dim)
self.fc2 = nn.Linear(output_dim, embd_dim)
self.word_to_token = {}
self.max_words = max_words
token_to_word = np.load(token_to_word_path)
for i, t in enumerate(token_to_word):
self.word_to_token[t] = i + 1
def _zero_pad_tensor_token(self, tensor, size):
if len(tensor) >= size:
return tensor[:size]
else:
zero = th.zeros(size - len(tensor)).long()
return th.cat((tensor, zero), dim=0)
def _split_text(self, sentence):
w = re.findall(r"[\w']+", str(sentence))
return w
def _words_to_token(self, words):
words = [
self.word_to_token[word] for word in words if word in self.word_to_token
]
if words:
we = self._zero_pad_tensor_token(th.LongTensor(words), self.max_words)
return we
else:
return th.zeros(self.max_words).long()
def _words_to_ids(self, x):
split_x = [self._words_to_token(self._split_text(sent.lower())) for sent in x]
return th.stack(split_x, dim=0)
def forward(self, x):
x = self._words_to_ids(x)
x = self.word_embd(x)
x = F.relu(self.fc1(x))
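        # max-pool over the word dimension to obtain a single fixed-size sentence vector.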
x = th.max(x, dim=1)[0]
x = self.fc2(x)
return {'text_embedding': x}
class S3D(nn.Module):
def __init__(self, dict_path, num_classes=512, gating=True, space_to_depth=True):
super(S3D, self).__init__()
self.num_classes = num_classes
self.gating = gating
self.space_to_depth = space_to_depth
if space_to_depth:
self.conv1 = STConv3D(
24, 64, [2, 4, 4], stride=1, padding=(1, 2, 2), separable=False
)
else:
self.conv1 = STConv3D(
3, 64, [3, 7, 7], stride=2, padding=(1, 3, 3), separable=False
)
self.conv_2b = STConv3D(64, 64, [1, 1, 1], separable=False)
self.conv_2c = STConv3D(64, 192, [3, 3, 3], padding=1, separable=True)
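        # NOTE: the next line overwrites the boolean `gating` flag set above with a
        # SelfGating module; since a module is always truthy, the `if self.gating:`
        # check in forward always applies this gating layer.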
self.gating = SelfGating(192)
self.maxpool_2a = MaxPool3dTFPadding(
kernel_size=(1, 3, 3), stride=(1, 2, 2), padding="SAME"
)
self.maxpool_3a = MaxPool3dTFPadding(
kernel_size=(1, 3, 3), stride=(1, 2, 2), padding="SAME"
)
self.mixed_3b = InceptionBlock(192, 64, 96, 128, 16, 32, 32)
self.mixed_3c = InceptionBlock(
self.mixed_3b.output_dim, 128, 128, 192, 32, 96, 64
)
self.maxpool_4a = MaxPool3dTFPadding(
kernel_size=(3, 3, 3), stride=(2, 2, 2), padding="SAME"
)
self.mixed_4b = InceptionBlock(
self.mixed_3c.output_dim, 192, 96, 208, 16, 48, 64
)
self.mixed_4c = InceptionBlock(
self.mixed_4b.output_dim, 160, 112, 224, 24, 64, 64
)
self.mixed_4d = InceptionBlock(
self.mixed_4c.output_dim, 128, 128, 256, 24, 64, 64
)
self.mixed_4e = InceptionBlock(
self.mixed_4d.output_dim, 112, 144, 288, 32, 64, 64
)
self.mixed_4f = InceptionBlock(
self.mixed_4e.output_dim, 256, 160, 320, 32, 128, 128
)
self.maxpool_5a = self.maxPool3d_5a_2x2 = MaxPool3dTFPadding(
kernel_size=(2, 2, 2), stride=(2, 2, 2), padding="SAME"
)
self.mixed_5b = InceptionBlock(
self.mixed_4f.output_dim, 256, 160, 320, 32, 128, 128
)
self.mixed_5c = InceptionBlock(
self.mixed_5b.output_dim, 384, 192, 384, 48, 128, 128
)
self.fc = nn.Linear(self.mixed_5c.output_dim, num_classes)
self.text_module = Sentence_Embedding(num_classes,
token_to_word_path=dict_path)
def _space_to_depth(self, input):
"""3D space to depth trick for TPU optimization.
"""
B, C, T, H, W = input.shape
input = input.view(B, C, T // 2, 2, H // 2, 2, W // 2, 2)
input = input.permute(0, 3, 5, 7, 1, 2, 4, 6)
input = input.contiguous().view(B, 8 * C, T // 2, H // 2, W // 2)
return input
def forward(self, inputs):
"""Defines the S3DG base architecture."""
if self.space_to_depth:
inputs = self._space_to_depth(inputs)
net = self.conv1(inputs)
if self.space_to_depth:
# we need to replicate 'SAME' tensorflow padding
net = net[:, :, 1:, 1:, 1:]
net = self.maxpool_2a(net)
net = self.conv_2b(net)
net = self.conv_2c(net)
if self.gating:
net = self.gating(net)
net = self.maxpool_3a(net)
net = self.mixed_3b(net)
net = self.mixed_3c(net)
net = self.maxpool_4a(net)
net = self.mixed_4b(net)
net = self.mixed_4c(net)
net = self.mixed_4d(net)
net = self.mixed_4e(net)
net = self.mixed_4f(net)
net = self.maxpool_5a(net)
net = self.mixed_5b(net)
net = self.mixed_5c(net)
net = th.mean(net, dim=[2, 3, 4])
return {'video_embedding': self.fc(net), 'mixed_5c': net}
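if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module). The file names
    # "s3d_dict.npy" (token vocabulary) and "s3d_howto100m.pth" (pretrained
    # weights) are assumptions based on the public S3D HowTo100M release and
    # must exist locally for this to run.
    model = S3D("s3d_dict.npy", num_classes=512)
    # model.load_state_dict(th.load("s3d_howto100m.pth"))  # optional pretrained weights
    model.eval()
    with th.no_grad():
        video = th.rand(1, 3, 32, 224, 224)  # 32 RGB frames, values in [0, 1]
        video_out = model(video)
        text_out = model.text_module(["a person slices a tomato"])
    print(video_out["video_embedding"].shape)  # torch.Size([1, 512])
    print(text_out["text_embedding"].shape)  # torch.Size([1, 512])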
| 12,416 | 35.845697 | 94 | py |
rej-summ | rej-summ-main/examples/MMPT/mmpt/utils/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
import numpy as np
import torch
from .shardedtensor import *
from .load_config import *
def set_seed(seed=43211):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if torch.backends.cudnn.enabled:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def get_world_size():
if torch.distributed.is_initialized():
world_size = torch.distributed.get_world_size()
else:
world_size = 1
return world_size
def get_local_rank():
return torch.distributed.get_rank() \
if torch.distributed.is_initialized() else 0
def print_on_rank0(func):
local_rank = get_local_rank()
if local_rank == 0:
print("[INFO]", func)
class RetriMeter(object):
"""
Statistics on whether retrieval yields a better pair.
"""
def __init__(self, freq=1024):
self.freq = freq
self.total = 0
self.replace = 0
self.updates = 0
def __call__(self, data):
if isinstance(data, np.ndarray):
self.replace += data.shape[0] - int((data[:, 0] == -1).sum())
self.total += data.shape[0]
elif torch.is_tensor(data):
self.replace += int(data.sum())
self.total += data.size(0)
else:
raise ValueError("unsupported RetriMeter data type.", type(data))
self.updates += 1
if get_local_rank() == 0 and self.updates % self.freq == 0:
print("[INFO]", self)
def __repr__(self):
return "RetriMeter (" + str(self.replace / self.total) \
+ "/" + str(self.replace) + "/" + str(self.total) + ")"
| 1,886 | 26.347826 | 77 | py |
rej-summ | rej-summ-main/examples/MMPT/mmpt/tasks/retritask.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
import pickle
import random
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from ..processors import (
ShardedHow2MetaProcessor,
ShardedVideoProcessor,
ShardedTextProcessor,
VariedLenAligner,
)
from ..datasets import MMDataset
from .task import Task
from ..modules import vectorpool
from ..evaluators.predictor import Predictor
from ..utils import set_seed, get_local_rank, get_world_size
class RetriTask(Task):
"""abstract class for task with retrival."""
def reshape_subsample(self, sample):
for key in sample:
if torch.is_tensor(sample[key]):
sample[key] = self.flat_subsample(sample[key])
return sample
def flat_subsample(self, tensor):
if tensor.size(0) == 1:
tensor = tensor.squeeze(0)
return tensor
def build_dataloader(self):
"""called by `get_batch_iterator` in fairseqmmtask. """
        # TODO: the retrieval dataloader is hard-coded for now; make it configurable in .yaml.
# reuse the `train.lst`.
self.config.dataset.split = "train"
meta_processor = ShardedHow2MetaProcessor(self.config.dataset)
video_processor = ShardedVideoProcessor(self.config.dataset)
text_processor = ShardedTextProcessor(self.config.dataset)
aligner = VariedLenAligner(self.config.dataset)
aligner.subsampling = self.config.dataset.clip_per_video
self.retri_data = MMDataset(
meta_processor, video_processor, text_processor, aligner
)
retri_sampler = DistributedSampler(self.retri_data)
infer_scale = 16
batch_size = self.config.dataset.num_video_per_batch \
* infer_scale
self.retri_dataloader = DataLoader(
self.retri_data,
collate_fn=self.retri_data.collater,
batch_size=batch_size,
shuffle=False,
sampler=retri_sampler,
num_workers=self.config.fairseq.dataset.num_workers
)
return self.retri_dataloader
def retrive_candidates(self, epoch, dataloader=None):
if get_local_rank() == 0:
print("running retrieval model.")
out_dir = os.path.join(
self.config.fairseq.checkpoint.save_dir, "retri")
os.makedirs(out_dir, exist_ok=True)
if not os.path.isfile(
os.path.join(
out_dir, "batched_e" + str(epoch) + "_videos0.pkl")
):
if dataloader is None:
dataloader = self.retri_dataloader
self.model.eval()
self.model.is_train = False
assert self.retri_data.meta_processor.data == \
self.train_data.meta_processor.data # video_ids not mutated.
self._retri_predict(epoch, dataloader)
self.model.train()
self.model.is_train = True
torch.distributed.barrier()
output = self._retri_sync(epoch, out_dir)
torch.distributed.barrier()
self.train_data.meta_processor.set_candidates(output)
return output
class VideoRetriTask(RetriTask):
"""RetriTask on video level."""
def reshape_subsample(self, sample):
if (
hasattr(self.config.dataset, "clip_per_video")
and self.config.dataset.clip_per_video is not None
and self.config.dataset.clip_per_video > 1
):
for key in sample:
if torch.is_tensor(sample[key]):
sample[key] = self.flat_subsample(sample[key])
return sample
def flat_subsample(self, tensor):
if tensor.size(0) == 1:
tensor = tensor.squeeze(0)
return Task.flat_subsample(self, tensor)
def _retri_predict(self, epoch, dataloader):
set_seed(epoch)
        # save for retrieval.
predictor = VideoPredictor(self.config)
predictor.predict_loop(
self.model, dataloader)
set_seed(epoch) # get the same text clips.
        # retrieval.
retri_predictor = VideoRetriPredictor(
self.config)
retri_predictor.predict_loop(
self.model, predictor.vecpool.retriver, epoch)
del predictor
del retri_predictor
def _retri_sync(self, epoch, out_dir):
        # every gpu performs the same merge.
batched_videos = []
for local_rank in range(get_world_size()):
fn = os.path.join(
out_dir,
"batched_e" + str(epoch) + "_videos" + str(local_rank) + ".pkl")
with open(fn, "rb") as fr:
batched_videos.extend(pickle.load(fr))
print(
"[INFO] batched_videos",
len(batched_videos), len(batched_videos[0]))
return batched_videos
class VideoPredictor(Predictor):
def __init__(self, config):
vectorpool_cls = getattr(vectorpool, config.vectorpool_cls)
self.vecpool = vectorpool_cls(config)
def predict_loop(
self,
model,
dataloader,
early_stop=-1,
):
with torch.no_grad():
if get_local_rank() == 0:
dataloader = tqdm(dataloader)
for batch_idx, batch in enumerate(dataloader):
if batch_idx == early_stop:
break
self(batch, model)
return self.finalize()
def __call__(self, sample, model, **kwargs):
param = next(model.parameters())
dtype = param.dtype
device = param.device
subsample = sample["vfeats"].size(1)
sample = self.to_ctx(sample, device, dtype)
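        # each video in the batch contributes `subsample` clips; flatten
        # (batch, clips, ...) -> (batch * clips, ...) before the forward pass.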
for key in sample:
if torch.is_tensor(sample[key]):
size = sample[key].size()
if len(size) >= 2:
batch_size = size[0] * size[1]
expanded_size = (
(batch_size,) + size[2:] if len(size) > 2
else (batch_size,)
)
sample[key] = sample[key].view(expanded_size)
outputs = model(**sample)
sample.update(outputs)
self.vecpool(sample, subsample)
def finalize(self):
print("[INFO]", self.vecpool)
if not self.vecpool.retriver.db.is_trained:
self.vecpool.retriver.finalize_training()
return self.vecpool.retriver
class VideoRetriPredictor(Predictor):
"""
Online Retrieval Predictor for Clips (used by RetriTask).
TODO: merge this with VisPredictor?
"""
def __init__(self, config):
self.pred_dir = os.path.join(
config.fairseq.checkpoint.save_dir,
"retri")
self.num_cands = config.num_cands
self.num_video_per_batch = config.dataset.num_video_per_batch
def predict_loop(
self,
model,
retriver,
epoch,
early_stop=-1
):
        # a fake loop that only tries to recover video vectors
        # from their video ids.
batched_videos = []
# obtain available video_ids.
video_ids = list(retriver.videoid_to_vectoridx.keys())
dataloader = random.sample(
video_ids,
len(video_ids) // self.num_video_per_batch
)
if get_local_rank() == 0:
dataloader = tqdm(dataloader)
for batch_idx, batch in enumerate(dataloader):
# batch is one video id.
if batch_idx == early_stop:
break
video_ids = retriver.search_by_video_ids(
[batch], self.num_cands)[0]
if len(video_ids) > self.num_video_per_batch:
                # randomly subsample the candidates; this shifts the cluster
                # center and makes the clusters more robust.
video_ids = random.sample(video_ids, self.num_video_per_batch)
batched_videos.append(video_ids)
return self.finalize(batched_videos, epoch)
def finalize(self, batched_videos, epoch):
fn = os.path.join(
self.pred_dir,
"batched_e" + str(epoch) + "_videos" + str(get_local_rank()) + ".pkl")
with open(fn, "wb") as fw:
pickle.dump(batched_videos, fw, pickle.HIGHEST_PROTOCOL)
return batched_videos
| 8,413 | 32.125984 | 82 | py |